##// END OF EJS Templates
clonebundles: allow manifest to specify sha256 digest of bundles
Joerg Sonnenberger -
r52875:aa7f4a45 default
parent child Browse files
Show More
@@ -1,570 +1,612
1 # bundlecaches.py - utility to deal with pre-computed bundle for servers
1 # bundlecaches.py - utility to deal with pre-computed bundle for servers
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import annotations
6 from __future__ import annotations
7
7
8 import collections
8 import collections
9 import re
9 import typing
10 import typing
10
11
11 from typing import (
12 from typing import (
12 Dict,
13 Dict,
13 Union,
14 Union,
14 cast,
15 cast,
15 )
16 )
16
17
17 from .i18n import _
18 from .i18n import _
18
19
19 from .thirdparty import attr
20 from .thirdparty import attr
20
21
21 # Force pytype to use the non-vendored package
22 # Force pytype to use the non-vendored package
22 if typing.TYPE_CHECKING:
23 if typing.TYPE_CHECKING:
23 # noinspection PyPackageRequirements
24 # noinspection PyPackageRequirements
24 import attr
25 import attr
25
26
26 from . import (
27 from . import (
27 error,
28 error,
28 requirements as requirementsmod,
29 requirements as requirementsmod,
29 sslutil,
30 sslutil,
31 url as urlmod,
30 util,
32 util,
31 )
33 )
32 from .utils import stringutil
34 from .utils import stringutil
33
35
34 urlreq = util.urlreq
36 urlreq = util.urlreq
35
37
36 BUNDLE_CACHE_DIR = b'bundle-cache'
38 BUNDLE_CACHE_DIR = b'bundle-cache'
37 CB_MANIFEST_FILE = b'clonebundles.manifest'
39 CB_MANIFEST_FILE = b'clonebundles.manifest'
38 CLONEBUNDLESCHEME = b"peer-bundle-cache://"
40 CLONEBUNDLESCHEME = b"peer-bundle-cache://"
39
41
40
42
def get_manifest(repo) -> bytes:
    """Return the clone-bundle manifest to serve to a client.

    Reads the raw manifest file from the repository vfs and passes the
    URL of each entry through ``alter_bundle_url`` so that extensions or
    hosting solutions can rewrite it (e.g. to inject authentication).
    """
    raw_text = repo.vfs.tryread(CB_MANIFEST_FILE)

    out = []
    for raw_line in raw_text.splitlines():
        # each line is "<url>[ <attributes...>]"; only the URL is rewritten
        parts = raw_line.split(b' ', 1)
        url = alter_bundle_url(repo, parts[0])
        if len(parts) > 1:
            out.append(b"%s %s\n" % (url, parts[1]))
        else:
            out.append(url + b'\n')
    return b''.join(out)
55
57
56
58
def alter_bundle_url(repo, url: bytes) -> bytes:
    """Hook point letting extensions and hosting solutions alter a bundle URL.

    This will typically be wrapped to inject authentication information
    into the url of cached bundles. The default implementation returns
    the URL unchanged.
    """
    return url
63
65
64
66
65 SUPPORTED_CLONEBUNDLE_SCHEMES = [
67 SUPPORTED_CLONEBUNDLE_SCHEMES = [
66 b"http://",
68 b"http://",
67 b"https://",
69 b"https://",
68 b"largefile://",
70 b"largefile://",
69 CLONEBUNDLESCHEME,
71 CLONEBUNDLESCHEME,
70 ]
72 ]
71
73
72
74
@attr.s
class bundlespec:
    """Parsed representation of a bundle specification string."""

    # local and on-the-wire names of the compression engine
    compression = attr.ib()
    wirecompression = attr.ib()
    # human-facing version name and its changegroup ("wire") version
    version = attr.ib()
    wireversion = attr.ib()
    # parameters explicitly overwritten by the config or the specification
    _explicit_params = attr.ib()
    # default parameter for the version
    #
    # Keeping it separated is useful to check what was actually overwritten.
    _default_opts = attr.ib()

    @property
    def params(self):
        # explicit parameters shadow the per-version defaults
        return collections.ChainMap(self._explicit_params, self._default_opts)

    @property
    def contentopts(self):
        # kept for Backward Compatibility concerns.
        return self.params

    def set_param(self, key, value, overwrite=True):
        """Set a bundle parameter value.

        Will only overwrite if overwrite is true"""
        if not overwrite and key in self._explicit_params:
            return
        self._explicit_params[key] = value

    def as_spec(self):
        """Render this spec back into its canonical string form."""
        spec_parts = [b"%s-%s" % (self.compression, self.version)]
        spec_parts.extend(
            b'%s=%s' % item for item in sorted(self._explicit_params.items())
        )
        return b';'.join(spec_parts)
107
109
108
110
109 # Maps bundle version human names to changegroup versions.
111 # Maps bundle version human names to changegroup versions.
110 _bundlespeccgversions = {
112 _bundlespeccgversions = {
111 b'v1': b'01',
113 b'v1': b'01',
112 b'v2': b'02',
114 b'v2': b'02',
113 b'v3': b'03',
115 b'v3': b'03',
114 b'packed1': b's1',
116 b'packed1': b's1',
115 b'bundle2': b'02', # legacy
117 b'bundle2': b'02', # legacy
116 }
118 }
117
119
118 # Maps bundle version with content opts to choose which part to bundle
120 # Maps bundle version with content opts to choose which part to bundle
119 _bundlespeccontentopts: Dict[bytes, Dict[bytes, Union[bool, bytes]]] = {
121 _bundlespeccontentopts: Dict[bytes, Dict[bytes, Union[bool, bytes]]] = {
120 b'v1': {
122 b'v1': {
121 b'changegroup': True,
123 b'changegroup': True,
122 b'cg.version': b'01',
124 b'cg.version': b'01',
123 b'obsolescence': False,
125 b'obsolescence': False,
124 b'phases': False,
126 b'phases': False,
125 b'tagsfnodescache': False,
127 b'tagsfnodescache': False,
126 b'revbranchcache': False,
128 b'revbranchcache': False,
127 },
129 },
128 b'v2': {
130 b'v2': {
129 b'changegroup': True,
131 b'changegroup': True,
130 b'cg.version': b'02',
132 b'cg.version': b'02',
131 b'obsolescence': False,
133 b'obsolescence': False,
132 b'phases': False,
134 b'phases': False,
133 b'tagsfnodescache': True,
135 b'tagsfnodescache': True,
134 b'revbranchcache': True,
136 b'revbranchcache': True,
135 },
137 },
136 b'v3': {
138 b'v3': {
137 b'changegroup': True,
139 b'changegroup': True,
138 b'cg.version': b'03',
140 b'cg.version': b'03',
139 b'obsolescence': False,
141 b'obsolescence': False,
140 b'phases': True,
142 b'phases': True,
141 b'tagsfnodescache': True,
143 b'tagsfnodescache': True,
142 b'revbranchcache': True,
144 b'revbranchcache': True,
143 },
145 },
144 b'streamv2': {
146 b'streamv2': {
145 b'changegroup': False,
147 b'changegroup': False,
146 b'cg.version': b'02',
148 b'cg.version': b'02',
147 b'obsolescence': False,
149 b'obsolescence': False,
148 b'phases': False,
150 b'phases': False,
149 b"stream": b"v2",
151 b"stream": b"v2",
150 b'tagsfnodescache': False,
152 b'tagsfnodescache': False,
151 b'revbranchcache': False,
153 b'revbranchcache': False,
152 },
154 },
153 b'streamv3-exp': {
155 b'streamv3-exp': {
154 b'changegroup': False,
156 b'changegroup': False,
155 b'cg.version': b'03',
157 b'cg.version': b'03',
156 b'obsolescence': False,
158 b'obsolescence': False,
157 b'phases': False,
159 b'phases': False,
158 b"stream": b"v3-exp",
160 b"stream": b"v3-exp",
159 b'tagsfnodescache': False,
161 b'tagsfnodescache': False,
160 b'revbranchcache': False,
162 b'revbranchcache': False,
161 },
163 },
162 b'packed1': {
164 b'packed1': {
163 b'cg.version': b's1',
165 b'cg.version': b's1',
164 },
166 },
165 b'bundle2': { # legacy
167 b'bundle2': { # legacy
166 b'cg.version': b'02',
168 b'cg.version': b'02',
167 },
169 },
168 }
170 }
169 _bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']
171 _bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']
170
172
171 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
173 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
172 _bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
174 _bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
173
175
174
176
def param_bool(key, value):
    """Coerce a bundle-spec parameter value into a boolean.

    Raises ``error.InvalidBundleSpecification`` when the value does not
    parse as a boolean.
    """
    parsed = stringutil.parsebool(value)
    if parsed is not None:
        return parsed
    msg = _(b"parameter %s should be a boolean ('%s')") % (key, value)
    raise error.InvalidBundleSpecification(msg)
183
185
184
186
185 # mapping of known parameter name need their value processed
187 # mapping of known parameter name need their value processed
186 bundle_spec_param_processing = {
188 bundle_spec_param_processing = {
187 b"obsolescence": param_bool,
189 b"obsolescence": param_bool,
188 b"obsolescence-mandatory": param_bool,
190 b"obsolescence-mandatory": param_bool,
189 b"phases": param_bool,
191 b"phases": param_bool,
190 b"changegroup": param_bool,
192 b"changegroup": param_bool,
191 b"tagsfnodescache": param_bool,
193 b"tagsfnodescache": param_bool,
192 b"revbranchcache": param_bool,
194 b"revbranchcache": param_bool,
193 }
195 }
194
196
195
197
def _parseparams(s):
    """parse bundlespec parameter section

    input: "comp-version;params" string

    return: (spec; {param_key: param_value})
    """
    # no ";" means no parameter section at all
    if b';' not in s:
        return s, {}

    version, paramstr = s.split(b';', 1)

    params = {}
    err = _(b'invalid bundle specification: missing "=" in parameter: %s')
    for raw_pair in paramstr.split(b';'):
        if b'=' not in raw_pair:
            raise error.InvalidBundleSpecification(err % raw_pair)

        key, value = raw_pair.split(b'=', 1)
        # parameters are URI-encoded in the spec string
        key = urlreq.unquote(key)
        value = urlreq.unquote(value)
        # some well-known parameters get their value post-processed
        # (e.g. coerced to a boolean)
        process = bundle_spec_param_processing.get(key)
        if process is not None:
            value = process(key, value)
        params[key] = value

    return version, params
224
226
225
227
def parsebundlespec(repo, spec, strict=True):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    Returns a bundlespec object of (compression, version, parameters).
    Compression will be ``None`` if not in strict mode and a compression isn't
    defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    if strict and b'-' not in spec:
        raise error.InvalidBundleSpecification(
            _(
                b'invalid bundle specification; '
                b'must be prefixed with compression: %s'
            )
            % spec
        )

    # only look for the "-" separator before any ";" parameter section
    pre_args = spec.split(b';', 1)[0]
    if b'-' in pre_args:
        compression, version = spec.split(b'-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _(b'%s compression is not supported') % compression
            )

        version, params = _parseparams(version)

        if version not in _bundlespeccontentopts:
            raise error.UnsupportedBundleSpecification(
                _(b'%s is not a recognized bundle version') % version
            )
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = _parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = b'v1'
            # Generaldelta repos require v2.
            if (
                requirementsmod.GENERALDELTA_REQUIREMENT in repo.requirements
                or requirementsmod.REVLOGV2_REQUIREMENT in repo.requirements
            ):
                version = b'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = b'v2'
        elif spec in _bundlespeccontentopts:
            compression = b'none' if spec == b'packed1' else b'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _(b'%s is not a recognized bundle specification') % spec
            )

    # Bundle version 1 only supports a known set of compression engines.
    if version == b'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _(b'compression engine %s is not supported on v1 bundles')
            % compression
        )

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == b'packed1' and b'requirements' in params:
        requirements = set(cast(bytes, params[b'requirements']).split(b','))
        missingreqs = requirements - requirementsmod.STREAM_FIXED_REQUIREMENTS
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _(b'missing support for repository features: %s')
                % b', '.join(sorted(missingreqs))
            )

    # Compute contentopts based on the version
    if b"stream" in params:
        # This case is fishy as this mostly derails the version selection
        # mechanism. `stream` bundles are quite specific and used differently
        # as "normal" bundles.
        #
        # (we should probably define a cleaner way to do this and raise a
        # warning when the old way is encountered)
        if params[b"stream"] == b"v2":
            version = b"streamv2"
        if params[b"stream"] == b"v3-exp":
            version = b"streamv3-exp"
    contentopts = _bundlespeccontentopts.get(version, {}).copy()
    if version in (b"streamv2", b"streamv3-exp"):
        # streamv2 have been reported as "v2" for a while.
        version = b"v2"

    engine = util.compengines.forbundlename(compression)
    compression, wirecompression = engine.bundletype()
    wireversion = _bundlespeccontentopts[version][b'cg.version']

    return bundlespec(
        compression, wirecompression, version, wireversion, params, contentopts
    )
355
357
356
358
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    entries = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue

        attrs = {b'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split(b'=', 1)
            # attribute keys/values are URI-encoded in the manifest
            key = util.urlreq.unquote(key)
            value = util.urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == b'BUNDLESPEC':
                try:
                    bundlespec = parsebundlespec(repo, value)
                    attrs[b'COMPRESSION'] = bundlespec.compression
                    attrs[b'VERSION'] = bundlespec.version
                except (
                    error.InvalidBundleSpecification,
                    error.UnsupportedBundleSpecification,
                ):
                    # a malformed spec is kept verbatim; filtering happens
                    # later in filterclonebundleentries
                    pass

        entries.append(attrs)

    return entries
391
393
392
394
def isstreamclonespec(bundlespec):
    """Return True if the given spec describes a stream clone bundle."""
    comp = bundlespec.wirecompression

    # Stream clone v1
    if comp == b'UN' and bundlespec.wireversion == b's1':
        return True

    # Stream clone v2 (and the experimental v3)
    return (
        comp == b'UN'
        and bundlespec.wireversion == b'02'
        and bundlespec.contentopts.get(b'stream', None) in (b"v2", b"v3-exp")
    )
407
409
408
410
# A DIGEST attribute is a comma-separated list of "algo:hexdigest" pairs.
digest_regex = re.compile(b'^[a-z0-9]+:[0-9a-f]+(,[a-z0-9]+:[0-9a-f]+)*$')


def filterclonebundleentries(
    repo, entries, streamclonerequested=False, pullbundles=False
):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        url = entry.get(b'URL')

        # Clone bundles must come from a supported URL scheme (pull bundles
        # are exempt from this restriction).
        if not pullbundles and not any(
            url.startswith(scheme) for scheme in SUPPORTED_CLONEBUNDLE_SCHEMES
        ):
            repo.ui.debug(
                b'filtering %s because not a supported clonebundle scheme\n'
                % url
            )
            continue

        spec = entry.get(b'BUNDLESPEC')
        if spec:
            try:
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug(
                        b'filtering %s because not a stream clone\n' % url
                    )
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug(
                    b'filtering %s because unsupported bundle '
                    b'spec: %s\n' % (url, stringutil.forcebytestr(e))
                )
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug(
                b'filtering %s because cannot determine if a stream '
                b'clone bundle\n' % url
            )
            continue

        if b'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug(b'filtering %s because SNI not supported\n' % url)
            continue

        if b'REQUIREDRAM' in entry:
            try:
                requiredram = util.sizetoint(entry[b'REQUIREDRAM'])
            except error.ParseError:
                repo.ui.debug(
                    b'filtering %s due to a bad REQUIREDRAM attribute\n' % url
                )
                continue
            actualram = repo.ui.estimatememory()
            if actualram is not None and actualram * 0.66 < requiredram:
                repo.ui.debug(
                    b'filtering %s as it needs more than 2/3 of system memory\n'
                    % url
                )
                continue

        if b'DIGEST' in entry:
            # malformed digest attribute: drop the entry outright
            if not digest_regex.match(entry[b'DIGEST']):
                repo.ui.debug(
                    b'filtering %s due to a bad DIGEST attribute\n' % url
                )
                continue
            supported = 0
            seen = {}
            for digest_entry in entry[b'DIGEST'].split(b','):
                algo, digest = digest_entry.split(b':')
                if algo not in seen:
                    seen[algo] = digest
                elif seen[algo] != digest:
                    # the same algorithm listed twice with different values
                    # is self-contradictory: reject the entry
                    repo.ui.debug(
                        b'filtering %s due to conflicting %s digests\n'
                        % (url, algo)
                    )
                    supported = 0
                    break
                digester = urlmod.digesthandler.digest_algorithms.get(algo)
                if digester is None:
                    # unknown algorithm: ignored, but doesn't count as
                    # "supported" either
                    continue
                if len(digest) != digester().digest_size * 2:
                    # hex digest has the wrong length for this algorithm
                    repo.ui.debug(
                        b'filtering %s due to a bad %s digest\n' % (url, algo)
                    )
                    supported = 0
                    break
                supported += 1
            else:
                # loop completed without break: require at least one
                # digest algorithm we can actually verify
                if supported == 0:
                    repo.ui.debug(
                        b'filtering %s due to lack of supported digest\n' % url
                    )
            if supported == 0:
                continue

        newentries.append(entry)

    return newentries
487
529
488
530
class clonebundleentry:
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        # value: attribute dict for one manifest entry
        # prefers: ordered list of (key, preferred-value) pairs
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        """Three-way compare against ``other`` using the preference list.

        Returns -1/0/1; 0 means no preference could break the tie, in
        which case callers fall back to original (index) order.
        """
        for key, wanted in self.prefers:
            mine = self.value.get(key)
            theirs = other.value.get(key)

            # Special case: only one side carries the attribute and it
            # matches the preferred value exactly — that side wins.
            if mine is not None and theirs is None and mine == wanted:
                return -1
            if theirs is not None and mine is None and theirs == wanted:
                return 1

            # We can't compare unless the attribute is present on both.
            if mine is None or theirs is None:
                continue

            # Equal values: defer to the next preference.
            if mine == theirs:
                continue

            # Exact matches come first.
            if mine == wanted:
                return -1
            if theirs == wanted:
                return 1

            # Neither side matched this preference; try the next one.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0
552
594
553
595
def sortclonebundleentries(ui, entries):
    """Sort manifest entries according to ``ui.clonebundleprefers``.

    When no preferences are configured the original order is kept.
    Raises ``error.Abort`` on malformed preference items.
    """
    prefers = ui.configlist(b'ui', b'clonebundleprefers')
    if not prefers:
        return list(entries)

    def _parse_pref(item):
        # each preference item must be of the form key=value
        if b'=' not in item:
            hint = _(b"each comma separated item should be key=value pairs")
            raise error.Abort(
                _(b"invalid ui.clonebundleprefers item: %s") % item, hint=hint
            )
        return item.split(b'=', 1)

    parsed_prefs = [_parse_pref(p) for p in prefers]

    # clonebundleentry implements the rich comparisons needed by sorted()
    ranked = sorted(clonebundleentry(v, parsed_prefs) for v in entries)
    return [item.value for item in ranked]
@@ -1,2959 +1,2974
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import annotations
8 from __future__ import annotations
9
9
10 import collections
10 import collections
11 import weakref
11 import weakref
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 hex,
15 hex,
16 nullrev,
16 nullrev,
17 )
17 )
18 from . import (
18 from . import (
19 bookmarks as bookmod,
19 bookmarks as bookmod,
20 bundle2,
20 bundle2,
21 bundlecaches,
21 bundlecaches,
22 changegroup,
22 changegroup,
23 discovery,
23 discovery,
24 error,
24 error,
25 lock as lockmod,
25 lock as lockmod,
26 logexchange,
26 logexchange,
27 narrowspec,
27 narrowspec,
28 obsolete,
28 obsolete,
29 obsutil,
29 obsutil,
30 phases,
30 phases,
31 pushkey,
31 pushkey,
32 pycompat,
32 pycompat,
33 requirements,
33 requirements,
34 scmutil,
34 scmutil,
35 streamclone,
35 streamclone,
36 url as urlmod,
36 url as urlmod,
37 util,
37 util,
38 wireprototypes,
38 wireprototypes,
39 )
39 )
40 from .utils import (
40 from .utils import (
41 hashutil,
41 hashutil,
42 stringutil,
42 stringutil,
43 urlutil,
43 urlutil,
44 )
44 )
45 from .interfaces import repository
45 from .interfaces import repository
46
46
47 urlerr = util.urlerr
47 urlerr = util.urlerr
48 urlreq = util.urlreq
48 urlreq = util.urlreq
49
49
50 _NARROWACL_SECTION = b'narrowacl'
50 _NARROWACL_SECTION = b'narrowacl'
51
51
52
52
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the 4-byte magic of *fh* and return the matching unbundler.

    ``fname`` is only used for error messages (``b"stream"`` when empty);
    when *vfs* is given the name is joined onto the vfs root first.
    Raises ``error.Abort`` for non-Mercurial data or unknown versions.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = b"stream"
        # A headerless changegroup stream starts with a NUL byte: push the
        # sniffed bytes back and treat it as an uncompressed HG10 bundle.
        if not header.startswith(b'HG') and header.startswith(b'\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = b"HG10"
            alg = b'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != b'HG':
        raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
    if version == b'10':
        # Bundle1: the two compression bytes follow the magic unless the
        # headerless fixup above already forced b'UN'.
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith(b'2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == b'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(
            _(b'%s: unknown bundle version %s') % (fname, version)
        )
82
82
83
83
def _format_params(params):
    """Serialize *params* as a ``key=value;key=value`` byte string.

    Keys are emitted in sorted order and values are URL-quoted so the
    result can be embedded in a bundlespec string.
    """
    return b';'.join(
        b"%s=%s" % (key, urlreq.quote(value))
        for key, value in sorted(params.items())
    )
90
90
91
91
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.

    Returns a bundlespec byte string such as ``b'zstd-v2'`` or
    ``b'none-packed1;...'``. Raises ``error.Abort`` for unknown bundle
    types, compression algorithms, or changegroup versions.
    """

    def speccompression(alg):
        # Map an on-the-wire compression name to its bundlespec name, or
        # None when the engine is unknown to this client.
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    params = {}

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == b'_truncatedBZ':
            alg = b'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
        return b'%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if b'Compression' in b.params:
            comp = speccompression(b.params[b'Compression'])
            if not comp:
                # Report the algorithm name from the bundle; `comp` is None
                # here, so interpolating it would print "None".
                raise error.Abort(
                    _(b'unknown compression algorithm: %s')
                    % b.params[b'Compression']
                )
        else:
            comp = b'none'

        version = None
        for part in b.iterparts():
            if part.type == b'changegroup':
                cgversion = part.params[b'version']
                if cgversion in (b'01', b'02'):
                    version = b'v2'
                elif cgversion in (b'03',):
                    version = b'v2'
                    params[b'cg.version'] = cgversion
                else:
                    # `version` is still None on this path; the offending
                    # value is `cgversion`, so report that instead.
                    raise error.Abort(
                        _(
                            b'changegroup version %s does not have '
                            b'a known bundlespec'
                        )
                        % cgversion,
                        hint=_(b'try upgrading your Mercurial client'),
                    )
            elif part.type == b'stream2' and version is None:
                # A stream2 part requires to be part of a v2 bundle
                requirements = urlreq.unquote(part.params[b'requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return b'none-v2;stream=v2;%s' % params
            elif part.type == b'stream3-exp' and version is None:
                # A stream3 part requires to be part of a v2 bundle
                requirements = urlreq.unquote(part.params[b'requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return b'none-v2;stream=v3-exp;%s' % params
            elif part.type == b'obsmarkers':
                params[b'obsolescence'] = b'yes'
                if not part.mandatory:
                    params[b'obsolescence-mandatory'] = b'no'

        if not version:
            params[b'changegroup'] = b'no'
            version = b'v2'
        spec = b'%s-%s' % (comp, version)
        if params:
            spec += b';'
            spec += _format_params(params)
        return spec

    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        formatted = bundle2._formatrequirementsparams(requirements)
        return b'none-packed1;%s' % formatted
    else:
        raise error.Abort(_(b'unknown bundle type: %s') % b)
176
176
177
177
def _computeoutgoing(repo, heads, common):
    """Compute which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    changelog = repo.changelog
    if not common:
        # No common revisions known: anchor on the null revision.
        common = [repo.nullid]
    else:
        # Drop nodes the local changelog does not actually have.
        known = changelog.hasnode
        common = [node for node in common if known(node)]
    if not heads:
        heads = changelog.heads()
    return discovery.outgoing(repo, common, heads)
196
196
197
197
def _checkpublish(pushop):
    """Apply the ``experimental.auto-publish`` policy before pushing.

    When pushing draft changesets to a publishing server would publish
    them, either warn, interactively confirm, or abort according to the
    configured behavior. A no-op for explicit ``--publish`` pushes, for
    unknown behaviors, and for non-publishing remotes.
    """
    repo = pushop.repo
    ui = repo.ui
    behavior = ui.config(b'experimental', b'auto-publish')
    if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
        return
    remotephases = listkeys(pushop.remote, b'phases')
    if not remotephases.get(b'publishing', False):
        return

    if pushop.revs is None:
        # Pushing everything: every non-public served changeset is affected.
        published = repo.filtered(b'served').revs(b'not public()')
    else:
        published = repo.revs(b'::%ln - public()', pushop.revs)
        # we want to use pushop.revs in the revset even if they themselves are
        # secret, but we don't want to have anything that the server won't see
        # in the result of this expression
        published &= repo.filtered(b'served')
    if published:
        if behavior == b'warn':
            ui.warn(
                _(b'%i changesets about to be published\n') % len(published)
            )
        elif behavior == b'confirm':
            # promptchoice returns the selected index; 0 is "Yes".
            if ui.promptchoice(
                _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
                % len(published)
            ):
                raise error.CanceledError(_(b'user quit'))
        elif behavior == b'abort':
            msg = _(b'push would publish %i changesets') % len(published)
            hint = _(
                b"use --publish or adjust 'experimental.auto-publish'"
                b" config"
            )
            raise error.Abort(msg, hint=hint)
234
234
235
235
236 def _forcebundle1(op):
236 def _forcebundle1(op):
237 """return true if a pull/push must use bundle1
237 """return true if a pull/push must use bundle1
238
238
239 This function is used to allow testing of the older bundle version"""
239 This function is used to allow testing of the older bundle version"""
240 ui = op.repo.ui
240 ui = op.repo.ui
241 # The goal is this config is to allow developer to choose the bundle
241 # The goal is this config is to allow developer to choose the bundle
242 # version used during exchanged. This is especially handy during test.
242 # version used during exchanged. This is especially handy during test.
243 # Value is a list of bundle version to be picked from, highest version
243 # Value is a list of bundle version to be picked from, highest version
244 # should be used.
244 # should be used.
245 #
245 #
246 # developer config: devel.legacy.exchange
246 # developer config: devel.legacy.exchange
247 exchange = ui.configlist(b'devel', b'legacy.exchange')
247 exchange = ui.configlist(b'devel', b'legacy.exchange')
248 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
248 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
249 return forcebundle1 or not op.remote.capable(b'bundle2')
249 return forcebundle1 or not op.remote.capable(b'bundle2')
250
250
251
251
class pushoperation:
    """An object that represents a single push operation.

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(
        self,
        repo,
        remote,
        force=False,
        revs=None,
        newbranch=False,
        bookmarks=(),
        publish=False,
        pushvars=None,
    ):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars
        # publish pushed changesets
        self.publish = publish

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.ancestorsof

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::push_heads and ::commonheads)
        #
        # To push, we already computed
        #     common = (::commonheads)
        #     missing = ((commonheads::push_heads) - commonheads)
        #
        # So we basically search
        #
        #     almost_heads = heads((parents(missing) + push_heads) & common)
        #
        # We use "almost" here as this can return revision that are ancestors
        # of other in the set and we need to explicitly turn it into an
        # antichain later. We can do so using:
        #
        #     cheads = heads(almost_heads::almost_heads)
        #
        # In pratice the code is a bit more convulted to avoid some extra
        # computation. It aims at doing the same computation as highlighted
        # above however.
        common = self.outgoing.common
        unfi = self.repo.unfiltered()
        cl = unfi.changelog
        to_rev = cl.index.rev
        to_node = cl.node
        parent_revs = cl.parentrevs
        unselected = []
        cheads = set()
        # XXX-perf: `self.revs` and `outgoing.missing` could hold revs directly
        for n in self.revs:
            r = to_rev(n)
            if r in common:
                cheads.add(r)
            else:
                unselected.append(r)
        known_non_heads = cl.ancestors(cheads, inclusive=True)
        if unselected:
            missing_revs = {to_rev(n) for n in self.outgoing.missing}
            missing_revs.add(nullrev)
            root_points = set()
            # Collect parents of missing revs that are common and not already
            # known to be non-heads: candidates for fallback heads.
            for r in missing_revs:
                p1, p2 = parent_revs(r)
                if p1 not in missing_revs and p1 not in known_non_heads:
                    root_points.add(p1)
                if p2 not in missing_revs and p2 not in known_non_heads:
                    root_points.add(p2)
            if root_points:
                heads = unfi.revs('heads(%ld::%ld)', root_points, root_points)
                cheads.update(heads)
        # XXX-perf: could this be a set of revision?
        return [to_node(r) for r in sorted(cheads)]

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
406
406
407
407
# Mapping of messages used when pushing a bookmark:
# action -> (success message template, failure message template),
# each taking the bookmark name as the single %s argument.
bookmsgmap = {
    b'update': (
        _(b"updating bookmark %s\n"),
        _(b'updating bookmark %s failed\n'),
    ),
    b'export': (
        _(b"exporting bookmark %s\n"),
        _(b'exporting bookmark %s failed\n'),
    ),
    b'delete': (
        _(b"deleting remote bookmark %s\n"),
        _(b'deleting remote bookmark %s failed\n'),
    ),
}
423
423
424
424
def push(
    repo,
    remote,
    force=False,
    revs=None,
    newbranch=False,
    bookmarks=(),
    publish=False,
    opargs=None,
):
    """Push outgoing changesets (limited by revs) from a local
    repository to remote.

    Returns the ``pushoperation`` object; its ``cgresult`` attribute holds
    the integer changegroup result:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    """
    if opargs is None:
        opargs = {}
    pushop = pushoperation(
        repo,
        remote,
        force,
        revs,
        newbranch,
        bookmarks,
        publish,
        **pycompat.strkwargs(opargs),
    )
    if pushop.remote.local():
        missing = (
            set(pushop.repo.requirements) - pushop.remote.local().supported
        )
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_(b"destination does not support push"))

    if not pushop.remote.capable(b'unbundle'):
        raise error.Abort(
            _(
                b'cannot push: destination does not support the '
                b'unbundle wire protocol command'
            )
        )
    for category in sorted(bundle2.read_remote_wanted_sidedata(pushop.remote)):
        # Check that a computer is registered for that category for at least
        # one revlog kind.
        for kind, computers in repo._sidedata_computers.items():
            if computers.get(category):
                break
        else:
            raise error.Abort(
                _(
                    b'cannot push: required sidedata category not supported'
                    b" by this client: '%s'"
                )
                % pycompat.bytestr(category)
            )
    # get lock as we might write phase data
    wlock = lock = None
    try:
        try:
            # bundle2 push may receive a reply bundle touching bookmarks
            # requiring the wlock. Take it now to ensure proper ordering.
            maypushback = pushop.ui.configbool(
                b'experimental',
                b'bundle2.pushback',
            )
            if (
                (not _forcebundle1(pushop))
                and maypushback
                and not bookmod.bookmarksinstore(repo)
            ):
                wlock = pushop.repo.wlock()
            lock = pushop.repo.lock()
            pushop.trmanager = transactionmanager(
                pushop.repo, b'push-response', pushop.remote.url()
            )
        except error.LockUnavailable as err:
            # source repo cannot be locked.
            # We do not abort the push, but just disable the local phase
            # synchronisation.
            msg = b'cannot lock source repository: %s\n'
            msg %= stringutil.forcebytestr(err)
            pushop.ui.debug(msg)

        pushop.repo.checkpush(pushop)
        _checkpublish(pushop)
        _pushdiscovery(pushop)
        if not pushop.force:
            _checksubrepostate(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)
        if pushop.trmanager is not None:
            pushop.trmanager.close()
    finally:
        # Release in acquisition-reverse order; trmanager.release aborts the
        # transaction if it was not closed above.
        lockmod.release(pushop.trmanager, lock, wlock)

    if repo.ui.configbool(b'experimental', b'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pushop
539
539
540
540
# Ordered list of step names: discovery steps run in this order before push.
pushdiscoveryorder = []

# Mapping between step name and function.
#
# This exists to help extensions wrap steps if necessary.
pushdiscoverymapping = {}
548
548
549
549
550 def pushdiscovery(stepname):
550 def pushdiscovery(stepname):
551 """decorator for function performing discovery before push
551 """decorator for function performing discovery before push
552
552
553 The function is added to the step -> function mapping and appended to the
553 The function is added to the step -> function mapping and appended to the
554 list of steps. Beware that decorated function will be added in order (this
554 list of steps. Beware that decorated function will be added in order (this
555 may matter).
555 may matter).
556
556
557 You can only use this decorator for a new step, if you want to wrap a step
557 You can only use this decorator for a new step, if you want to wrap a step
558 from an extension, change the pushdiscovery dictionary directly."""
558 from an extension, change the pushdiscovery dictionary directly."""
559
559
560 def dec(func):
560 def dec(func):
561 assert stepname not in pushdiscoverymapping
561 assert stepname not in pushdiscoverymapping
562 pushdiscoverymapping[stepname] = func
562 pushdiscoverymapping[stepname] = func
563 pushdiscoveryorder.append(stepname)
563 pushdiscoveryorder.append(stepname)
564 return func
564 return func
565
565
566 return dec
566 return dec
567
567
568
568
569 def _pushdiscovery(pushop):
569 def _pushdiscovery(pushop):
570 """Run all discovery steps"""
570 """Run all discovery steps"""
571 for stepname in pushdiscoveryorder:
571 for stepname in pushdiscoveryorder:
572 step = pushdiscoverymapping[stepname]
572 step = pushdiscoverymapping[stepname]
573 step(pushop)
573 step(pushop)
574
574
575
575
576 def _checksubrepostate(pushop):
576 def _checksubrepostate(pushop):
577 """Ensure all outgoing referenced subrepo revisions are present locally"""
577 """Ensure all outgoing referenced subrepo revisions are present locally"""
578
578
579 repo = pushop.repo
579 repo = pushop.repo
580
580
581 # If the repository does not use subrepos, skip the expensive
581 # If the repository does not use subrepos, skip the expensive
582 # manifest checks.
582 # manifest checks.
583 if not len(repo.file(b'.hgsub')) or not len(repo.file(b'.hgsubstate')):
583 if not len(repo.file(b'.hgsub')) or not len(repo.file(b'.hgsubstate')):
584 return
584 return
585
585
586 for n in pushop.outgoing.missing:
586 for n in pushop.outgoing.missing:
587 ctx = repo[n]
587 ctx = repo[n]
588
588
589 if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
589 if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
590 for subpath in sorted(ctx.substate):
590 for subpath in sorted(ctx.substate):
591 sub = ctx.sub(subpath)
591 sub = ctx.sub(subpath)
592 sub.verify(onpush=True)
592 sub.verify(onpush=True)
593
593
594
594
@pushdiscovery(b'changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    # First figure out what the remote already has in common with us.
    fci = discovery.findcommonincoming
    if pushop.revs:
        commoninc = fci(
            pushop.repo,
            pushop.remote,
            force=pushop.force,
            ancestorsof=pushop.revs,
        )
    else:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    # Then derive the outgoing set from that common information.
    outgoing = discovery.findcommonoutgoing(
        pushop.repo,
        pushop.remote,
        onlyheads=pushop.revs,
        commoninc=commoninc,
        force=pushop.force,
    )
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
620
620
621
621
@pushdiscovery(b'phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    repo = pushop.repo
    unfi = repo.unfiltered()
    cl = unfi.changelog
    to_rev = cl.index.rev
    remotephases = listkeys(pushop.remote, b'phases')

    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and not pushop.outgoing.missing  # no changesets to be pushed
        and remotephases.get(b'publishing', False)
    ):
        # Subrepo push to a publishing remote with no changesets to push:
        # this may be the issue 3781 situation, so drop the courtesy phase
        # synchronisation that would otherwise publish changesets possibly
        # still draft on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    fallbackheads_rev = {to_rev(n) for n in pushop.fallbackheads}
    pushop.remotephases = phases.RemotePhasesSummary(
        pushop.repo,
        fallbackheads_rev,
        remotephases,
    )
    droots = set(pushop.remotephases.draft_roots)

    fallback_publishing = pushop.remotephases.publishing
    push_publishing = pushop.remotephases.publishing or pushop.publish
    missing_revs = {to_rev(n) for n in outgoing.missing}
    drafts = unfi._phasecache.get_raw_set(unfi, phases.draft)

    if fallback_publishing:
        fallback_roots = droots - missing_revs
        revset = b'heads(%ld::%ld)'
    else:
        fallback_roots = droots - drafts
        fallback_roots -= missing_revs
        # Get the list of all revs draft on remote but public here.
        revset = b'heads((%ld::%ld) and public())'
    if not fallback_roots:
        fallback = fallback_rev = []
    else:
        fallback_rev = unfi.revs(revset, fallback_roots, fallbackheads_rev)
        fallback = [repo[r] for r in fallback_rev]

    if push_publishing:
        published = missing_revs.copy()
    else:
        published = missing_revs - drafts
    if pushop.publish:
        published.update(fallbackheads_rev & drafts)
    elif fallback:
        published.update(fallback_rev)

    pushop.outdatedphases = [repo[r] for r in cl.headrevs(published)]
    pushop.fallbackoutdatedphases = fallback
691
691
692
692
@pushdiscovery(b'obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """discover the obsolescence markers relevant to the push"""
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return

    if not pushop.repo.obsstore:
        return

    if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
        return

    repo = pushop.repo
    # Very naive computation that can be quite expensive on a big repo;
    # however, evolution is currently slow on them anyway.
    revs = repo.revs(b'::%ln', pushop.futureheads)
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(revs=revs)
709
709
710
710
@pushdiscovery(b'bookmarks')
def _pushdiscoverybookmarks(pushop):
    """compare local and remote bookmarks and decide what to push"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug(b"checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict decisions to bookmarks whose target is actually pushed
        revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)

    remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))

    explicit = {
        repo._bookmarks.expandname(mark) for mark in pushop.bookmarks
    }

    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
730
730
731
731
732 def _processcompared(pushop, pushed, explicit, remotebms, comp):
732 def _processcompared(pushop, pushed, explicit, remotebms, comp):
733 """take decision on bookmarks to push to the remote repo
733 """take decision on bookmarks to push to the remote repo
734
734
735 Exists to help extensions alter this behavior.
735 Exists to help extensions alter this behavior.
736 """
736 """
737 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
737 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
738
738
739 repo = pushop.repo
739 repo = pushop.repo
740
740
741 for b, scid, dcid in advsrc:
741 for b, scid, dcid in advsrc:
742 if b in explicit:
742 if b in explicit:
743 explicit.remove(b)
743 explicit.remove(b)
744 if not pushed or repo[scid].rev() in pushed:
744 if not pushed or repo[scid].rev() in pushed:
745 pushop.outbookmarks.append((b, dcid, scid))
745 pushop.outbookmarks.append((b, dcid, scid))
746 # search added bookmark
746 # search added bookmark
747 for b, scid, dcid in addsrc:
747 for b, scid, dcid in addsrc:
748 if b in explicit:
748 if b in explicit:
749 explicit.remove(b)
749 explicit.remove(b)
750 if bookmod.isdivergent(b):
750 if bookmod.isdivergent(b):
751 pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b)
751 pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b)
752 pushop.bkresult = 2
752 pushop.bkresult = 2
753 elif pushed and repo[scid].rev() not in pushed:
753 elif pushed and repo[scid].rev() not in pushed:
754 # in case of race or secret
754 # in case of race or secret
755 msg = _(b'cannot push bookmark X without its revision: %s!\n')
755 msg = _(b'cannot push bookmark X without its revision: %s!\n')
756 pushop.ui.warn(msg % b)
756 pushop.ui.warn(msg % b)
757 pushop.bkresult = 2
757 pushop.bkresult = 2
758 else:
758 else:
759 pushop.outbookmarks.append((b, b'', scid))
759 pushop.outbookmarks.append((b, b'', scid))
760 # search for overwritten bookmark
760 # search for overwritten bookmark
761 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
761 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
762 if b in explicit:
762 if b in explicit:
763 explicit.remove(b)
763 explicit.remove(b)
764 if not pushed or repo[scid].rev() in pushed:
764 if not pushed or repo[scid].rev() in pushed:
765 pushop.outbookmarks.append((b, dcid, scid))
765 pushop.outbookmarks.append((b, dcid, scid))
766 # search for bookmark to delete
766 # search for bookmark to delete
767 for b, scid, dcid in adddst:
767 for b, scid, dcid in adddst:
768 if b in explicit:
768 if b in explicit:
769 explicit.remove(b)
769 explicit.remove(b)
770 # treat as "deleted locally"
770 # treat as "deleted locally"
771 pushop.outbookmarks.append((b, dcid, b''))
771 pushop.outbookmarks.append((b, dcid, b''))
772 # identical bookmarks shouldn't get reported
772 # identical bookmarks shouldn't get reported
773 for b, scid, dcid in same:
773 for b, scid, dcid in same:
774 if b in explicit:
774 if b in explicit:
775 explicit.remove(b)
775 explicit.remove(b)
776
776
777 if explicit:
777 if explicit:
778 explicit = sorted(explicit)
778 explicit = sorted(explicit)
779 # we should probably list all of them
779 # we should probably list all of them
780 pushop.ui.warn(
780 pushop.ui.warn(
781 _(
781 _(
782 b'bookmark %s does not exist on the local '
782 b'bookmark %s does not exist on the local '
783 b'or remote repository!\n'
783 b'or remote repository!\n'
784 )
784 )
785 % explicit[0]
785 % explicit[0]
786 )
786 )
787 pushop.bkresult = 2
787 pushop.bkresult = 2
788
788
789 pushop.outbookmarks.sort()
789 pushop.outbookmarks.sort()
790
790
791
791
def _pushcheckoutgoing(pushop):
    """validate the outgoing set; return True iff there is something to push

    Without --force, aborts when the outgoing set contains obsolete or
    unstable changesets, and runs the remote-head safety checks.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are here for the 80 char limit reason
            mso = _(b"push includes obsolete changeset: %s!")
            mspd = _(b"push includes phase-divergent changeset: %s!")
            mscd = _(b"push includes content-divergent changeset: %s!")
            mst = {
                b"orphan": _(b"push includes orphan changeset: %s!"),
                b"phase-divergent": mspd,
                b"content-divergent": mscd,
            }
            # If there is at least one obsolete or unstable changeset in
            # missing, then at least one of the missing heads is obsolete
            # or unstable, so checking heads only is enough.
            for node in outgoing.ancestorsof:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
828
828
829
829
# Ordered list of step names generating parts for an outgoing bundle2;
# order matters.
b2partsgenorder = []

# Step name -> generator function.
#
# Kept as a module-level mapping so extensions can wrap individual steps.
b2partsgenmapping = {}


def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The decorated function is recorded in the name -> function mapping and
    the step name is inserted into the ordered step list (appended when
    ``idx`` is None). Registration order is significant.

    Only use this decorator for new steps; to wrap a step from an
    extension, attack the b2partsgenmapping dictionary directly."""

    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func

    return register
859
859
860
860
861 def _pushb2ctxcheckheads(pushop, bundler):
861 def _pushb2ctxcheckheads(pushop, bundler):
862 """Generate race condition checking parts
862 """Generate race condition checking parts
863
863
864 Exists as an independent function to aid extensions
864 Exists as an independent function to aid extensions
865 """
865 """
866 # * 'force' do not check for push race,
866 # * 'force' do not check for push race,
867 # * if we don't push anything, there are nothing to check.
867 # * if we don't push anything, there are nothing to check.
868 if not pushop.force and pushop.outgoing.ancestorsof:
868 if not pushop.force and pushop.outgoing.ancestorsof:
869 allowunrelated = b'related' in bundler.capabilities.get(
869 allowunrelated = b'related' in bundler.capabilities.get(
870 b'checkheads', ()
870 b'checkheads', ()
871 )
871 )
872 emptyremote = pushop.pushbranchmap is None
872 emptyremote = pushop.pushbranchmap is None
873 if not allowunrelated or emptyremote:
873 if not allowunrelated or emptyremote:
874 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
874 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
875 else:
875 else:
876 affected = set()
876 affected = set()
877 for branch, heads in pushop.pushbranchmap.items():
877 for branch, heads in pushop.pushbranchmap.items():
878 remoteheads, newheads, unsyncedheads, discardedheads = heads
878 remoteheads, newheads, unsyncedheads, discardedheads = heads
879 if remoteheads is not None:
879 if remoteheads is not None:
880 remote = set(remoteheads)
880 remote = set(remoteheads)
881 affected |= set(discardedheads) & remote
881 affected |= set(discardedheads) & remote
882 affected |= remote - set(newheads)
882 affected |= remote - set(newheads)
883 if affected:
883 if affected:
884 data = iter(sorted(affected))
884 data = iter(sorted(affected))
885 bundler.newpart(b'check:updated-heads', data=data)
885 bundler.newpart(b'check:updated-heads', data=data)
886
886
887
887
888 def _pushing(pushop):
888 def _pushing(pushop):
889 """return True if we are pushing anything"""
889 """return True if we are pushing anything"""
890 return bool(
890 return bool(
891 pushop.outgoing.missing
891 pushop.outgoing.missing
892 or pushop.outdatedphases
892 or pushop.outdatedphases
893 or pushop.outobsmarkers
893 or pushop.outobsmarkers
894 or pushop.outbookmarks
894 or pushop.outbookmarks
895 )
895 )
896
896
897
897
@b2partsgenerator(b'check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasbookmarkcheck = b'bookmarks' in b2caps
    if not (pushop.outbookmarks and hasbookmarkcheck):
        return
    # record the expected current remote value of every bookmark we move,
    # so the server can detect races
    data = [(book, old) for book, old, new in pushop.outbookmarks]
    checkdata = bookmod.binaryencode(pushop.repo, data)
    bundler.newpart(b'check:bookmarks', data=checkdata)
912
912
913
913
@b2partsgenerator(b'check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = b'heads' in b2caps.get(b'phases', ())
    if pushop.remotephases is None or not hasphaseheads:
        return
    # check that the remote phase has not changed
    checks = {p: [] for p in phases.allphases}
    to_node = pushop.repo.unfiltered().changelog.node
    checks[phases.public].extend(
        to_node(r) for r in pushop.remotephases.public_heads
    )
    checks[phases.draft].extend(
        to_node(r) for r in pushop.remotephases.draft_roots
    )
    if any(checks.values()):
        for phase in checks:
            checks[phase].sort()
        checkdata = phases.binaryencode(checks)
        bundler.newpart(b'check:phases', data=checkdata)
936
936
937
937
@b2partsgenerator(b'changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    # negotiate the changegroup version to emit
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = b'01'
    cgversions = b2caps.get(b'changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [
            v
            for v in cgversions
            if v in changegroup.supportedoutgoingversions(pushop.repo)
        ]
        if not cgversions:
            raise error.Abort(_(b'no common changegroup version'))
        version = max(cgversions)

    remote_sidedata = bundle2.read_remote_wanted_sidedata(pushop.remote)
    cgstream = changegroup.makestream(
        pushop.repo,
        pushop.outgoing,
        version,
        b'push',
        bundlecaps=b2caps,
        remote_sidedata=remote_sidedata,
    )
    cgpart = bundler.newpart(b'changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam(b'version', version)
    if scmutil.istreemanifest(pushop.repo):
        cgpart.addparam(b'treemanifest', b'1')
    if repository.REPO_FEATURE_SIDE_DATA in pushop.repo.features:
        cgpart.addparam(b'exp-sidedata', b'1')

    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies[b'changegroup']) == 1
        pushop.cgresult = cgreplies[b'changegroup'][0][b'return']

    return handlereply
991
991
992
992
@b2partsgenerator(b'phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if b'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
    haspushkey = b'pushkey' in b2caps
    hasphaseheads = b'heads' in b2caps.get(b'phases', ())

    # prefer the modern binary part unless legacy exchange is forced
    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)
1009
1009
1010
1010
def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add(b'phases')
    if not pushop.outdatedphases:
        return
    updates = {p: [] for p in phases.allphases}
    # everything listed in outdatedphases becomes public (phase 0)
    updates[0].extend(h.node() for h in pushop.outdatedphases)
    phasedata = phases.binaryencode(updates)
    bundler.newpart(b'phase-heads', data=phasedata)
1019
1019
1020
1020
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add(b'phases')
    pending = []  # (part id, node) for every pushkey part we emit

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in pending:
            if partid == targetid:
                raise error.Abort(_(b'updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'phases'))
        part.addparam(b'key', enc(newremotehead.hex()))
        part.addparam(b'old', enc(b'%d' % phases.draft))
        part.addparam(b'new', enc(b'%d' % phases.public))
        pending.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        # report any phase update the server ignored or rejected
        for partid, node in pending:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _(b'server ignored update of %s to public!\n') % node
            elif not int(results[0][b'return']):
                msg = _(b'updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)

    return handlereply
1056
1056
1057
1057
@b2partsgenerator(b'obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    # Add an obsolescence-marker part to the outgoing bundle when both
    # sides understand a common marker format and we have markers to send.
    if b'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no marker version understood by both ends
        return
    pushop.stepsdone.add(b'obsmarkers')
    if pushop.outobsmarkers:
        sorted_markers = obsutil.sortedmarkers(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, sorted_markers)
1069
1069
1070
1070
@b2partsgenerator(b'bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if b'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    # devel.legacy.exchange can force the legacy pushkey path for testing
    legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
    use_legacy = b'bookmarks' in legacy

    if not use_legacy and b'bookmarks' in b2caps:
        # modern binary bookmarks part
        return _pushb2bookmarkspart(pushop, bundler)
    if b'pushkey' in b2caps:
        # fall back to one pushkey part per bookmark
        return _pushb2bookmarkspushkey(pushop, bundler)
1085
1085
1086
1086
1087 def _bmaction(old, new):
1087 def _bmaction(old, new):
1088 """small utility for bookmark pushing"""
1088 """small utility for bookmark pushing"""
1089 if not old:
1089 if not old:
1090 return b'export'
1090 return b'export'
1091 elif not new:
1091 elif not new:
1092 return b'delete'
1092 return b'delete'
1093 return b'update'
1093 return b'update'
1094
1094
1095
1095
def _abortonsecretctx(pushop, node, b):
    """abort if a given bookmark points to a secret changeset"""
    # node may be falsy (e.g. bookmark deletion); nothing to check then
    if not node:
        return
    if pushop.repo[node].phase() == phases.secret:
        raise error.Abort(
            _(b'cannot push bookmark %s as it points to a secret changeset') % b
        )
1102
1102
1103
1103
def _pushb2bookmarkspart(pushop, bundler):
    # Push bookmarks via the binary 'bookmarks' bundle2 part.
    pushop.stepsdone.add(b'bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    payload = []
    for book, old, new in pushop.outbookmarks:
        _abortonsecretctx(pushop, new, book)
        payload.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    encoded = bookmod.binaryencode(pushop.repo, payload)
    bundler.newpart(b'bookmarks', data=encoded)

    def handlereply(op):
        ui = pushop.ui
        # if success
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply
1125
1125
1126
1126
def _pushb2bookmarkspushkey(pushop, bundler):
    # Legacy path: one pushkey part per bookmark to update.
    pushop.stepsdone.add(b'bookmarks')
    tracked = []  # (part id, bookmark name, action) for each part sent
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        failedid = int(exc.partid)
        for partid, book, action in tracked:
            if partid == failedid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        _abortonsecretctx(pushop, new, book)
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'bookmarks'))
        part.addparam(b'key', enc(book))
        part.addparam(b'old', enc(hex(old)))
        part.addparam(b'new', enc(hex(new)))
        tracked.append((part.id, book, _bmaction(old, new)))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in tracked:
            results = op.records.getreplies(partid)[b'pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
            elif int(results[0][b'return']):
                ui.status(bookmsgmap[action][0] % book)
            else:
                ui.warn(bookmsgmap[action][1] % book)
                if pushop.bkresult is not None:
                    pushop.bkresult = 1

    return handlereply
1173
1173
1174
1174
@b2partsgenerator(b'pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if not pushvars:
        return

    shellvars = {}
    for raw in pushvars:
        if b'=' not in raw:
            msg = (
                b"unable to parse variable '%s', should follow "
                b"'KEY=VALUE' or 'KEY=' format"
            )
            raise error.Abort(msg % raw)
        name, value = raw.split(b'=', 1)
        shellvars[name] = value

    part = bundler.newpart(b'pushvars')

    # parameters are advisory (mandatory=False)
    for name, value in shellvars.items():
        part.addparam(name, value, mandatory=False)
1195
1195
1196
1196
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = pushop.trmanager and pushop.ui.configbool(
        b'experimental', b'bundle2.pushback'
    )

    # create reply capability
    capsblob = bundle2.encodecaps(
        bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
    )
    bundler.newpart(b'replycaps', data=capsblob)
    # let each registered part generator contribute; a generator may hand
    # back a callable that will process the server reply afterwards
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        maybe_handler = partgen(pushop, bundler)
        if callable(maybe_handler):
            replyhandlers.append(maybe_handler)
    # do not push if nothing to push (only the replycaps part was added)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as executor:
                reply = executor.callcommand(
                    b'unbundle',
                    {
                        b'bundle': stream,
                        b'heads': [b'force'],
                        b'url': pushop.remote.url(),
                    },
                ).result()
        except error.BundleValueError as exc:
            raise error.RemoteError(_(b'missing support for %s') % exc)
        try:
            trgetter = pushop.trmanager.transaction if pushback else None
            op = bundle2.processbundle(
                pushop.repo,
                reply,
                trgetter,
                remote=pushop.remote,
            )
        except error.BundleValueError as exc:
            raise error.RemoteError(_(b'missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.error(_(b'remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.error(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
            raise error.RemoteError(_(b'push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
1259
1259
1260
1260
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable(b'unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # The fast path is only safe when everything is pushed and neither
    # excluded changesets nor filtered revisions can interfere.
    fastpath = pushop.revs is None and not (
        outgoing.excluded or pushop.repo.changelog.filteredrevs
    )

    cg = changegroup.makechangegroup(
        pushop.repo,
        outgoing,
        b'01',
        b'push',
        fastpath=fastpath,
        bundlecaps=bundlecaps,
    )

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    remoteheads = [b'force'] if pushop.force else pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1307
1307
1308
1308
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, b'phases')
    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and pushop.cgresult is None  # nothing was pushed
        and remotephases.get(b'publishing', False)
    ):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {b'publishing': b'True'}
    if not remotephases:  # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
        return

    unfi = pushop.repo.unfiltered()
    to_rev = unfi.changelog.index.rev
    to_node = unfi.changelog.node
    cheads_revs = [to_rev(n) for n in cheads]
    pheads_revs, _dr = phases.analyze_remote_phases(
        pushop.repo,
        cheads_revs,
        remotephases,
    )
    pheads = [to_node(r) for r in pheads_revs]
    ### Apply remote phase on local
    if remotephases.get(b'publishing', False):
        _localphasemove(pushop, cheads)
    else:  # publish = False
        _localphasemove(pushop, pheads)
        _localphasemove(pushop, cheads, phases.draft)
    ### Apply local phase on remote

    if pushop.cgresult:
        if b'phases' in pushop.stepsdone:
            # phases already pushed though bundle2
            return
        outdated = pushop.outdatedphases
    else:
        outdated = pushop.fallbackoutdatedphases

    pushop.stepsdone.add(b'phases')

    # filter heads already turned public by the push
    outdated = [c for c in outdated if c.node() not in pheads]
    # fallback to independent pushkey command
    for head in outdated:
        with pushop.remote.commandexecutor() as executor:
            ok = executor.callcommand(
                b'pushkey',
                {
                    b'namespace': b'phases',
                    b'key': head.hex(),
                    b'old': b'%d' % phases.draft,
                    b'new': b'%d' % phases.public,
                },
            ).result()

        if not ok:
            pushop.ui.warn(_(b'updating %s to public failed!\n') % head)
1381
1381
1382
1382
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(
            pushop.repo, pushop.trmanager.transaction(), phase, nodes
        )
        return
    # repo is not locked, do not change any phases!
    # Informs the user that phases should have been moved when
    # applicable.
    actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
    phasestr = phases.phasenames[phase]
    if actualmoves:
        pushop.ui.status(
            _(
                b'cannot lock source repo, skipping '
                b'local %s phase update\n'
            )
            % phasestr
        )
1403
1403
1404
1404
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if b'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add(b'obsmarkers')
    if not pushop.outobsmarkers:
        return
    pushop.ui.debug(b'try to push obsolete markers to remote\n')
    markers = obsutil.sortedmarkers(pushop.outobsmarkers)
    remotedata = obsolete._pushkeyescape(markers)
    results = []
    # reverse sort to ensure we end with dump0
    for key in sorted(remotedata, reverse=True):
        results.append(remote.pushkey(b'obsolete', key, b'', remotedata[key]))
    if not all(results):
        msg = _(b'failed to push some obsolete markers!\n')
        repo.ui.warn(msg)
1424
1424
1425
1425
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = _bmaction(old, new)

        with remote.commandexecutor() as executor:
            ok = executor.callcommand(
                b'pushkey',
                {
                    b'namespace': b'bookmarks',
                    b'key': b,
                    b'old': hex(old),
                    b'new': hex(new),
                },
            ).result()

        if ok:
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value form invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
1459
1459
1460
1460
class pulloperation:
    """A object that represent a single pull operation

    It purpose is to carry pull related state and very common operation.

    A new should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(
        self,
        repo,
        remote,
        heads=None,
        force=False,
        bookmarks=(),
        remotebookmarks=None,
        streamclonerequested=None,
        includepats=None,
        excludepats=None,
        depth=None,
        path=None,
    ):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # path object used to build this remote
        #
        # Ideally, the remote peer would carry that directly.
        self.remote_path = path
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = [
            repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
        ]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager (created on demand)
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False
        # Set of file patterns to include.
        self.includepats = includepats
        # Set of file patterns to exclude.
        self.excludepats = excludepats
        # Number of ancestor changesets to pull from each pulled head.
        self.depth = depth

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        if self.heads is not None:
            # We pulled a specific subset: sync on this subset only.
            return self.heads
        # We pulled everything possible: sync on everything common, plus
        # every pulled head not already part of the common set.
        known = set(self.common)
        subset = list(self.common)
        subset.extend(n for n in self.rheads if n not in known)
        return subset

    @util.propertycache
    def canusebundle2(self):
        # bundle2 is used unless something explicitly forces bundle1
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1554
1554
1555
1555
class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None  # lazily-created transaction

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = b'%s\n%s' % (self.source, urlutil.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            tr.hookargs[b'source'] = self.source
            tr.hookargs[b'url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
1586
1586
1587
1587
def listkeys(remote, namespace):
    """Fetch the pushkey namespace ``namespace`` from the ``remote`` peer."""
    with remote.commandexecutor() as executor:
        pending = executor.callcommand(b'listkeys', {b'namespace': namespace})
        return pending.result()
1591
1591
1592
1592
def _fullpullbundle2(repo, pullop):
    """Run ``_pullbundle2`` repeatedly until the pull is complete.

    ``repo`` is the local repository; ``pullop`` carries the pull state
    (``common``/``rheads`` are updated in place between iterations).
    """
    # The server may send a partial reply, i.e. when inlining
    # pre-computed bundles. In that case, update the common
    # set based on the results and pull another bundle.
    #
    # There are two indicators that the process is finished:
    # - no changeset has been added, or
    # - all remote heads are known locally.
    # The head check must use the unfiltered view as obsoletion
    # markers can hide heads.
    unfi = repo.unfiltered()
    unficl = unfi.changelog

    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
        return {ctx.node() for ctx in res}

    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
        return {ctx.node() for ctx in res}

    while True:
        # snapshot heads and changelog length so we can detect progress
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if requirements.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            # no changeset added: the pull is complete
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            # every remote head is now known locally
            break
        # partial reply: fold the newly received heads into the common set
        # and drop remote heads we now have before the next round trip
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common
1633
1633
1634
1634
def add_confirm_callback(repo, pullop):
    """Register a transaction validator that shows pull stats and asks the
    user to confirm the pull before the transaction is committed."""
    txn = pullop.trmanager.transaction()
    scmutil.registersummarycallback(
        repo, txn, txnname=b'pull', as_validator=True
    )
    # hold a weak reference so the validator does not keep the repo alive
    unfi_ref = weakref.ref(repo.unfiltered())

    def _prompt(tr):
        unfi = unfi_ref()
        choices = _(b'accept incoming changes (yn)?$$ &Yes $$ &No')
        if unfi.ui.promptchoice(choices):
            raise error.Abort(b"user aborted")

    txn.addvalidator(b'900-pull-prompt', _prompt)
1652
1652
1653
1653
def pull(
    repo,
    remote,
    path=None,
    heads=None,
    force=False,
    bookmarks=(),
    opargs=None,
    streamclonerequested=None,
    includepats=None,
    excludepats=None,
    depth=None,
    confirm=None,
):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``path`` and ``force`` are forwarded unchanged to the ``pulloperation``
    (``force`` is also used by changeset discovery).
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.
    ``depth`` is an integer indicating the DAG depth of history we're
    interested in. If defined, for each revision specified in ``heads``, we
    will fetch up to this many of its ancestors and data associated with them.
    ``confirm`` is a boolean indicating whether the pull should be confirmed
    before committing the transaction. This overrides HGPLAIN.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}

    # We allow the narrow patterns to be passed in explicitly to provide more
    # flexibility for API consumers.
    if includepats is not None or excludepats is not None:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    pullop = pulloperation(
        repo,
        remote,
        path=path,
        heads=heads,
        force=force,
        bookmarks=bookmarks,
        streamclonerequested=streamclonerequested,
        includepats=includepats,
        excludepats=excludepats,
        depth=depth,
        **pycompat.strkwargs(opargs),
    )

    # refuse to pull from a local peer whose requirements we cannot satisfy
    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    for category in repo._wanted_sidedata:
        # Check that a computer is registered for that category for at least
        # one revlog kind.
        for kind, computers in repo._sidedata_computers.items():
            if computers.get(category):
                break
        else:
            # This should never happen since repos are supposed to be able to
            # generate the sidedata they require.
            raise error.ProgrammingError(
                _(
                    b'sidedata category requested by local side without local'
                    b"support: '%s'"
                )
                % pycompat.bytestr(category)
            )

    pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
    # take the wlock only when bookmarks live outside the store; otherwise
    # the store lock is sufficient
    wlock = util.nullcontextmanager
    if not bookmod.bookmarksinstore(repo):
        wlock = repo.wlock
    with wlock(), repo.lock(), pullop.trmanager:
        if confirm or (
            repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain()
        ):
            add_confirm_callback(repo, pullop)

        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        streamclone.maybeperformlegacystreamclone(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _fullpullbundle2(repo, pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)

    # storing remotenames
    if repo.ui.configbool(b'experimental', b'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pullop
1779
1779
1780
1780
# Ordered list of discovery step names to run before a pull.
pulldiscoveryorder = []

# Mapping between step name and function.
#
# This exists to help extensions wrap individual steps if necessary.
pulldiscoverymapping = {}


def pulldiscovery(stepname):
    """decorator registering a function as a pre-pull discovery step

    The decorated function is recorded under ``stepname`` in the step
    mapping and appended to the ordered step list, so decoration order
    matters.

    Only use this decorator for a new step; to wrap an existing step from
    an extension, modify the ``pulldiscoverymapping`` dictionary directly.
    """

    def register(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func

    return register
1807
1807
1808
1808
def _pulldiscovery(pullop):
    """Run every registered discovery step, in registration order."""
    for name in pulldiscoveryorder:
        pulldiscoverymapping[name](pullop)
1814
1814
1815
1815
@pulldiscovery(b'b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in the bundle1 case

    When bundle2 is not in use, bookmarks have to be fetched before
    changeset discovery to reduce the chance and impact of race
    conditions."""
    if pullop.remotebookmarks is not None:
        # bookmark data already available, nothing to fetch
        return
    if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice with
        # new implementation.
        return
    raw = listkeys(pullop.remote, b'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(raw)
1830
1830
1831
1831
@pulldiscovery(b'changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will eventually handle all
    discovery at some point.

    Fills in ``pullop.common``, ``pullop.fetch`` and ``pullop.rheads``.
    """
    tmp = discovery.findcommonincoming(
        pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
    )
    common, fetch, rheads = tmp
    has_node = pullop.repo.unfiltered().changelog.index.has_node
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situation. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological amount of round
        # trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        for n in rheads:
            if has_node(n):
                if n not in scommon:
                    common.append(n)
        # nothing left to fetch once every remote head is common
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1863
1863
1864
1864
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup.

    Builds the ``getbundle`` argument dictionary from the pull state,
    issues the request, applies the received bundle, and records the
    resulting phases/bookmarks on ``pullop``.
    """
    kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs[b'common'] = pullop.common
    kwargs[b'heads'] = pullop.heads or pullop.rheads

    # check server supports narrow and then adding includepats and excludepats
    servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
    if servernarrow and pullop.includepats:
        kwargs[b'includepats'] = pullop.includepats
    if servernarrow and pullop.excludepats:
        kwargs[b'excludepats'] = pullop.excludepats

    if streaming:
        # streaming replaces the changegroup and phase exchange entirely
        kwargs[b'cg'] = False
        kwargs[b'stream'] = True
        pullop.stepsdone.add(b'changegroup')
        pullop.stepsdone.add(b'phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add(b'changegroup')

        kwargs[b'cg'] = pullop.fetch

        # prefer the binary phase-heads part over the legacy listkeys
        # namespace, unless devel.legacy.exchange forces the old protocol
        legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
        hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
        if not legacyphase and hasbinaryphase:
            kwargs[b'phases'] = True
            pullop.stepsdone.add(b'phases')

        if b'listkeys' in pullop.remotebundle2caps:
            if b'phases' not in pullop.stepsdone:
                kwargs[b'listkeys'] = [b'phases']

    # decide how bookmark data is fetched: binary bundle2 part (preferred)
    # or the legacy listkeys namespace
    bookmarksrequested = False
    legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
    hasbinarybook = b'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add(b'request-bookmarks')

    if (
        b'request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark
        and hasbinarybook
    ):
        kwargs[b'bookmarks'] = True
        bookmarksrequested = True

    if b'listkeys' in pullop.remotebundle2caps:
        if b'request-bookmarks' not in pullop.stepsdone:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add(b'request-bookmarks')
            kwargs.setdefault(b'listkeys', []).append(b'bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (
        pullop.remote.capable(b'clonebundles')
        and pullop.heads is None
        and list(pullop.common) == [pullop.repo.nullid]
    ):
        kwargs[b'cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_(b'streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
            pullop.repo.ui.status(_(b"requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs[b'obsmarkers'] = True
            pullop.stepsdone.add(b'obsmarkers')
    # let extensions amend the request before it is sent
    _pullbundle2extraprepare(pullop, kwargs)

    remote_sidedata = bundle2.read_remote_wanted_sidedata(pullop.remote)
    if remote_sidedata:
        kwargs[b'remote_sidedata'] = remote_sidedata

    # issue the getbundle request
    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args[b'source'] = b'pull'
        bundle = e.callcommand(b'getbundle', args).result()

    # apply the received bundle to the local repository
    try:
        op = bundle2.bundleoperation(
            pullop.repo,
            pullop.gettransaction,
            source=b'pull',
            remote=pullop.remote,
        )
        op.modes[b'bookmarks'] = b'records'
        bundle2.processbundle(
            pullop.repo,
            bundle,
            op=op,
            remote=pullop.remote,
        )
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.error(_(b'remote: abort: %s\n') % exc)
        raise error.RemoteError(_(b'pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.RemoteError(_(b'missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records[b'listkeys']:
        if namespace == b'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records[b'bookmarks']:
            books[record[b'bookmark']] = record[b"node"]
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records[b'listkeys']:
            if namespace == b'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
2012
2012
2013
2013
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call

    Extensions may wrap this no-op to inject extra arguments into
    ``kwargs`` before the ``getbundle`` request is issued.
    """
2017
2017
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Fetches the changegroup computed during discovery (``pullop.fetch``)
    from the remote, picking the richest wire-protocol command the remote
    supports (``getbundle`` > ``changegroup``/``changegroupsubset``), and
    applies it inside the pull transaction.  Sets ``pullop.cgresult``.
    """
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if b'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'changegroup')
    if not pullop.fetch:
        # nothing to pull: report and record a neutral result
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
        pullop.repo.ui.status(_(b"requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable(b'getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle(
            b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
        )
    elif pullop.heads is None:
        # legacy remote without getbundle: full-ancestry changegroup
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroup',
                {
                    b'nodes': pullop.fetch,
                    b'source': b'pull',
                },
            ).result()

    elif not pullop.remote.capable(b'changegroupsubset'):
        # explicit heads were requested but the remote cannot honor them
        raise error.Abort(
            _(
                b"partial pull cannot be done because "
                b"other repository doesn't support "
                b"changegroupsubset."
            )
        )
    else:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroupsubset',
                {
                    b'bases': pullop.fetch,
                    b'heads': pullop.heads,
                    b'source': b'pull',
                },
            ).result()

    bundleop = bundle2.applybundle(
        pullop.repo,
        cg,
        tr,
        b'pull',
        pullop.remote.url(),
        remote=pullop.remote,
    )
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2080
2080
2081
2081
def _pullphase(pullop):
    """Fetch remote phase information and apply it to the local repo."""
    if b'phases' in pullop.stepsdone:
        # phases were already synchronized by an earlier step
        return
    remote_phase_data = listkeys(pullop.remote, b'phases')
    _pullapplyphases(pullop, remote_phase_data)
2088
2088
2089
2089
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the mapping obtained from the remote's ``phases``
    listkeys namespace (phase roots plus an optional ``b'publishing'``
    flag).  Local changesets in the pulled subset are advanced to public
    or draft accordingly; phase boundaries only ever move forward here.
    """
    if b'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'phases')
    publishing = bool(remotephases.get(b'publishing', False))
    if remotephases and not publishing:
        unfi = pullop.repo.unfiltered()
        to_rev = unfi.changelog.index.rev
        to_node = unfi.changelog.node
        pulledsubset_revs = [to_rev(n) for n in pullop.pulledsubset]
        # remote is new and non-publishing
        pheads_revs, _dr = phases.analyze_remote_phases(
            pullop.repo,
            pulledsubset_revs,
            remotephases,
        )
        pheads = [to_node(r) for r in pheads_revs]
        # everything pulled may at most be draft
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.index.get_rev
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
2131
2131
2132
2132
def _pullbookmarks(pullop):
    """Update local bookmarks from the remote bookmark state."""
    if b'bookmarks' in pullop.stepsdone:
        # bookmarks were already handled by an earlier step
        return
    pullop.stepsdone.add(b'bookmarks')
    local_repo = pullop.repo
    path = pullop.remote_path
    # a configured path may carry an explicit bookmark synchronization mode
    mode = path.bookmarks_mode if path is not None else None
    bookmod.updatefromremote(
        local_repo.ui,
        local_repo,
        pullop.remotebookmarks,
        pullop.remote.url(),
        pullop.gettransaction,
        explicit=pullop.explicitbookmarks,
        mode=mode,
    )
2152
2152
2153
2153
def _pullobsolete(pullop):
    """Pull obsolescence markers from the remote repository.

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if b'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, b'obsolete')
        if b'dump0' in remoteobs:
            # markers exist remotely: open the transaction and collect them
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if not key.startswith(b'dump'):
                    continue
                payload = util.b85decode(remoteobs[key])
                version, decoded = obsolete._readmarkers(payload)
                markers.extend(decoded)
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr
2181
2181
2182
2182
def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())

    def _acl_patterns(suffix):
        # per-user configuration, falling back to the 'default' user entry
        return ui.configlist(
            _NARROWACL_SECTION,
            username + suffix,
            ui.configlist(_NARROWACL_SECTION, b'default' + suffix),
        )

    user_includes = _acl_patterns(b'.includes')
    user_excludes = _acl_patterns(b'.excludes')
    if not user_includes:
        raise error.Abort(
            _(b"%s configuration for user %s is empty")
            % (_NARROWACL_SECTION, username)
        )

    def _as_path_pattern(p):
        # '*' grants access to the whole repository
        return b'path:.' if p == b'*' else b'path:' + p

    user_includes = [_as_path_pattern(p) for p in user_includes]
    user_excludes = [_as_path_pattern(p) for p in user_excludes]

    req_includes = set(kwargs.get('includepats', []))
    req_excludes = set(kwargs.get('excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes
    )

    if invalid_includes:
        raise error.Abort(
            _(b"The following includes are not accessible for %s: %s")
            % (username, stringutil.pprint(invalid_includes))
        )

    new_args = dict(kwargs)
    new_args['narrow'] = True
    new_args['narrow_acl'] = True
    new_args['includepats'] = req_includes
    if req_excludes:
        new_args['excludepats'] = req_excludes

    return new_args
2237
2237
2238
2238
def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
          May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
          most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
            need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
            the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
            narrowchangegroup to produce ellipsis nodes with the
            correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    clrev = cl.rev

    commonrevs = {clrev(n) for n in common} | {nullrev}
    headsrevs = {clrev(n) for n in heads}

    if depth:
        # distance (in changesets) of each rev from the nearest requested head
        revdepth = {h: 0 for h in headsrevs}

    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child  # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        # move ``roots`` from ``head`` down onto the intermediate ``child``
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        """Pick an elided merge between two of head's three roots.

        Returns (child, (root1, root2)) where ``child`` becomes a new root
        of ``head`` and takes over the two returned roots.
        """
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs(
                b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
            )
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(
            _(
                b'Failed to split up ellipsis node! head: %d, '
                b'roots: %d %d %d'
            )
            % (head, r1, r2, r3)
        )

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    # walk from the heads toward the roots so depth accumulates downward
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        # NOTE: rebinding shadows the ``clrev = cl.rev`` helper above, which
        # is only used before this loop; here it holds the changelog entry.
        clrev = cl.changelogrevision(rev)
        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                # parents are one step farther from the heads than this rev
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                # elided but required: it becomes an ellipsis head itself
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                # fully elided: propagate its ellipsis heads to its parents
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots
2370
2370
2371
2371
def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    # advertise bundle2 support plus the URL-quoted capability blob
    return {b'HG20', b'bundle2=' + urlreq.quote(capsblob)}
2378
2378
2379
2379
# List of names of steps to perform for a bundle2 for getbundle, order matters.
# Populated (together with the mapping below) via the
# getbundle2partsgenerator decorator.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
2387
2387
2388
2388
def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the getbundle2partsmapping dictionary directly."""

    def register(func):
        # refuse to silently overwrite an existing step
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is not None:
            getbundle2partsorder.insert(idx, stepname)
        else:
            getbundle2partsorder.append(stepname)
        return func

    return register
2409
2409
2410
2410
def bundle2requested(bundlecaps):
    """Tell whether the client capabilities include any bundle2 version."""
    if bundlecaps is None:
        return False
    for cap in bundlecaps:
        if cap.startswith(b'HG2'):
            return True
    return False
2415
2415
2416
2416
def getbundlechunks(
    repo,
    source,
    heads=None,
    common=None,
    bundlecaps=None,
    remote_sidedata=None,
    **kwargs,
):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    # normalize keyword-argument keys to bytes for internal use
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get(b'cg', True):
            raise ValueError(
                _(b'request for bundle10 must include changegroup')
            )

        # bundle10 understands no extra arguments at all
        if kwargs:
            raise ValueError(
                _(b'unsupported getbundle arguments: %s')
                % b', '.join(sorted(kwargs.keys()))
            )
        outgoing = _computeoutgoing(repo, heads, common)
        info[b'bundleversion'] = 1
        return (
            info,
            changegroup.makestream(
                repo,
                outgoing,
                b'01',
                source,
                bundlecaps=bundlecaps,
                remote_sidedata=remote_sidedata,
            ),
        )

    # bundle20 case
    info[b'bundleversion'] = 2
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith(b'bundle2='):
            # the capability carries a URL-quoted blob of bundle2 caps
            blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs[b'heads'] = heads
    kwargs[b'common'] = common

    # run every registered part generator, in registration order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(
            bundler,
            repo,
            source,
            bundlecaps=bundlecaps,
            b2caps=b2caps,
            remote_sidedata=remote_sidedata,
            # generators take native-str keywords; convert back
            **pycompat.strkwargs(kwargs),
        )

    info[b'prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()
2490
2490
2491
2491
@getbundle2partsgenerator(b'stream')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    # Thin wrapper delegating entirely to bundle2; extra positional args are
    # accepted for signature compatibility with other part generators but
    # are not forwarded.
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2495
2495
2496
2496
@getbundle2partsgenerator(b'changegroup')
def _getbundlechangegrouppart(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    remote_sidedata=None,
    **kwargs,
):
    """Add a changegroup part to the requested bundle."""
    if not kwargs.get('cg', True) or not b2caps:
        return

    # Pick the newest changegroup version both sides understand.  The
    # implicit b'01' fallback covers clients (hg 3.1/3.2) that advertised
    # an empty value.
    version = b'01'
    advertised = b2caps.get(b'changegroup')
    if advertised:
        usable = [
            v
            for v in advertised
            if v in changegroup.supportedoutgoingversions(repo)
        ]
        if not usable:
            raise error.Abort(_(b'no common changegroup version'))
        version = max(usable)

    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missing:
        return

    narrow = kwargs.get('narrow', False)
    if narrow:
        include = sorted(filter(bool, kwargs.get('includepats', [])))
        exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
        matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
    else:
        matcher = None

    cgstream = changegroup.makestream(
        repo,
        outgoing,
        version,
        source,
        bundlecaps=bundlecaps,
        matcher=matcher,
        remote_sidedata=remote_sidedata,
    )

    part = bundler.newpart(b'changegroup', data=cgstream)
    if advertised:
        part.addparam(b'version', version)

    part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)

    if scmutil.istreemanifest(repo):
        part.addparam(b'treemanifest', b'1')

    if repository.REPO_FEATURE_SIDE_DATA in repo.features:
        part.addparam(b'exp-sidedata', b'1')
        wanted = bundle2.format_remote_wanted_sidedata(repo)
        part.addparam(b'exp-wanted-sidedata', wanted)

    if narrow and kwargs.get('narrow_acl', False) and (include or exclude):
        # this is mandatory because otherwise ACL clients won't work
        specpart = bundler.newpart(b'Narrow:responsespec')
        specpart.data = b'%s\0%s' % (
            b'\n'.join(include),
            b'\n'.join(exclude),
        )
2571
2571
2572
2572
@getbundle2partsgenerator(b'bookmarks')
def _getbundlebookmarkpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """Attach a binary bookmarks part when the client requested one."""
    if not kwargs.get('bookmarks', False):
        return
    if not b2caps or b'bookmarks' not in b2caps:
        raise error.Abort(_(b'no common bookmarks exchange method'))
    payload = bookmod.binaryencode(repo, bookmod.listbinbookmarks(repo))
    # An empty encoding means there is nothing worth sending.
    if payload:
        bundler.newpart(b'bookmarks', data=payload)
2586
2586
2587
2587
@getbundle2partsgenerator(b'listkeys')
def _getbundlelistkeysparts(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """Emit one 'listkeys' part per requested pushkey namespace."""
    for ns in kwargs.get('listkeys', ()):
        part = bundler.newpart(b'listkeys')
        part.addparam(b'namespace', ns)
        part.data = pushkey.encodekeys(repo.listkeys(ns).items())
2599
2599
2600
2600
@getbundle2partsgenerator(b'obsmarkers')
def _getbundleobsmarkerpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """Attach the relevant obsolescence markers to the requested bundle."""
    if not kwargs.get('obsmarkers', False):
        return
    unfiltered_cl = repo.unfiltered().changelog
    if heads is None:
        head_revs = repo.changelog.headrevs()
    else:
        # Resolve nodes against the unfiltered index; unknown nodes map to
        # None and are dropped.
        to_rev = unfiltered_cl.index.get_rev
        head_revs = [r for r in map(to_rev, heads) if r is not None]
    ancestor_revs = unfiltered_cl.ancestors(head_revs, inclusive=True)
    markers = obsutil.sortedmarkers(
        repo.obsstore.relevantmarkers(revs=ancestor_revs)
    )
    bundle2.buildobsmarkerspart(bundler, markers)
2618
2618
2619
2619
@getbundle2partsgenerator(b'phases')
def _getbundlephasespart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """Attach a 'phase-heads' part describing phase boundaries."""
    if not kwargs.get('phases', False):
        return
    if not b2caps or b'heads' not in b2caps.get(b'phases'):
        raise error.Abort(_(b'no common phases exchange method'))
    if heads is None:
        heads = repo.heads()

    by_phase = collections.defaultdict(set)
    if repo.publishing():
        # Everything served by a publishing repository is public.
        by_phase[phases.public] = heads
    else:
        # find the appropriate heads to move
        phase_of = repo._phasecache.phase
        to_node = repo.changelog.node
        to_rev = repo.changelog.rev
        for head in heads:
            by_phase[phase_of(repo, to_rev(head))].add(head)
        seen = list(by_phase.keys())

        # We do not handle anything but public and draft phase for now)
        if seen:
            assert max(seen) <= phases.draft

        # if client is pulling non-public changesets, we need to find
        # intermediate public heads.
        draft_heads = by_phase.get(phases.draft, set())
        if draft_heads:
            public_heads = by_phase.get(phases.public, set())

            revset = b'heads(only(%ln, %ln) and public())'
            for extra in repo.revs(revset, draft_heads, public_heads):
                by_phase[phases.public].add(to_node(extra))

    # transform data in a format used by the encoding function
    mapping = {phase: sorted(by_phase[phase]) for phase in phases.allphases}

    # generate the actual part
    bundler.newpart(b'phase-heads', data=phases.binaryencode(mapping))
2667
2667
2668
2668
@getbundle2partsgenerator(b'hgtagsfnodes')
def _getbundletagsfnodes(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs,
):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Send only when changesets are being exchanged AND the client
    # advertised support for the part.
    wants_cg = kwargs.get('cg', True)
    if not b2caps or not (wants_cg and b'hgtagsfnodes' in b2caps):
        return

    bundle2.addparttagsfnodescache(
        repo, bundler, _computeoutgoing(repo, heads, common)
    )
2695
2695
2696
2696
@getbundle2partsgenerator(b'cache:rev-branch-cache')
def _getbundlerevbranchcache(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs,
):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Skip unless changesets are being exchanged, the client supports the
    # part, and narrow cloning (not currently compatible) is not involved.
    skip = (
        not kwargs.get('cg', True)
        or not b2caps
        or b'rev-branch-cache' not in b2caps
        or kwargs.get('narrow', False)
        or repo.ui.has_section(_NARROWACL_SECTION)
    )
    if skip:
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2733
2733
2734
2734
def check_heads(repo, their_heads, context):
    """Verify the repository heads still match what the client observed.

    Used by peer for unbundling; raises PushRaced when the repository
    changed while the client's data was in flight.
    """
    current_heads = repo.heads()
    heads_digest = hashutil.sha1(b''.join(sorted(current_heads))).digest()
    unchanged = (
        their_heads == [b'force']
        or their_heads == current_heads
        or their_heads == [b'hashed', heads_digest]
    )
    if not unchanged:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced(
            b'repository changed while %s - please try again' % context
        )
2752
2752
2753
2753
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool(
        b'experimental', b'bundle2-output-capture'
    )
    # remote:http(s) pushes relay server output back to the client, so
    # always capture output there regardless of the config knob.
    if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call wil be a no-op
        check_heads(repo, heads, b'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = b"\n".join([source, urlutil.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                # Lazily create the locks/transaction on first use so that
                # read-only bundle2 parts never take the write lock.
                def gettransaction():
                    if not lockandtr[2]:
                        if not bookmod.bookmarksinstore(repo):
                            lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs[b'source'] = source
                        lockandtr[2].hookargs[b'url'] = url
                        lockandtr[2].hookargs[b'bundle2'] = b'1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool(
                    b'experimental', b'bundle2lazylocking'
                ):
                    gettransaction()

                op = bundle2.bundleoperation(
                    repo,
                    gettransaction,
                    captureoutput=captureoutput,
                    source=b'push',
                )
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        # Buffer all further ui output so it can be sent
                        # back to the client as an 'output' reply part.
                        repo.ui.pushbuffer(error=True, subproc=True)

                        def recordout(output):
                            r.newpart(b'output', data=output, mandatory=False)

                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    # Preserve any output parts already produced so the
                    # client still sees them despite the failure.
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()

                    def recordout(output):
                        part = bundle2.bundlepart(
                            b'output', data=output, mandatory=False
                        )
                        parts.append(part)

                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
2841
2841
2842
2842
def _select_clone_bundle_digest(digest_spec):
    """Pick the most preferred supported digest from a DIGEST attribute.

    ``digest_spec`` is a comma-separated list of ``algo:hexdigest`` pairs
    from the clone bundle manifest.  Returns the best supported
    ``algo:hexdigest`` entry (preference follows the ordering of
    ``urlmod.digesthandler.digest_algorithms``), or None when no supported
    algorithm is listed.
    """
    algorithms = urlmod.digesthandler.digest_algorithms.keys()
    preference = {algo: idx for idx, algo in enumerate(algorithms)}
    best_entry = None
    best_preference = len(preference)
    for digest_entry in digest_spec.split(b','):
        cur_algo, sep, _cur_digest = digest_entry.partition(b':')
        if not sep:
            # Robustness fix: a malformed entry (missing the ':' separator)
            # previously raised ValueError and aborted the whole clone with
            # a traceback; ignore such server-side manifest mistakes.
            continue
        if cur_algo in preference and preference[cur_algo] < best_preference:
            best_entry = digest_entry
            best_preference = preference[cur_algo]
    return best_entry


def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool(b'ui', b'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable(b'clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand(b'clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = bundlecaches.parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(
            _(
                b'no clone bundles available on remote; '
                b'falling back to regular clone\n'
            )
        )
        return

    entries = bundlecaches.filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested
    )

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(
            _(
                b'no compatible clone bundles available on server; '
                b'falling back to regular clone\n'
            )
        )
        repo.ui.warn(
            _(b'(you may want to report this to the server operator)\n')
        )
        return

    entries = bundlecaches.sortclonebundleentries(repo.ui, entries)

    url = entries[0][b'URL']
    digest = entries[0].get(b'DIGEST')
    if digest:
        digest = _select_clone_bundle_digest(digest)

    repo.ui.status(_(b'applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url, remote, digest):
        repo.ui.status(_(b'finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
        repo.ui.warn(_(b'falling back to normal clone\n'))
    else:
        raise error.Abort(
            _(b'error applying bundle'),
            hint=_(
                b'if this error persists, consider contacting '
                b'the server operator or disable clone '
                b'bundles via '
                b'"--config ui.clonebundles=false"'
            ),
        )
2923
2938
2924
2939
def inline_clone_bundle_open(ui, url, peer):
    """Open an inline clone bundle cached on ``peer``.

    ``url`` uses the ``peer-bundle-cache://`` scheme; the part after the
    scheme identifies the cached bundle to stream from the peer.

    Raises ``error.Abort`` when no peer was supplied to serve the bundle.
    """
    if not peer:
        # Fix: interpolate AFTER the translation lookup.  The previous code
        # passed the already-formatted string to _(), which defeats gettext
        # (the catalog only contains the un-interpolated template).
        raise error.Abort(_(b'no remote repository supplied for %s') % url)
    clonebundleid = url[len(bundlecaches.CLONEBUNDLESCHEME) :]
    peerclonebundle = peer.get_cached_bundle_inline(clonebundleid)
    return util.chunkbuffer(peerclonebundle)
2931
2946
2932
2947
def trypullbundlefromurl(ui, repo, url, peer, digest):
    """Attempt to fetch and apply a bundle from ``url``.

    Returns True on success.  On HTTP/URL errors a warning is printed and
    False is returned so the caller may fall back to a regular clone.
    """
    with repo.lock(), repo.transaction(b'bundleurl') as tr:
        try:
            if url.startswith(bundlecaches.CLONEBUNDLESCHEME):
                bundle_fh = inline_clone_bundle_open(ui, url, peer)
            else:
                # ``digest`` (optional ``algo:hexdigest``) is verified by
                # the URL layer while streaming.
                bundle_fh = urlmod.open(ui, url, digest=digest)
            payload = readbundle(ui, bundle_fh, b'stream')

            if isinstance(payload, streamclone.streamcloneapplier):
                payload.apply(repo)
            else:
                bundle2.applybundle(repo, payload, tr, b'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(
                _(b'HTTP error fetching bundle: %s\n')
                % stringutil.forcebytestr(e)
            )
        except urlerr.urlerror as e:
            ui.warn(
                _(b'error fetching bundle: %s\n')
                % stringutil.forcebytestr(e.reason)
            )

        return False
@@ -1,661 +1,731
1 # url.py - HTTP handling for mercurial
1 # url.py - HTTP handling for mercurial
2 #
2 #
3 # Copyright 2005, 2006, 2007, 2008 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005, 2006, 2007, 2008 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
4 # Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 from __future__ import annotations
10 from __future__ import annotations
11
11
12 import base64
12 import base64
13 import hashlib
13 import socket
14 import socket
14
15
15 from .i18n import _
16 from .i18n import _
17 from .node import hex
16 from . import (
18 from . import (
17 encoding,
19 encoding,
18 error,
20 error,
19 httpconnection as httpconnectionmod,
21 httpconnection as httpconnectionmod,
20 keepalive,
22 keepalive,
21 pycompat,
23 pycompat,
22 sslutil,
24 sslutil,
23 urllibcompat,
25 urllibcompat,
24 util,
26 util,
25 )
27 )
26 from .utils import (
28 from .utils import (
27 stringutil,
29 stringutil,
28 urlutil,
30 urlutil,
29 )
31 )
30
32
31 httplib = util.httplib
33 httplib = util.httplib
32 stringio = util.stringio
34 stringio = util.stringio
33 urlerr = util.urlerr
35 urlerr = util.urlerr
34 urlreq = util.urlreq
36 urlreq = util.urlreq
35
37
36
38
37 def escape(s, quote=None):
39 def escape(s, quote=None):
38 """Replace special characters "&", "<" and ">" to HTML-safe sequences.
40 """Replace special characters "&", "<" and ">" to HTML-safe sequences.
39 If the optional flag quote is true, the quotation mark character (")
41 If the optional flag quote is true, the quotation mark character (")
40 is also translated.
42 is also translated.
41
43
42 This is the same as cgi.escape in Python, but always operates on
44 This is the same as cgi.escape in Python, but always operates on
43 bytes, whereas cgi.escape in Python 3 only works on unicodes.
45 bytes, whereas cgi.escape in Python 3 only works on unicodes.
44 """
46 """
45 s = s.replace(b"&", b"&amp;")
47 s = s.replace(b"&", b"&amp;")
46 s = s.replace(b"<", b"&lt;")
48 s = s.replace(b"<", b"&lt;")
47 s = s.replace(b">", b"&gt;")
49 s = s.replace(b">", b"&gt;")
48 if quote:
50 if quote:
49 s = s.replace(b'"', b"&quot;")
51 s = s.replace(b'"', b"&quot;")
50 return s
52 return s
51
53
52
54
53 class passwordmgr:
55 class passwordmgr:
54 def __init__(self, ui, passwddb):
56 def __init__(self, ui, passwddb):
55 self.ui = ui
57 self.ui = ui
56 self.passwddb = passwddb
58 self.passwddb = passwddb
57
59
58 def add_password(self, realm, uri, user, passwd):
60 def add_password(self, realm, uri, user, passwd):
59 return self.passwddb.add_password(realm, uri, user, passwd)
61 return self.passwddb.add_password(realm, uri, user, passwd)
60
62
61 def find_user_password(self, realm, authuri):
63 def find_user_password(self, realm, authuri):
62 assert isinstance(realm, (type(None), str))
64 assert isinstance(realm, (type(None), str))
63 assert isinstance(authuri, str)
65 assert isinstance(authuri, str)
64 authinfo = self.passwddb.find_user_password(realm, authuri)
66 authinfo = self.passwddb.find_user_password(realm, authuri)
65 user, passwd = authinfo
67 user, passwd = authinfo
66 user, passwd = pycompat.bytesurl(user), pycompat.bytesurl(passwd)
68 user, passwd = pycompat.bytesurl(user), pycompat.bytesurl(passwd)
67 if user and passwd:
69 if user and passwd:
68 self._writedebug(user, passwd)
70 self._writedebug(user, passwd)
69 return (pycompat.strurl(user), pycompat.strurl(passwd))
71 return (pycompat.strurl(user), pycompat.strurl(passwd))
70
72
71 if not user or not passwd:
73 if not user or not passwd:
72 res = httpconnectionmod.readauthforuri(self.ui, authuri, user)
74 res = httpconnectionmod.readauthforuri(self.ui, authuri, user)
73 if res:
75 if res:
74 group, auth = res
76 group, auth = res
75 user, passwd = auth.get(b'username'), auth.get(b'password')
77 user, passwd = auth.get(b'username'), auth.get(b'password')
76 self.ui.debug(b"using auth.%s.* for authentication\n" % group)
78 self.ui.debug(b"using auth.%s.* for authentication\n" % group)
77 if not user or not passwd:
79 if not user or not passwd:
78 u = urlutil.url(pycompat.bytesurl(authuri))
80 u = urlutil.url(pycompat.bytesurl(authuri))
79 u.query = None
81 u.query = None
80 if not self.ui.interactive():
82 if not self.ui.interactive():
81 raise error.Abort(
83 raise error.Abort(
82 _(b'http authorization required for %s')
84 _(b'http authorization required for %s')
83 % urlutil.hidepassword(bytes(u))
85 % urlutil.hidepassword(bytes(u))
84 )
86 )
85
87
86 self.ui.write(
88 self.ui.write(
87 _(b"http authorization required for %s\n")
89 _(b"http authorization required for %s\n")
88 % urlutil.hidepassword(bytes(u))
90 % urlutil.hidepassword(bytes(u))
89 )
91 )
90 self.ui.write(_(b"realm: %s\n") % pycompat.bytesurl(realm))
92 self.ui.write(_(b"realm: %s\n") % pycompat.bytesurl(realm))
91 if user:
93 if user:
92 self.ui.write(_(b"user: %s\n") % user)
94 self.ui.write(_(b"user: %s\n") % user)
93 else:
95 else:
94 user = self.ui.prompt(_(b"user:"), default=None)
96 user = self.ui.prompt(_(b"user:"), default=None)
95
97
96 if not passwd:
98 if not passwd:
97 passwd = self.ui.getpass()
99 passwd = self.ui.getpass()
98
100
99 # As of Python 3.8, the default implementation of
101 # As of Python 3.8, the default implementation of
100 # AbstractBasicAuthHandler.retry_http_basic_auth() assumes the user
102 # AbstractBasicAuthHandler.retry_http_basic_auth() assumes the user
101 # is set if pw is not None. This means (None, str) is not a valid
103 # is set if pw is not None. This means (None, str) is not a valid
102 # return type of find_user_password().
104 # return type of find_user_password().
103 if user is None:
105 if user is None:
104 return None, None
106 return None, None
105
107
106 self.passwddb.add_password(realm, authuri, user, passwd)
108 self.passwddb.add_password(realm, authuri, user, passwd)
107 self._writedebug(user, passwd)
109 self._writedebug(user, passwd)
108 return (pycompat.strurl(user), pycompat.strurl(passwd))
110 return (pycompat.strurl(user), pycompat.strurl(passwd))
109
111
110 def _writedebug(self, user, passwd):
112 def _writedebug(self, user, passwd):
111 msg = _(b'http auth: user %s, password %s\n')
113 msg = _(b'http auth: user %s, password %s\n')
112 self.ui.debug(msg % (user, passwd and b'*' * len(passwd) or b'not set'))
114 self.ui.debug(msg % (user, passwd and b'*' * len(passwd) or b'not set'))
113
115
114 def find_stored_password(self, authuri):
116 def find_stored_password(self, authuri):
115 return self.passwddb.find_user_password(None, authuri)
117 return self.passwddb.find_user_password(None, authuri)
116
118
117
119
118 class proxyhandler(urlreq.proxyhandler):
120 class proxyhandler(urlreq.proxyhandler):
119 def __init__(self, ui):
121 def __init__(self, ui):
120 proxyurl = ui.config(b"http_proxy", b"host") or encoding.environ.get(
122 proxyurl = ui.config(b"http_proxy", b"host") or encoding.environ.get(
121 b'http_proxy'
123 b'http_proxy'
122 )
124 )
123 # XXX proxyauthinfo = None
125 # XXX proxyauthinfo = None
124
126
125 if proxyurl:
127 if proxyurl:
126 # proxy can be proper url or host[:port]
128 # proxy can be proper url or host[:port]
127 if not (
129 if not (
128 proxyurl.startswith(b'http:') or proxyurl.startswith(b'https:')
130 proxyurl.startswith(b'http:') or proxyurl.startswith(b'https:')
129 ):
131 ):
130 proxyurl = b'http://' + proxyurl + b'/'
132 proxyurl = b'http://' + proxyurl + b'/'
131 proxy = urlutil.url(proxyurl)
133 proxy = urlutil.url(proxyurl)
132 if not proxy.user:
134 if not proxy.user:
133 proxy.user = ui.config(b"http_proxy", b"user")
135 proxy.user = ui.config(b"http_proxy", b"user")
134 proxy.passwd = ui.config(b"http_proxy", b"passwd")
136 proxy.passwd = ui.config(b"http_proxy", b"passwd")
135
137
136 # see if we should use a proxy for this url
138 # see if we should use a proxy for this url
137 no_list = [b"localhost", b"127.0.0.1"]
139 no_list = [b"localhost", b"127.0.0.1"]
138 no_list.extend(
140 no_list.extend(
139 [p.lower() for p in ui.configlist(b"http_proxy", b"no")]
141 [p.lower() for p in ui.configlist(b"http_proxy", b"no")]
140 )
142 )
141 no_list.extend(
143 no_list.extend(
142 [
144 [
143 p.strip().lower()
145 p.strip().lower()
144 for p in encoding.environ.get(b"no_proxy", b'').split(b',')
146 for p in encoding.environ.get(b"no_proxy", b'').split(b',')
145 if p.strip()
147 if p.strip()
146 ]
148 ]
147 )
149 )
148 # "http_proxy.always" config is for running tests on localhost
150 # "http_proxy.always" config is for running tests on localhost
149 if ui.configbool(b"http_proxy", b"always"):
151 if ui.configbool(b"http_proxy", b"always"):
150 self.no_list = []
152 self.no_list = []
151 else:
153 else:
152 self.no_list = no_list
154 self.no_list = no_list
153
155
154 # Keys and values need to be str because the standard library
156 # Keys and values need to be str because the standard library
155 # expects them to be.
157 # expects them to be.
156 proxyurl = str(proxy)
158 proxyurl = str(proxy)
157 proxies = {'http': proxyurl, 'https': proxyurl}
159 proxies = {'http': proxyurl, 'https': proxyurl}
158 ui.debug(
160 ui.debug(
159 b'proxying through %s\n' % urlutil.hidepassword(bytes(proxy))
161 b'proxying through %s\n' % urlutil.hidepassword(bytes(proxy))
160 )
162 )
161 else:
163 else:
162 proxies = {}
164 proxies = {}
163
165
164 urlreq.proxyhandler.__init__(self, proxies)
166 urlreq.proxyhandler.__init__(self, proxies)
165 self.ui = ui
167 self.ui = ui
166
168
167 def proxy_open(self, req, proxy, type_):
169 def proxy_open(self, req, proxy, type_):
168 host = pycompat.bytesurl(urllibcompat.gethost(req)).split(b':')[0]
170 host = pycompat.bytesurl(urllibcompat.gethost(req)).split(b':')[0]
169 for e in self.no_list:
171 for e in self.no_list:
170 if host == e:
172 if host == e:
171 return None
173 return None
172 if e.startswith(b'*.') and host.endswith(e[2:]):
174 if e.startswith(b'*.') and host.endswith(e[2:]):
173 return None
175 return None
174 if e.startswith(b'.') and host.endswith(e[1:]):
176 if e.startswith(b'.') and host.endswith(e[1:]):
175 return None
177 return None
176
178
177 return urlreq.proxyhandler.proxy_open(self, req, proxy, type_)
179 return urlreq.proxyhandler.proxy_open(self, req, proxy, type_)
178
180
179
181
180 def _gen_sendfile(orgsend):
182 def _gen_sendfile(orgsend):
181 def _sendfile(self, data):
183 def _sendfile(self, data):
182 # send a file
184 # send a file
183 if isinstance(data, httpconnectionmod.httpsendfile):
185 if isinstance(data, httpconnectionmod.httpsendfile):
184 # if auth required, some data sent twice, so rewind here
186 # if auth required, some data sent twice, so rewind here
185 data.seek(0)
187 data.seek(0)
186 for chunk in util.filechunkiter(data):
188 for chunk in util.filechunkiter(data):
187 orgsend(self, chunk)
189 orgsend(self, chunk)
188 else:
190 else:
189 orgsend(self, data)
191 orgsend(self, data)
190
192
191 return _sendfile
193 return _sendfile
192
194
193
195
194 has_https = hasattr(urlreq, 'httpshandler')
196 has_https = hasattr(urlreq, 'httpshandler')
195
197
196
198
197 class httpconnection(keepalive.HTTPConnection):
199 class httpconnection(keepalive.HTTPConnection):
198 # must be able to send big bundle as stream.
200 # must be able to send big bundle as stream.
199 send = _gen_sendfile(keepalive.HTTPConnection.send)
201 send = _gen_sendfile(keepalive.HTTPConnection.send)
200
202
201
203
202 # Large parts of this function have their origin from before Python 2.6
204 # Large parts of this function have their origin from before Python 2.6
203 # and could potentially be removed.
205 # and could potentially be removed.
204 def _generic_start_transaction(handler, h, req):
206 def _generic_start_transaction(handler, h, req):
205 tunnel_host = req._tunnel_host
207 tunnel_host = req._tunnel_host
206 if tunnel_host:
208 if tunnel_host:
207 if tunnel_host[:7] not in ['http://', 'https:/']:
209 if tunnel_host[:7] not in ['http://', 'https:/']:
208 tunnel_host = 'https://' + tunnel_host
210 tunnel_host = 'https://' + tunnel_host
209 new_tunnel = True
211 new_tunnel = True
210 else:
212 else:
211 tunnel_host = urllibcompat.getselector(req)
213 tunnel_host = urllibcompat.getselector(req)
212 new_tunnel = False
214 new_tunnel = False
213
215
214 if new_tunnel or tunnel_host == urllibcompat.getfullurl(req): # has proxy
216 if new_tunnel or tunnel_host == urllibcompat.getfullurl(req): # has proxy
215 u = urlutil.url(pycompat.bytesurl(tunnel_host))
217 u = urlutil.url(pycompat.bytesurl(tunnel_host))
216 if new_tunnel or u.scheme == b'https': # only use CONNECT for HTTPS
218 if new_tunnel or u.scheme == b'https': # only use CONNECT for HTTPS
217 h.realhostport = b':'.join([u.host, (u.port or b'443')])
219 h.realhostport = b':'.join([u.host, (u.port or b'443')])
218 h.headers = req.headers.copy()
220 h.headers = req.headers.copy()
219 h.headers.update(handler.parent.addheaders)
221 h.headers.update(handler.parent.addheaders)
220 return
222 return
221
223
222 h.realhostport = None
224 h.realhostport = None
223 h.headers = None
225 h.headers = None
224
226
225
227
226 class httphandler(keepalive.HTTPHandler):
228 class httphandler(keepalive.HTTPHandler):
227 def http_open(self, req):
229 def http_open(self, req):
228 return self.do_open(httpconnection, req)
230 return self.do_open(httpconnection, req)
229
231
230 def _start_transaction(self, h, req):
232 def _start_transaction(self, h, req):
231 _generic_start_transaction(self, h, req)
233 _generic_start_transaction(self, h, req)
232 return keepalive.HTTPHandler._start_transaction(self, h, req)
234 return keepalive.HTTPHandler._start_transaction(self, h, req)
233
235
234
236
235 class logginghttphandler(httphandler):
237 class logginghttphandler(httphandler):
236 """HTTP(S) handler that logs socket I/O."""
238 """HTTP(S) handler that logs socket I/O."""
237
239
238 def __init__(self, logfh, name, observeropts, *args, **kwargs):
240 def __init__(self, logfh, name, observeropts, *args, **kwargs):
239 super().__init__(*args, **kwargs)
241 super().__init__(*args, **kwargs)
240
242
241 self._logfh = logfh
243 self._logfh = logfh
242 self._logname = name
244 self._logname = name
243 self._observeropts = observeropts
245 self._observeropts = observeropts
244
246
245 def do_open(self, http_class, *args, **kwargs):
247 def do_open(self, http_class, *args, **kwargs):
246 _logfh = self._logfh
248 _logfh = self._logfh
247 _logname = self._logname
249 _logname = self._logname
248 _observeropts = self._observeropts
250 _observeropts = self._observeropts
249
251
250 class logginghttpconnection(http_class):
252 class logginghttpconnection(http_class):
251 def connect(self):
253 def connect(self):
252 super().connect()
254 super().connect()
253 self.sock = util.makeloggingsocket(
255 self.sock = util.makeloggingsocket(
254 _logfh, self.sock, _logname, **_observeropts
256 _logfh, self.sock, _logname, **_observeropts
255 )
257 )
256
258
257 return super().do_open(logginghttpconnection, *args, **kwargs)
259 return super().do_open(logginghttpconnection, *args, **kwargs)
258
260
259
261
260 if has_https:
262 if has_https:
261
263
262 def _generic_proxytunnel(self: "httpsconnection"):
264 def _generic_proxytunnel(self: "httpsconnection"):
263 headers = self.headers # pytype: disable=attribute-error
265 headers = self.headers # pytype: disable=attribute-error
264 proxyheaders = {
266 proxyheaders = {
265 pycompat.bytestr(x): pycompat.bytestr(headers[x])
267 pycompat.bytestr(x): pycompat.bytestr(headers[x])
266 for x in headers
268 for x in headers
267 if x.lower().startswith('proxy-')
269 if x.lower().startswith('proxy-')
268 }
270 }
269 realhostport = self.realhostport # pytype: disable=attribute-error
271 realhostport = self.realhostport # pytype: disable=attribute-error
270 self.send(b'CONNECT %s HTTP/1.0\r\n' % realhostport)
272 self.send(b'CONNECT %s HTTP/1.0\r\n' % realhostport)
271
273
272 for header in proxyheaders.items():
274 for header in proxyheaders.items():
273 self.send(b'%s: %s\r\n' % header)
275 self.send(b'%s: %s\r\n' % header)
274 self.send(b'\r\n')
276 self.send(b'\r\n')
275
277
276 # majority of the following code is duplicated from
278 # majority of the following code is duplicated from
277 # httplib.HTTPConnection as there are no adequate places to
279 # httplib.HTTPConnection as there are no adequate places to
278 # override functions to provide the needed functionality.
280 # override functions to provide the needed functionality.
279
281
280 # pytype: disable=attribute-error
282 # pytype: disable=attribute-error
281 res = self.response_class(self.sock, method=self._method)
283 res = self.response_class(self.sock, method=self._method)
282 # pytype: enable=attribute-error
284 # pytype: enable=attribute-error
283
285
284 while True:
286 while True:
285 # pytype: disable=attribute-error
287 # pytype: disable=attribute-error
286 version, status, reason = res._read_status()
288 version, status, reason = res._read_status()
287 # pytype: enable=attribute-error
289 # pytype: enable=attribute-error
288 if status != httplib.CONTINUE:
290 if status != httplib.CONTINUE:
289 break
291 break
290 # skip lines that are all whitespace
292 # skip lines that are all whitespace
291 list(iter(lambda: res.fp.readline().strip(), b''))
293 list(iter(lambda: res.fp.readline().strip(), b''))
292
294
293 if status == 200:
295 if status == 200:
294 # skip lines until we find a blank line
296 # skip lines until we find a blank line
295 list(iter(res.fp.readline, b'\r\n'))
297 list(iter(res.fp.readline, b'\r\n'))
296 else:
298 else:
297 self.close()
299 self.close()
298 raise socket.error(
300 raise socket.error(
299 "Tunnel connection failed: %d %s" % (status, reason.strip())
301 "Tunnel connection failed: %d %s" % (status, reason.strip())
300 )
302 )
301
303
302 class httpsconnection(keepalive.HTTPConnection):
304 class httpsconnection(keepalive.HTTPConnection):
303 response_class = keepalive.HTTPResponse
305 response_class = keepalive.HTTPResponse
304 default_port = httplib.HTTPS_PORT
306 default_port = httplib.HTTPS_PORT
305 # must be able to send big bundle as stream.
307 # must be able to send big bundle as stream.
306 send = _gen_sendfile(keepalive.safesend)
308 send = _gen_sendfile(keepalive.safesend)
307 getresponse = keepalive.wrapgetresponse(httplib.HTTPConnection)
309 getresponse = keepalive.wrapgetresponse(httplib.HTTPConnection)
308
310
309 def __init__(
311 def __init__(
310 self,
312 self,
311 host,
313 host,
312 port=None,
314 port=None,
313 key_file=None,
315 key_file=None,
314 cert_file=None,
316 cert_file=None,
315 *args,
317 *args,
316 **kwargs,
318 **kwargs,
317 ):
319 ):
318 keepalive.HTTPConnection.__init__(self, host, port, *args, **kwargs)
320 keepalive.HTTPConnection.__init__(self, host, port, *args, **kwargs)
319 self.key_file = key_file
321 self.key_file = key_file
320 self.cert_file = cert_file
322 self.cert_file = cert_file
321
323
322 def connect(self):
324 def connect(self):
323 self.sock = socket.create_connection(
325 self.sock = socket.create_connection(
324 (self.host, self.port), self.timeout
326 (self.host, self.port), self.timeout
325 )
327 )
326
328
327 host = self.host
329 host = self.host
328 realhostport = self.realhostport # pytype: disable=attribute-error
330 realhostport = self.realhostport # pytype: disable=attribute-error
329 if realhostport: # use CONNECT proxy
331 if realhostport: # use CONNECT proxy
330 _generic_proxytunnel(self)
332 _generic_proxytunnel(self)
331 host = realhostport.rsplit(b':', 1)[0]
333 host = realhostport.rsplit(b':', 1)[0]
332 self.sock = sslutil.wrapsocket(
334 self.sock = sslutil.wrapsocket(
333 self.sock,
335 self.sock,
334 self.key_file,
336 self.key_file,
335 self.cert_file,
337 self.cert_file,
336 ui=self.ui, # pytype: disable=attribute-error
338 ui=self.ui, # pytype: disable=attribute-error
337 serverhostname=host,
339 serverhostname=host,
338 )
340 )
339 sslutil.validatesocket(self.sock)
341 sslutil.validatesocket(self.sock)
340
342
341 class httpshandler(keepalive.KeepAliveHandler, urlreq.httpshandler):
343 class httpshandler(keepalive.KeepAliveHandler, urlreq.httpshandler):
342 def __init__(self, ui, timeout=None):
344 def __init__(self, ui, timeout=None):
343 keepalive.KeepAliveHandler.__init__(self, timeout=timeout)
345 keepalive.KeepAliveHandler.__init__(self, timeout=timeout)
344 urlreq.httpshandler.__init__(self)
346 urlreq.httpshandler.__init__(self)
345 self.ui = ui
347 self.ui = ui
346 self.pwmgr = passwordmgr(self.ui, self.ui.httppasswordmgrdb)
348 self.pwmgr = passwordmgr(self.ui, self.ui.httppasswordmgrdb)
347
349
348 def _start_transaction(self, h, req):
350 def _start_transaction(self, h, req):
349 _generic_start_transaction(self, h, req)
351 _generic_start_transaction(self, h, req)
350 return keepalive.KeepAliveHandler._start_transaction(self, h, req)
352 return keepalive.KeepAliveHandler._start_transaction(self, h, req)
351
353
352 def https_open(self, req):
354 def https_open(self, req):
353 # urllibcompat.getfullurl() does not contain credentials
355 # urllibcompat.getfullurl() does not contain credentials
354 # and we may need them to match the certificates.
356 # and we may need them to match the certificates.
355 url = urllibcompat.getfullurl(req)
357 url = urllibcompat.getfullurl(req)
356 user, password = self.pwmgr.find_stored_password(url)
358 user, password = self.pwmgr.find_stored_password(url)
357 res = httpconnectionmod.readauthforuri(self.ui, url, user)
359 res = httpconnectionmod.readauthforuri(self.ui, url, user)
358 if res:
360 if res:
359 group, auth = res
361 group, auth = res
360 self.auth = auth
362 self.auth = auth
361 self.ui.debug(b"using auth.%s.* for authentication\n" % group)
363 self.ui.debug(b"using auth.%s.* for authentication\n" % group)
362 else:
364 else:
363 self.auth = None
365 self.auth = None
364 return self.do_open(self._makeconnection, req)
366 return self.do_open(self._makeconnection, req)
365
367
366 def _makeconnection(self, host, port=None, *args, **kwargs):
368 def _makeconnection(self, host, port=None, *args, **kwargs):
367 keyfile = None
369 keyfile = None
368 certfile = None
370 certfile = None
369
371
370 if len(args) >= 1: # key_file
372 if len(args) >= 1: # key_file
371 keyfile = args[0]
373 keyfile = args[0]
372 if len(args) >= 2: # cert_file
374 if len(args) >= 2: # cert_file
373 certfile = args[1]
375 certfile = args[1]
374 args = args[2:]
376 args = args[2:]
375
377
376 # if the user has specified different key/cert files in
378 # if the user has specified different key/cert files in
377 # hgrc, we prefer these
379 # hgrc, we prefer these
378 if self.auth and b'key' in self.auth and b'cert' in self.auth:
380 if self.auth and b'key' in self.auth and b'cert' in self.auth:
379 keyfile = self.auth[b'key']
381 keyfile = self.auth[b'key']
380 certfile = self.auth[b'cert']
382 certfile = self.auth[b'cert']
381
383
382 conn = httpsconnection(
384 conn = httpsconnection(
383 host, port, keyfile, certfile, *args, **kwargs
385 host, port, keyfile, certfile, *args, **kwargs
384 )
386 )
385 conn.ui = self.ui
387 conn.ui = self.ui
386 return conn
388 return conn
387
389
388
390
389 class httpdigestauthhandler(urlreq.httpdigestauthhandler):
391 class httpdigestauthhandler(urlreq.httpdigestauthhandler):
390 def __init__(self, *args, **kwargs):
392 def __init__(self, *args, **kwargs):
391 urlreq.httpdigestauthhandler.__init__(self, *args, **kwargs)
393 urlreq.httpdigestauthhandler.__init__(self, *args, **kwargs)
392 self.retried_req = None
394 self.retried_req = None
393
395
394 def reset_retry_count(self):
396 def reset_retry_count(self):
395 # Python 2.6.5 will call this on 401 or 407 errors and thus loop
397 # Python 2.6.5 will call this on 401 or 407 errors and thus loop
396 # forever. We disable reset_retry_count completely and reset in
398 # forever. We disable reset_retry_count completely and reset in
397 # http_error_auth_reqed instead.
399 # http_error_auth_reqed instead.
398 pass
400 pass
399
401
400 def http_error_auth_reqed(self, auth_header, host, req, headers):
402 def http_error_auth_reqed(self, auth_header, host, req, headers):
401 # Reset the retry counter once for each request.
403 # Reset the retry counter once for each request.
402 if req is not self.retried_req:
404 if req is not self.retried_req:
403 self.retried_req = req
405 self.retried_req = req
404 self.retried = 0
406 self.retried = 0
405 return urlreq.httpdigestauthhandler.http_error_auth_reqed(
407 return urlreq.httpdigestauthhandler.http_error_auth_reqed(
406 self, auth_header, host, req, headers
408 self, auth_header, host, req, headers
407 )
409 )
408
410
409
411
410 class httpbasicauthhandler(urlreq.httpbasicauthhandler):
412 class httpbasicauthhandler(urlreq.httpbasicauthhandler):
411 def __init__(self, *args, **kwargs):
413 def __init__(self, *args, **kwargs):
412 self.auth = None
414 self.auth = None
413 urlreq.httpbasicauthhandler.__init__(self, *args, **kwargs)
415 urlreq.httpbasicauthhandler.__init__(self, *args, **kwargs)
414 self.retried_req = None
416 self.retried_req = None
415
417
416 def http_request(self, request):
418 def http_request(self, request):
417 if self.auth:
419 if self.auth:
418 request.add_unredirected_header(self.auth_header, self.auth)
420 request.add_unredirected_header(self.auth_header, self.auth)
419
421
420 return request
422 return request
421
423
422 def https_request(self, request):
424 def https_request(self, request):
423 if self.auth:
425 if self.auth:
424 request.add_unredirected_header(self.auth_header, self.auth)
426 request.add_unredirected_header(self.auth_header, self.auth)
425
427
426 return request
428 return request
427
429
428 def reset_retry_count(self):
430 def reset_retry_count(self):
429 # Python 2.6.5 will call this on 401 or 407 errors and thus loop
431 # Python 2.6.5 will call this on 401 or 407 errors and thus loop
430 # forever. We disable reset_retry_count completely and reset in
432 # forever. We disable reset_retry_count completely and reset in
431 # http_error_auth_reqed instead.
433 # http_error_auth_reqed instead.
432 pass
434 pass
433
435
434 def http_error_auth_reqed(self, auth_header, host, req, headers):
436 def http_error_auth_reqed(self, auth_header, host, req, headers):
435 # Reset the retry counter once for each request.
437 # Reset the retry counter once for each request.
436 if req is not self.retried_req:
438 if req is not self.retried_req:
437 self.retried_req = req
439 self.retried_req = req
438 self.retried = 0
440 self.retried = 0
439 return urlreq.httpbasicauthhandler.http_error_auth_reqed(
441 return urlreq.httpbasicauthhandler.http_error_auth_reqed(
440 self, auth_header, host, req, headers
442 self, auth_header, host, req, headers
441 )
443 )
442
444
443 def retry_http_basic_auth(self, host, req, realm):
445 def retry_http_basic_auth(self, host, req, realm):
444 user, pw = self.passwd.find_user_password(
446 user, pw = self.passwd.find_user_password(
445 realm, urllibcompat.getfullurl(req)
447 realm, urllibcompat.getfullurl(req)
446 )
448 )
447 if pw is not None:
449 if pw is not None:
448 raw = b"%s:%s" % (pycompat.bytesurl(user), pycompat.bytesurl(pw))
450 raw = b"%s:%s" % (pycompat.bytesurl(user), pycompat.bytesurl(pw))
449 auth = 'Basic %s' % pycompat.strurl(base64.b64encode(raw).strip())
451 auth = 'Basic %s' % pycompat.strurl(base64.b64encode(raw).strip())
450 if req.get_header(self.auth_header, None) == auth:
452 if req.get_header(self.auth_header, None) == auth:
451 return None
453 return None
452 self.auth = auth
454 self.auth = auth
453 req.add_unredirected_header(self.auth_header, auth)
455 req.add_unredirected_header(self.auth_header, auth)
454 return self.parent.open(req)
456 return self.parent.open(req)
455 else:
457 else:
456 return None
458 return None
457
459
458
460
459 def load_cookiejar(ui):
461 def load_cookiejar(ui):
460 cookiefile = ui.config(b'auth', b'cookiefile')
462 cookiefile = ui.config(b'auth', b'cookiefile')
461 if not cookiefile:
463 if not cookiefile:
462 return
464 return
463 cookiefile = util.expandpath(cookiefile)
465 cookiefile = util.expandpath(cookiefile)
464 try:
466 try:
465 cookiejar = util.cookielib.MozillaCookieJar(
467 cookiejar = util.cookielib.MozillaCookieJar(
466 pycompat.fsdecode(cookiefile)
468 pycompat.fsdecode(cookiefile)
467 )
469 )
468 cookiejar.load()
470 cookiejar.load()
469 return cookiejar
471 return cookiejar
470 except util.cookielib.LoadError as e:
472 except util.cookielib.LoadError as e:
471 ui.warn(
473 ui.warn(
472 _(
474 _(
473 b'(error loading cookie file %s: %s; continuing without '
475 b'(error loading cookie file %s: %s; continuing without '
474 b'cookies)\n'
476 b'cookies)\n'
475 )
477 )
476 % (cookiefile, stringutil.forcebytestr(e))
478 % (cookiefile, stringutil.forcebytestr(e))
477 )
479 )
478
480
479
481
480 class readlinehandler(urlreq.basehandler):
482 class readlinehandler(urlreq.basehandler):
481 def http_response(self, request, response):
483 def http_response(self, request, response):
482 class readlineresponse(response.__class__):
484 class readlineresponse(response.__class__):
483 def readlines(self, sizehint=0):
485 def readlines(self, sizehint=0):
484 total = 0
486 total = 0
485 list = []
487 list = []
486 while True:
488 while True:
487 line = self.readline()
489 line = self.readline()
488 if not line:
490 if not line:
489 break
491 break
490 list.append(line)
492 list.append(line)
491 total += len(line)
493 total += len(line)
492 if sizehint and total >= sizehint:
494 if sizehint and total >= sizehint:
493 break
495 break
494 return list
496 return list
495
497
496 response.__class__ = readlineresponse
498 response.__class__ = readlineresponse
497 return response
499 return response
498
500
499 https_response = http_response
501 https_response = http_response
500
502
501
503
504 class digesthandler(urlreq.basehandler):
505 # exchange.py assumes the algorithms are listed in order of preference,
506 # earlier entries are prefered.
507 digest_algorithms = {
508 b'sha256': hashlib.sha256,
509 b'sha512': hashlib.sha512,
510 }
511
512 def __init__(self, digest):
513 if b':' not in digest:
514 raise error.Abort(_(b'invalid digest specification'))
515 algo, checksum = digest.split(b':')
516 if algo not in self.digest_algorithms:
517 raise error.Abort(_(b'unsupported digest algorithm: %s') % algo)
518 self._digest = checksum
519 self._hasher = self.digest_algorithms[algo]()
520
521 def http_response(self, request, response):
522 class digestresponse(response.__class__):
523 def _digest_input(self, data):
524 self._hasher.update(data)
525 self._digest_consumed += len(data)
526 if self._digest_finished:
527 digest = hex(self._hasher.digest())
528 if digest != self._digest:
529 raise error.SecurityError(
530 _(
531 b'file with digest %s expected, but %s found for %d bytes'
532 )
533 % (
534 pycompat.bytestr(self._digest),
535 pycompat.bytestr(digest),
536 self._digest_consumed,
537 )
538 )
539
540 def read(self, amt=None):
541 data = super().read(amt)
542 self._digest_input(data)
543 return data
544
545 def readline(self):
546 data = super().readline()
547 self._digest_input(data)
548 return data
549
550 def readinto(self, dest):
551 got = super().readinto(dest)
552 self._digest_input(dest[:got])
553 return got
554
555 def _close_conn(self):
556 self._digest_finished = True
557 return super().close()
558
559 response.__class__ = digestresponse
560 response._digest = self._digest
561 response._digest_consumed = 0
562 response._hasher = self._hasher.copy()
563 response._digest_finished = False
564 return response
565
566 https_response = http_response
567
568
502 handlerfuncs = []
569 handlerfuncs = []
503
570
504
571
505 def opener(
572 def opener(
506 ui,
573 ui,
507 authinfo=None,
574 authinfo=None,
508 useragent=None,
575 useragent=None,
509 loggingfh=None,
576 loggingfh=None,
510 loggingname=b's',
577 loggingname=b's',
511 loggingopts=None,
578 loggingopts=None,
512 sendaccept=True,
579 sendaccept=True,
580 digest=None,
513 ):
581 ):
514 """
582 """
515 construct an opener suitable for urllib2
583 construct an opener suitable for urllib2
516 authinfo will be added to the password manager
584 authinfo will be added to the password manager
517
585
518 The opener can be configured to log socket events if the various
586 The opener can be configured to log socket events if the various
519 ``logging*`` arguments are specified.
587 ``logging*`` arguments are specified.
520
588
521 ``loggingfh`` denotes a file object to log events to.
589 ``loggingfh`` denotes a file object to log events to.
522 ``loggingname`` denotes the name of the to print when logging.
590 ``loggingname`` denotes the name of the to print when logging.
523 ``loggingopts`` is a dict of keyword arguments to pass to the constructed
591 ``loggingopts`` is a dict of keyword arguments to pass to the constructed
524 ``util.socketobserver`` instance.
592 ``util.socketobserver`` instance.
525
593
526 ``sendaccept`` allows controlling whether the ``Accept`` request header
594 ``sendaccept`` allows controlling whether the ``Accept`` request header
527 is sent. The header is sent by default.
595 is sent. The header is sent by default.
528 """
596 """
529 timeout = ui.configwith(float, b'http', b'timeout')
597 timeout = ui.configwith(float, b'http', b'timeout')
530 handlers = []
598 handlers = []
531
599
532 if loggingfh:
600 if loggingfh:
533 handlers.append(
601 handlers.append(
534 logginghttphandler(
602 logginghttphandler(
535 loggingfh, loggingname, loggingopts or {}, timeout=timeout
603 loggingfh, loggingname, loggingopts or {}, timeout=timeout
536 )
604 )
537 )
605 )
538 else:
606 else:
539 handlers.append(httphandler(timeout=timeout))
607 handlers.append(httphandler(timeout=timeout))
540 if has_https:
608 if has_https:
541 # pytype gets confused about the conditional existence for httpshandler here.
609 # pytype gets confused about the conditional existence for httpshandler here.
542 handlers.append(
610 handlers.append(
543 httpshandler(ui, timeout=timeout) # pytype: disable=name-error
611 httpshandler(ui, timeout=timeout) # pytype: disable=name-error
544 )
612 )
545
613
546 handlers.append(proxyhandler(ui))
614 handlers.append(proxyhandler(ui))
547
615
548 passmgr = passwordmgr(ui, ui.httppasswordmgrdb)
616 passmgr = passwordmgr(ui, ui.httppasswordmgrdb)
549 if authinfo is not None:
617 if authinfo is not None:
550 realm, uris, user, passwd = authinfo
618 realm, uris, user, passwd = authinfo
551 saveduser, savedpass = passmgr.find_stored_password(uris[0])
619 saveduser, savedpass = passmgr.find_stored_password(uris[0])
552 if user != saveduser or passwd:
620 if user != saveduser or passwd:
553 passmgr.add_password(realm, uris, user, passwd)
621 passmgr.add_password(realm, uris, user, passwd)
554 ui.debug(
622 ui.debug(
555 b'http auth: user %s, password %s\n'
623 b'http auth: user %s, password %s\n'
556 % (user, passwd and b'*' * len(passwd) or b'not set')
624 % (user, passwd and b'*' * len(passwd) or b'not set')
557 )
625 )
558
626
559 handlers.extend(
627 handlers.extend(
560 (httpbasicauthhandler(passmgr), httpdigestauthhandler(passmgr))
628 (httpbasicauthhandler(passmgr), httpdigestauthhandler(passmgr))
561 )
629 )
562 handlers.extend([h(ui, passmgr) for h in handlerfuncs])
630 handlers.extend([h(ui, passmgr) for h in handlerfuncs])
563 handlers.append(urlreq.httpcookieprocessor(cookiejar=load_cookiejar(ui)))
631 handlers.append(urlreq.httpcookieprocessor(cookiejar=load_cookiejar(ui)))
564 handlers.append(readlinehandler())
632 handlers.append(readlinehandler())
633 if digest:
634 handlers.append(digesthandler(digest))
565 opener = urlreq.buildopener(*handlers)
635 opener = urlreq.buildopener(*handlers)
566
636
567 # keepalive.py's handlers will populate these attributes if they exist.
637 # keepalive.py's handlers will populate these attributes if they exist.
568 opener.requestscount = 0
638 opener.requestscount = 0
569 opener.sentbytescount = 0
639 opener.sentbytescount = 0
570 opener.receivedbytescount = 0
640 opener.receivedbytescount = 0
571
641
572 # The user agent should *NOT* be used by servers for e.g.
642 # The user agent should *NOT* be used by servers for e.g.
573 # protocol detection or feature negotiation: there are other
643 # protocol detection or feature negotiation: there are other
574 # facilities for that.
644 # facilities for that.
575 #
645 #
576 # "mercurial/proto-1.0" was the original user agent string and
646 # "mercurial/proto-1.0" was the original user agent string and
577 # exists for backwards compatibility reasons.
647 # exists for backwards compatibility reasons.
578 #
648 #
579 # The "(Mercurial %s)" string contains the distribution
649 # The "(Mercurial %s)" string contains the distribution
580 # name and version. Other client implementations should choose their
650 # name and version. Other client implementations should choose their
581 # own distribution name. Since servers should not be using the user
651 # own distribution name. Since servers should not be using the user
582 # agent string for anything, clients should be able to define whatever
652 # agent string for anything, clients should be able to define whatever
583 # user agent they deem appropriate.
653 # user agent they deem appropriate.
584 #
654 #
585 # The custom user agent is for lfs, because unfortunately some servers
655 # The custom user agent is for lfs, because unfortunately some servers
586 # do look at this value.
656 # do look at this value.
587 if not useragent:
657 if not useragent:
588 agent = b'mercurial/proto-1.0 (Mercurial %s)' % util.version()
658 agent = b'mercurial/proto-1.0 (Mercurial %s)' % util.version()
589 opener.addheaders = [('User-agent', pycompat.sysstr(agent))]
659 opener.addheaders = [('User-agent', pycompat.sysstr(agent))]
590 else:
660 else:
591 opener.addheaders = [('User-agent', pycompat.sysstr(useragent))]
661 opener.addheaders = [('User-agent', pycompat.sysstr(useragent))]
592
662
593 # This header should only be needed by wire protocol requests. But it has
663 # This header should only be needed by wire protocol requests. But it has
594 # been sent on all requests since forever. We keep sending it for backwards
664 # been sent on all requests since forever. We keep sending it for backwards
595 # compatibility reasons. Modern versions of the wire protocol use
665 # compatibility reasons. Modern versions of the wire protocol use
596 # X-HgProto-<N> for advertising client support.
666 # X-HgProto-<N> for advertising client support.
597 if sendaccept:
667 if sendaccept:
598 opener.addheaders.append(('Accept', 'application/mercurial-0.1'))
668 opener.addheaders.append(('Accept', 'application/mercurial-0.1'))
599
669
600 return opener
670 return opener
601
671
602
672
603 def open(ui, url_, data=None, sendaccept=True):
673 def open(ui, url_, data=None, sendaccept=True, digest=None):
604 u = urlutil.url(url_)
674 u = urlutil.url(url_)
605 if u.scheme:
675 if u.scheme:
606 u.scheme = u.scheme.lower()
676 u.scheme = u.scheme.lower()
607 url_, authinfo = u.authinfo()
677 url_, authinfo = u.authinfo()
608 else:
678 else:
609 path = util.normpath(util.abspath(url_))
679 path = util.normpath(util.abspath(url_))
610 url_ = b'file://' + pycompat.bytesurl(
680 url_ = b'file://' + pycompat.bytesurl(
611 urlreq.pathname2url(pycompat.fsdecode(path))
681 urlreq.pathname2url(pycompat.fsdecode(path))
612 )
682 )
613 authinfo = None
683 authinfo = None
614 return opener(ui, authinfo, sendaccept=sendaccept).open(
684 return opener(ui, authinfo, sendaccept=sendaccept, digest=digest).open(
615 pycompat.strurl(url_), data
685 pycompat.strurl(url_), data
616 )
686 )
617
687
618
688
619 def wrapresponse(resp):
689 def wrapresponse(resp):
620 """Wrap a response object with common error handlers.
690 """Wrap a response object with common error handlers.
621
691
622 This ensures that any I/O from any consumer raises the appropriate
692 This ensures that any I/O from any consumer raises the appropriate
623 error and messaging.
693 error and messaging.
624 """
694 """
625 origread = resp.read
695 origread = resp.read
626
696
627 class readerproxy(resp.__class__):
697 class readerproxy(resp.__class__):
628 def read(self, size=None):
698 def read(self, size=None):
629 try:
699 try:
630 return origread(size)
700 return origread(size)
631 except httplib.IncompleteRead as e:
701 except httplib.IncompleteRead as e:
632 # e.expected is an integer if length known or None otherwise.
702 # e.expected is an integer if length known or None otherwise.
633 if e.expected:
703 if e.expected:
634 got = len(e.partial)
704 got = len(e.partial)
635 total = e.expected + got
705 total = e.expected + got
636 msg = _(
706 msg = _(
637 b'HTTP request error (incomplete response; '
707 b'HTTP request error (incomplete response; '
638 b'expected %d bytes got %d)'
708 b'expected %d bytes got %d)'
639 ) % (total, got)
709 ) % (total, got)
640 else:
710 else:
641 msg = _(b'HTTP request error (incomplete response)')
711 msg = _(b'HTTP request error (incomplete response)')
642
712
643 raise error.PeerTransportError(
713 raise error.PeerTransportError(
644 msg,
714 msg,
645 hint=_(
715 hint=_(
646 b'this may be an intermittent network failure; '
716 b'this may be an intermittent network failure; '
647 b'if the error persists, consider contacting the '
717 b'if the error persists, consider contacting the '
648 b'network or server operator'
718 b'network or server operator'
649 ),
719 ),
650 )
720 )
651 except httplib.HTTPException as e:
721 except httplib.HTTPException as e:
652 raise error.PeerTransportError(
722 raise error.PeerTransportError(
653 _(b'HTTP request error (%s)') % e,
723 _(b'HTTP request error (%s)') % e,
654 hint=_(
724 hint=_(
655 b'this may be an intermittent network failure; '
725 b'this may be an intermittent network failure; '
656 b'if the error persists, consider contacting the '
726 b'if the error persists, consider contacting the '
657 b'network or server operator'
727 b'network or server operator'
658 ),
728 ),
659 )
729 )
660
730
661 resp.__class__ = readerproxy
731 resp.__class__ = readerproxy
@@ -1,855 +1,915
1 #require no-reposimplestore no-chg
1 #require no-reposimplestore no-chg
2
2
3 Set up a server
3 Set up a server
4
4
5 $ hg init server
5 $ hg init server
6 $ cd server
6 $ cd server
7 $ cat >> .hg/hgrc << EOF
7 $ cat >> .hg/hgrc << EOF
8 > [extensions]
8 > [extensions]
9 > clonebundles =
9 > clonebundles =
10 > EOF
10 > EOF
11
11
12 $ touch foo
12 $ touch foo
13 $ hg -q commit -A -m 'add foo'
13 $ hg -q commit -A -m 'add foo'
14 $ touch bar
14 $ touch bar
15 $ hg -q commit -A -m 'add bar'
15 $ hg -q commit -A -m 'add bar'
16
16
17 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
17 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
18 $ cat hg.pid >> $DAEMON_PIDS
18 $ cat hg.pid >> $DAEMON_PIDS
19 $ cd ..
19 $ cd ..
20
20
21 Missing manifest should not result in server lookup
21 Missing manifest should not result in server lookup
22
22
23 $ hg --verbose clone -U http://localhost:$HGPORT no-manifest
23 $ hg --verbose clone -U http://localhost:$HGPORT no-manifest
24 requesting all changes
24 requesting all changes
25 adding changesets
25 adding changesets
26 adding manifests
26 adding manifests
27 adding file changes
27 adding file changes
28 added 2 changesets with 2 changes to 2 files
28 added 2 changesets with 2 changes to 2 files
29 new changesets 53245c60e682:aaff8d2ffbbf
29 new changesets 53245c60e682:aaff8d2ffbbf
30 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
30 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
31
31
32 $ cat server/access.log
32 $ cat server/access.log
33 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
33 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
34 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
34 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
35 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
35 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
36
36
37 Empty manifest file results in retrieval
37 Empty manifest file results in retrieval
38 (the extension only checks if the manifest file exists)
38 (the extension only checks if the manifest file exists)
39
39
40 $ touch server/.hg/clonebundles.manifest
40 $ touch server/.hg/clonebundles.manifest
41 $ hg --verbose clone -U http://localhost:$HGPORT empty-manifest
41 $ hg --verbose clone -U http://localhost:$HGPORT empty-manifest
42 no clone bundles available on remote; falling back to regular clone
42 no clone bundles available on remote; falling back to regular clone
43 requesting all changes
43 requesting all changes
44 adding changesets
44 adding changesets
45 adding manifests
45 adding manifests
46 adding file changes
46 adding file changes
47 added 2 changesets with 2 changes to 2 files
47 added 2 changesets with 2 changes to 2 files
48 new changesets 53245c60e682:aaff8d2ffbbf
48 new changesets 53245c60e682:aaff8d2ffbbf
49 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
49 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
50
50
51 Manifest file with invalid URL aborts
51 Manifest file with invalid URL aborts
52
52
53 $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
53 $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
54 $ hg clone http://localhost:$HGPORT 404-url
54 $ hg clone http://localhost:$HGPORT 404-url
55 applying clone bundle from http://does.not.exist/bundle.hg
55 applying clone bundle from http://does.not.exist/bundle.hg
56 error fetching bundle: (.* not known|(\[Errno -?\d+] )?([Nn]o address associated with (host)?name|Temporary failure in name resolution|Name does not resolve)) (re) (no-windows !)
56 error fetching bundle: (.* not known|(\[Errno -?\d+] )?([Nn]o address associated with (host)?name|Temporary failure in name resolution|Name does not resolve)) (re) (no-windows !)
57 error fetching bundle: [Errno 1100*] getaddrinfo failed (glob) (windows !)
57 error fetching bundle: [Errno 1100*] getaddrinfo failed (glob) (windows !)
58 abort: error applying bundle
58 abort: error applying bundle
59 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
59 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
60 [255]
60 [255]
61
61
62 Manifest file with URL with unknown scheme skips the URL
62 Manifest file with URL with unknown scheme skips the URL
63 $ echo 'weirdscheme://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
63 $ echo 'weirdscheme://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
64 $ hg clone http://localhost:$HGPORT unknown-scheme
64 $ hg clone http://localhost:$HGPORT unknown-scheme
65 no compatible clone bundles available on server; falling back to regular clone
65 no compatible clone bundles available on server; falling back to regular clone
66 (you may want to report this to the server operator)
66 (you may want to report this to the server operator)
67 requesting all changes
67 requesting all changes
68 adding changesets
68 adding changesets
69 adding manifests
69 adding manifests
70 adding file changes
70 adding file changes
71 added 2 changesets with 2 changes to 2 files
71 added 2 changesets with 2 changes to 2 files
72 new changesets 53245c60e682:aaff8d2ffbbf
72 new changesets 53245c60e682:aaff8d2ffbbf
73 updating to branch default
73 updating to branch default
74 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
74 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
75
75
76 Server is not running aborts
76 Server is not running aborts
77
77
78 $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
78 $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
79 $ hg clone http://localhost:$HGPORT server-not-runner
79 $ hg clone http://localhost:$HGPORT server-not-runner
80 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
80 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
81 error fetching bundle: (.*\$ECONNREFUSED\$|Protocol not supported|(.* )?\$EADDRNOTAVAIL\$|.* No route to host) (re)
81 error fetching bundle: (.*\$ECONNREFUSED\$|Protocol not supported|(.* )?\$EADDRNOTAVAIL\$|.* No route to host) (re)
82 abort: error applying bundle
82 abort: error applying bundle
83 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
83 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
84 [255]
84 [255]
85
85
86 Server returns 404
86 Server returns 404
87
87
88 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
88 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
89 $ cat http.pid >> $DAEMON_PIDS
89 $ cat http.pid >> $DAEMON_PIDS
90 $ hg clone http://localhost:$HGPORT running-404
90 $ hg clone http://localhost:$HGPORT running-404
91 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
91 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
92 HTTP error fetching bundle: HTTP Error 404: File not found
92 HTTP error fetching bundle: HTTP Error 404: File not found
93 abort: error applying bundle
93 abort: error applying bundle
94 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
94 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
95 [255]
95 [255]
96
96
97 We can override failure to fall back to regular clone
97 We can override failure to fall back to regular clone
98
98
99 $ hg --config ui.clonebundlefallback=true clone -U http://localhost:$HGPORT 404-fallback
99 $ hg --config ui.clonebundlefallback=true clone -U http://localhost:$HGPORT 404-fallback
100 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
100 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
101 HTTP error fetching bundle: HTTP Error 404: File not found
101 HTTP error fetching bundle: HTTP Error 404: File not found
102 falling back to normal clone
102 falling back to normal clone
103 requesting all changes
103 requesting all changes
104 adding changesets
104 adding changesets
105 adding manifests
105 adding manifests
106 adding file changes
106 adding file changes
107 added 2 changesets with 2 changes to 2 files
107 added 2 changesets with 2 changes to 2 files
108 new changesets 53245c60e682:aaff8d2ffbbf
108 new changesets 53245c60e682:aaff8d2ffbbf
109
109
110 Bundle with partial content works
110 Bundle with partial content works
111
111
112 $ hg -R server bundle --type gzip-v1 --base null -r 53245c60e682 partial.hg
112 $ hg -R server bundle --type gzip-v1 --base null -r 53245c60e682 partial.hg
113 1 changesets found
113 1 changesets found
114
114
115 We verify exact bundle content as an extra check against accidental future
115 We verify exact bundle content as an extra check against accidental future
116 changes. If this output changes, we could break old clients.
116 changes. If this output changes, we could break old clients.
117
117
118 $ f --size --hexdump partial.hg
118 $ f --size --hexdump partial.hg
119 partial.hg: size=207
119 partial.hg: size=207
120 0000: 48 47 31 30 47 5a 78 9c 63 60 60 98 17 ac 12 93 |HG10GZx.c``.....|
120 0000: 48 47 31 30 47 5a 78 9c 63 60 60 98 17 ac 12 93 |HG10GZx.c``.....|
121 0010: f0 ac a9 23 45 70 cb bf 0d 5f 59 4e 4a 7f 79 21 |...#Ep..._YNJ.y!|
121 0010: f0 ac a9 23 45 70 cb bf 0d 5f 59 4e 4a 7f 79 21 |...#Ep..._YNJ.y!|
122 0020: 9b cc 40 24 20 a0 d7 ce 2c d1 38 25 cd 24 25 d5 |..@$ ...,.8%.$%.|
122 0020: 9b cc 40 24 20 a0 d7 ce 2c d1 38 25 cd 24 25 d5 |..@$ ...,.8%.$%.|
123 0030: d8 c2 22 cd 38 d9 24 cd 22 d5 c8 22 cd 24 cd 32 |..".8.$."..".$.2|
123 0030: d8 c2 22 cd 38 d9 24 cd 22 d5 c8 22 cd 24 cd 32 |..".8.$."..".$.2|
124 0040: d1 c2 d0 c4 c8 d2 32 d1 38 39 29 c9 34 cd d4 80 |......2.89).4...|
124 0040: d1 c2 d0 c4 c8 d2 32 d1 38 39 29 c9 34 cd d4 80 |......2.89).4...|
125 0050: ab 24 b5 b8 84 cb 40 c1 80 2b 2d 3f 9f 8b 2b 31 |.$....@..+-?..+1|
125 0050: ab 24 b5 b8 84 cb 40 c1 80 2b 2d 3f 9f 8b 2b 31 |.$....@..+-?..+1|
126 0060: 25 45 01 c8 80 9a d2 9b 65 fb e5 9e 45 bf 8d 7f |%E......e...E...|
126 0060: 25 45 01 c8 80 9a d2 9b 65 fb e5 9e 45 bf 8d 7f |%E......e...E...|
127 0070: 9f c6 97 9f 2b 44 34 67 d9 ec 8e 0f a0 92 0b 75 |....+D4g.......u|
127 0070: 9f c6 97 9f 2b 44 34 67 d9 ec 8e 0f a0 92 0b 75 |....+D4g.......u|
128 0080: 41 d6 24 59 18 a4 a4 9a a6 18 1a 5b 98 9b 5a 98 |A.$Y.......[..Z.|
128 0080: 41 d6 24 59 18 a4 a4 9a a6 18 1a 5b 98 9b 5a 98 |A.$Y.......[..Z.|
129 0090: 9a 18 26 9b a6 19 98 1a 99 99 26 a6 18 9a 98 24 |..&.......&....$|
129 0090: 9a 18 26 9b a6 19 98 1a 99 99 26 a6 18 9a 98 24 |..&.......&....$|
130 00a0: 26 59 a6 25 5a 98 a5 18 a6 24 71 41 35 b1 43 dc |&Y.%Z....$qA5.C.|
130 00a0: 26 59 a6 25 5a 98 a5 18 a6 24 71 41 35 b1 43 dc |&Y.%Z....$qA5.C.|
131 00b0: 16 b2 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 d7 8a |.....E..V....R..|
131 00b0: 16 b2 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 d7 8a |.....E..V....R..|
132 00c0: 78 ed fc d5 76 f1 36 35 dc 05 00 36 ed 5e c7 |x...v.65...6.^.|
132 00c0: 78 ed fc d5 76 f1 36 35 dc 05 00 36 ed 5e c7 |x...v.65...6.^.|
133
133
134 $ echo "http://localhost:$HGPORT1/partial.hg" > server/.hg/clonebundles.manifest
134 $ echo "http://localhost:$HGPORT1/partial.hg" > server/.hg/clonebundles.manifest
135 $ hg clone -U http://localhost:$HGPORT partial-bundle
135 $ hg clone -U http://localhost:$HGPORT partial-bundle
136 applying clone bundle from http://localhost:$HGPORT1/partial.hg
136 applying clone bundle from http://localhost:$HGPORT1/partial.hg
137 adding changesets
137 adding changesets
138 adding manifests
138 adding manifests
139 adding file changes
139 adding file changes
140 added 1 changesets with 1 changes to 1 files
140 added 1 changesets with 1 changes to 1 files
141 finished applying clone bundle
141 finished applying clone bundle
142 searching for changes
142 searching for changes
143 adding changesets
143 adding changesets
144 adding manifests
144 adding manifests
145 adding file changes
145 adding file changes
146 added 1 changesets with 1 changes to 1 files
146 added 1 changesets with 1 changes to 1 files
147 new changesets aaff8d2ffbbf
147 new changesets aaff8d2ffbbf
148 1 local changesets published
148 1 local changesets published
149
149
150 Incremental pull doesn't fetch bundle
150 Incremental pull doesn't fetch bundle
151
151
152 $ hg clone -r 53245c60e682 -U http://localhost:$HGPORT partial-clone
152 $ hg clone -r 53245c60e682 -U http://localhost:$HGPORT partial-clone
153 adding changesets
153 adding changesets
154 adding manifests
154 adding manifests
155 adding file changes
155 adding file changes
156 added 1 changesets with 1 changes to 1 files
156 added 1 changesets with 1 changes to 1 files
157 new changesets 53245c60e682
157 new changesets 53245c60e682
158
158
159 $ cd partial-clone
159 $ cd partial-clone
160 $ hg pull
160 $ hg pull
161 pulling from http://localhost:$HGPORT/
161 pulling from http://localhost:$HGPORT/
162 searching for changes
162 searching for changes
163 adding changesets
163 adding changesets
164 adding manifests
164 adding manifests
165 adding file changes
165 adding file changes
166 added 1 changesets with 1 changes to 1 files
166 added 1 changesets with 1 changes to 1 files
167 new changesets aaff8d2ffbbf
167 new changesets aaff8d2ffbbf
168 (run 'hg update' to get a working copy)
168 (run 'hg update' to get a working copy)
169 $ cd ..
169 $ cd ..
170
170
171 Bundle with full content works
171 Bundle with full content works
172
172
173 $ hg -R server bundle --type gzip-v2 --base null -r tip full.hg
173 $ hg -R server bundle --type gzip-v2 --base null -r tip full.hg
174 2 changesets found
174 2 changesets found
175
175
176 Again, we perform an extra check against bundle content changes. If this content
176 Again, we perform an extra check against bundle content changes. If this content
177 changes, clone bundles produced by new Mercurial versions may not be readable
177 changes, clone bundles produced by new Mercurial versions may not be readable
178 by old clients.
178 by old clients.
179
179
180 $ f --size --hexdump full.hg
180 $ f --size --hexdump full.hg
181 full.hg: size=442
181 full.hg: size=442
182 0000: 48 47 32 30 00 00 00 0e 43 6f 6d 70 72 65 73 73 |HG20....Compress|
182 0000: 48 47 32 30 00 00 00 0e 43 6f 6d 70 72 65 73 73 |HG20....Compress|
183 0010: 69 6f 6e 3d 47 5a 78 9c 63 60 60 d0 e4 76 f6 70 |ion=GZx.c``..v.p|
183 0010: 69 6f 6e 3d 47 5a 78 9c 63 60 60 d0 e4 76 f6 70 |ion=GZx.c``..v.p|
184 0020: f4 73 77 75 0f f2 0f 0d 60 00 02 46 46 76 26 4e |.swu....`..FFv&N|
184 0020: f4 73 77 75 0f f2 0f 0d 60 00 02 46 46 76 26 4e |.swu....`..FFv&N|
185 0030: c6 b2 d4 a2 e2 cc fc 3c 03 a3 bc a4 e4 8c c4 bc |.......<........|
185 0030: c6 b2 d4 a2 e2 cc fc 3c 03 a3 bc a4 e4 8c c4 bc |.......<........|
186 0040: f4 d4 62 23 06 06 e6 19 40 f9 4d c1 2a 31 09 cf |..b#....@.M.*1..|
186 0040: f4 d4 62 23 06 06 e6 19 40 f9 4d c1 2a 31 09 cf |..b#....@.M.*1..|
187 0050: 9a 3a 52 04 b7 fc db f0 95 e5 a4 f4 97 17 b2 c9 |.:R.............|
187 0050: 9a 3a 52 04 b7 fc db f0 95 e5 a4 f4 97 17 b2 c9 |.:R.............|
188 0060: 0c 14 00 02 e6 d9 99 25 1a a7 a4 99 a4 a4 1a 5b |.......%.......[|
188 0060: 0c 14 00 02 e6 d9 99 25 1a a7 a4 99 a4 a4 1a 5b |.......%.......[|
189 0070: 58 a4 19 27 9b a4 59 a4 1a 59 a4 99 a4 59 26 5a |X..'..Y..Y...Y&Z|
189 0070: 58 a4 19 27 9b a4 59 a4 1a 59 a4 99 a4 59 26 5a |X..'..Y..Y...Y&Z|
190 0080: 18 9a 18 59 5a 26 1a 27 27 25 99 a6 99 1a 70 95 |...YZ&.''%....p.|
190 0080: 18 9a 18 59 5a 26 1a 27 27 25 99 a6 99 1a 70 95 |...YZ&.''%....p.|
191 0090: a4 16 97 70 19 28 18 70 a5 e5 e7 73 71 25 a6 a4 |...p.(.p...sq%..|
191 0090: a4 16 97 70 19 28 18 70 a5 e5 e7 73 71 25 a6 a4 |...p.(.p...sq%..|
192 00a0: 28 00 19 20 17 af fa df ab ff 7b 3f fb 92 dc 8b |(.. ......{?....|
192 00a0: 28 00 19 20 17 af fa df ab ff 7b 3f fb 92 dc 8b |(.. ......{?....|
193 00b0: 1f 62 bb 9e b7 d7 d9 87 3d 5a 44 89 2f b0 99 87 |.b......=ZD./...|
193 00b0: 1f 62 bb 9e b7 d7 d9 87 3d 5a 44 89 2f b0 99 87 |.b......=ZD./...|
194 00c0: ec e2 54 63 43 e3 b4 64 43 73 23 33 43 53 0b 63 |..TcC..dCs#3CS.c|
194 00c0: ec e2 54 63 43 e3 b4 64 43 73 23 33 43 53 0b 63 |..TcC..dCs#3CS.c|
195 00d0: d3 14 23 03 a0 fb 2c 2c 0c d3 80 1e 30 49 49 b1 |..#...,,....0II.|
195 00d0: d3 14 23 03 a0 fb 2c 2c 0c d3 80 1e 30 49 49 b1 |..#...,,....0II.|
196 00e0: 4c 4a 32 48 33 30 b0 34 42 b8 38 29 b1 08 e2 62 |LJ2H30.4B.8)...b|
196 00e0: 4c 4a 32 48 33 30 b0 34 42 b8 38 29 b1 08 e2 62 |LJ2H30.4B.8)...b|
197 00f0: 20 03 6a ca c2 2c db 2f f7 2c fa 6d fc fb 34 be | .j..,./.,.m..4.|
197 00f0: 20 03 6a ca c2 2c db 2f f7 2c fa 6d fc fb 34 be | .j..,./.,.m..4.|
198 0100: fc 5c 21 a2 39 cb 66 77 7c 00 0d c3 59 17 14 58 |.\!.9.fw|...Y..X|
198 0100: fc 5c 21 a2 39 cb 66 77 7c 00 0d c3 59 17 14 58 |.\!.9.fw|...Y..X|
199 0110: 49 16 06 29 a9 a6 29 86 c6 16 e6 a6 16 a6 26 86 |I..)..).......&.|
199 0110: 49 16 06 29 a9 a6 29 86 c6 16 e6 a6 16 a6 26 86 |I..)..).......&.|
200 0120: c9 a6 69 06 a6 46 66 a6 89 29 86 26 26 89 49 96 |..i..Ff..).&&.I.|
200 0120: c9 a6 69 06 a6 46 66 a6 89 29 86 26 26 89 49 96 |..i..Ff..).&&.I.|
201 0130: 69 89 16 66 29 86 29 49 5c 20 07 3e 16 fe 23 ae |i..f).)I\ .>..#.|
201 0130: 69 89 16 66 29 86 29 49 5c 20 07 3e 16 fe 23 ae |i..f).)I\ .>..#.|
202 0140: 26 da 1c ab 10 1f d1 f8 e3 b3 ef cd dd fc 0c 93 |&...............|
202 0140: 26 da 1c ab 10 1f d1 f8 e3 b3 ef cd dd fc 0c 93 |&...............|
203 0150: 88 75 34 36 75 04 82 55 17 14 36 a4 38 10 04 d8 |.u46u..U..6.8...|
203 0150: 88 75 34 36 75 04 82 55 17 14 36 a4 38 10 04 d8 |.u46u..U..6.8...|
204 0160: 21 01 9a b1 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 |!......E..V....R|
204 0160: 21 01 9a b1 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 |!......E..V....R|
205 0170: d7 8a 78 ed fc d5 76 f1 36 25 81 89 c7 ad ec 90 |..x...v.6%......|
205 0170: d7 8a 78 ed fc d5 76 f1 36 25 81 89 c7 ad ec 90 |..x...v.6%......|
206 0180: 54 47 75 2b 89 48 b1 b2 62 c9 89 c9 19 a9 56 45 |TGu+.H..b.....VE|
206 0180: 54 47 75 2b 89 48 b1 b2 62 c9 89 c9 19 a9 56 45 |TGu+.H..b.....VE|
207 0190: a9 65 ba 49 45 89 79 c9 19 ba 60 01 a0 14 23 58 |.e.IE.y...`...#X|
207 0190: a9 65 ba 49 45 89 79 c9 19 ba 60 01 a0 14 23 58 |.e.IE.y...`...#X|
208 01a0: 81 35 c8 7d 40 cc 04 e2 a4 a4 a6 25 96 e6 94 60 |.5.}@......%...`|
208 01a0: 81 35 c8 7d 40 cc 04 e2 a4 a4 a6 25 96 e6 94 60 |.5.}@......%...`|
209 01b0: 33 17 5f 54 00 00 d3 1b 0d 4c |3._T.....L|
209 01b0: 33 17 5f 54 00 00 d3 1b 0d 4c |3._T.....L|
210
210
211 $ echo "http://localhost:$HGPORT1/full.hg" > server/.hg/clonebundles.manifest
211 $ echo "http://localhost:$HGPORT1/full.hg" > server/.hg/clonebundles.manifest
212 $ hg clone -U http://localhost:$HGPORT full-bundle
212 $ hg clone -U http://localhost:$HGPORT full-bundle
213 applying clone bundle from http://localhost:$HGPORT1/full.hg
213 applying clone bundle from http://localhost:$HGPORT1/full.hg
214 adding changesets
214 adding changesets
215 adding manifests
215 adding manifests
216 adding file changes
216 adding file changes
217 added 2 changesets with 2 changes to 2 files
217 added 2 changesets with 2 changes to 2 files
218 finished applying clone bundle
218 finished applying clone bundle
219 searching for changes
219 searching for changes
220 no changes found
220 no changes found
221 2 local changesets published
221 2 local changesets published
222
222
223 Feature works over SSH
223 Feature works over SSH
224
224
225 $ hg clone -U ssh://user@dummy/server ssh-full-clone
225 $ hg clone -U ssh://user@dummy/server ssh-full-clone
226 applying clone bundle from http://localhost:$HGPORT1/full.hg
226 applying clone bundle from http://localhost:$HGPORT1/full.hg
227 adding changesets
227 adding changesets
228 adding manifests
228 adding manifests
229 adding file changes
229 adding file changes
230 added 2 changesets with 2 changes to 2 files
230 added 2 changesets with 2 changes to 2 files
231 finished applying clone bundle
231 finished applying clone bundle
232 searching for changes
232 searching for changes
233 no changes found
233 no changes found
234 2 local changesets published
234 2 local changesets published
235
235
236 Inline bundle
236 Inline bundle
237 =============
237 =============
238
238
239 Checking bundle retrieved over the wireprotocol
239 Checking bundle retrieved over the wireprotocol
240
240
241 Feature works over SSH with inline bundle
241 Feature works over SSH with inline bundle
242 -----------------------------------------
242 -----------------------------------------
243
243
244 $ mkdir server/.hg/bundle-cache/
244 $ mkdir server/.hg/bundle-cache/
245 $ cp full.hg server/.hg/bundle-cache/
245 $ cp full.hg server/.hg/bundle-cache/
246 $ echo "peer-bundle-cache://full.hg" > server/.hg/clonebundles.manifest
246 $ echo "peer-bundle-cache://full.hg" > server/.hg/clonebundles.manifest
247 $ hg clone -U ssh://user@dummy/server ssh-inline-clone
247 $ hg clone -U ssh://user@dummy/server ssh-inline-clone
248 applying clone bundle from peer-bundle-cache://full.hg
248 applying clone bundle from peer-bundle-cache://full.hg
249 adding changesets
249 adding changesets
250 adding manifests
250 adding manifests
251 adding file changes
251 adding file changes
252 added 2 changesets with 2 changes to 2 files
252 added 2 changesets with 2 changes to 2 files
253 finished applying clone bundle
253 finished applying clone bundle
254 searching for changes
254 searching for changes
255 no changes found
255 no changes found
256 2 local changesets published
256 2 local changesets published
257
257
258 HTTP Supports
258 HTTP Supports
259 -------------
259 -------------
260
260
261 $ hg clone -U http://localhost:$HGPORT http-inline-clone
261 $ hg clone -U http://localhost:$HGPORT http-inline-clone
262 applying clone bundle from peer-bundle-cache://full.hg
262 applying clone bundle from peer-bundle-cache://full.hg
263 adding changesets
263 adding changesets
264 adding manifests
264 adding manifests
265 adding file changes
265 adding file changes
266 added 2 changesets with 2 changes to 2 files
266 added 2 changesets with 2 changes to 2 files
267 finished applying clone bundle
267 finished applying clone bundle
268 searching for changes
268 searching for changes
269 no changes found
269 no changes found
270 2 local changesets published
270 2 local changesets published
271
271
272
272
273 Check local behavior
273 Check local behavior
274 --------------------
274 --------------------
275
275
276 We don't use the clone bundle, but we do not crash either.
276 We don't use the clone bundle, but we do not crash either.
277
277
278 $ hg clone -U ./server local-inline-clone-default
278 $ hg clone -U ./server local-inline-clone-default
279 $ hg clone -U ./server local-inline-clone-pull --pull
279 $ hg clone -U ./server local-inline-clone-pull --pull
280 requesting all changes
280 requesting all changes
281 adding changesets
281 adding changesets
282 adding manifests
282 adding manifests
283 adding file changes
283 adding file changes
284 added 2 changesets with 2 changes to 2 files
284 added 2 changesets with 2 changes to 2 files
285 new changesets 53245c60e682:aaff8d2ffbbf
285 new changesets 53245c60e682:aaff8d2ffbbf
286
286
287 Pre-transmit Hook
287 Pre-transmit Hook
288 -----------------
288 -----------------
289
289
290 Hooks work with inline bundle
290 Hooks work with inline bundle
291
291
292 $ cp server/.hg/hgrc server/.hg/hgrc-beforeinlinehooks
292 $ cp server/.hg/hgrc server/.hg/hgrc-beforeinlinehooks
293 $ echo "[hooks]" >> server/.hg/hgrc
293 $ echo "[hooks]" >> server/.hg/hgrc
294 $ echo "pretransmit-inline-clone-bundle=echo foo" >> server/.hg/hgrc
294 $ echo "pretransmit-inline-clone-bundle=echo foo" >> server/.hg/hgrc
295 $ hg clone -U ssh://user@dummy/server ssh-inline-clone-hook
295 $ hg clone -U ssh://user@dummy/server ssh-inline-clone-hook
296 applying clone bundle from peer-bundle-cache://full.hg
296 applying clone bundle from peer-bundle-cache://full.hg
297 remote: foo
297 remote: foo
298 adding changesets
298 adding changesets
299 adding manifests
299 adding manifests
300 adding file changes
300 adding file changes
301 added 2 changesets with 2 changes to 2 files
301 added 2 changesets with 2 changes to 2 files
302 finished applying clone bundle
302 finished applying clone bundle
303 searching for changes
303 searching for changes
304 no changes found
304 no changes found
305 2 local changesets published
305 2 local changesets published
306
306
307 Hooks can make an inline bundle fail
307 Hooks can make an inline bundle fail
308
308
309 $ cp server/.hg/hgrc-beforeinlinehooks server/.hg/hgrc
309 $ cp server/.hg/hgrc-beforeinlinehooks server/.hg/hgrc
310 $ echo "[hooks]" >> server/.hg/hgrc
310 $ echo "[hooks]" >> server/.hg/hgrc
311 $ echo "pretransmit-inline-clone-bundle=echo bar && false" >> server/.hg/hgrc
311 $ echo "pretransmit-inline-clone-bundle=echo bar && false" >> server/.hg/hgrc
312 $ hg clone -U ssh://user@dummy/server ssh-inline-clone-hook-fail
312 $ hg clone -U ssh://user@dummy/server ssh-inline-clone-hook-fail
313 applying clone bundle from peer-bundle-cache://full.hg
313 applying clone bundle from peer-bundle-cache://full.hg
314 remote: bar
314 remote: bar
315 remote: abort: pretransmit-inline-clone-bundle hook exited with status 1
315 remote: abort: pretransmit-inline-clone-bundle hook exited with status 1
316 abort: stream ended unexpectedly (got 0 bytes, expected 1)
316 abort: stream ended unexpectedly (got 0 bytes, expected 1)
317 [255]
317 [255]
318 $ cp server/.hg/hgrc-beforeinlinehooks server/.hg/hgrc
318 $ cp server/.hg/hgrc-beforeinlinehooks server/.hg/hgrc
319
319
320 Other tests
320 Other tests
321 ===========
321 ===========
322
322
323 Entry with unknown BUNDLESPEC is filtered and not used
323 Entry with unknown BUNDLESPEC is filtered and not used
324
324
325 $ cat > server/.hg/clonebundles.manifest << EOF
325 $ cat > server/.hg/clonebundles.manifest << EOF
326 > http://bad.entry1 BUNDLESPEC=UNKNOWN
326 > http://bad.entry1 BUNDLESPEC=UNKNOWN
327 > http://bad.entry2 BUNDLESPEC=xz-v1
327 > http://bad.entry2 BUNDLESPEC=xz-v1
328 > http://bad.entry3 BUNDLESPEC=none-v100
328 > http://bad.entry3 BUNDLESPEC=none-v100
329 > http://localhost:$HGPORT1/full.hg BUNDLESPEC=gzip-v2
329 > http://localhost:$HGPORT1/full.hg BUNDLESPEC=gzip-v2
330 > EOF
330 > EOF
331
331
332 $ hg clone -U http://localhost:$HGPORT filter-unknown-type
332 $ hg clone -U http://localhost:$HGPORT filter-unknown-type
333 applying clone bundle from http://localhost:$HGPORT1/full.hg
333 applying clone bundle from http://localhost:$HGPORT1/full.hg
334 adding changesets
334 adding changesets
335 adding manifests
335 adding manifests
336 adding file changes
336 adding file changes
337 added 2 changesets with 2 changes to 2 files
337 added 2 changesets with 2 changes to 2 files
338 finished applying clone bundle
338 finished applying clone bundle
339 searching for changes
339 searching for changes
340 no changes found
340 no changes found
341 2 local changesets published
341 2 local changesets published
342
342
343 Automatic fallback when all entries are filtered
343 Automatic fallback when all entries are filtered
344
344
345 $ cat > server/.hg/clonebundles.manifest << EOF
345 $ cat > server/.hg/clonebundles.manifest << EOF
346 > http://bad.entry BUNDLESPEC=UNKNOWN
346 > http://bad.entry BUNDLESPEC=UNKNOWN
347 > EOF
347 > EOF
348
348
349 $ hg clone -U http://localhost:$HGPORT filter-all
349 $ hg clone -U http://localhost:$HGPORT filter-all
350 no compatible clone bundles available on server; falling back to regular clone
350 no compatible clone bundles available on server; falling back to regular clone
351 (you may want to report this to the server operator)
351 (you may want to report this to the server operator)
352 requesting all changes
352 requesting all changes
353 adding changesets
353 adding changesets
354 adding manifests
354 adding manifests
355 adding file changes
355 adding file changes
356 added 2 changesets with 2 changes to 2 files
356 added 2 changesets with 2 changes to 2 files
357 new changesets 53245c60e682:aaff8d2ffbbf
357 new changesets 53245c60e682:aaff8d2ffbbf
358
358
359 We require a Python version that supports SNI. Therefore, URLs requiring SNI
359 We require a Python version that supports SNI. Therefore, URLs requiring SNI
360 are not filtered.
360 are not filtered.
361
361
362 $ cp full.hg sni.hg
362 $ cp full.hg sni.hg
363 $ cat > server/.hg/clonebundles.manifest << EOF
363 $ cat > server/.hg/clonebundles.manifest << EOF
364 > http://localhost:$HGPORT1/sni.hg REQUIRESNI=true
364 > http://localhost:$HGPORT1/sni.hg REQUIRESNI=true
365 > http://localhost:$HGPORT1/full.hg
365 > http://localhost:$HGPORT1/full.hg
366 > EOF
366 > EOF
367
367
368 $ hg clone -U http://localhost:$HGPORT sni-supported
368 $ hg clone -U http://localhost:$HGPORT sni-supported
369 applying clone bundle from http://localhost:$HGPORT1/sni.hg
369 applying clone bundle from http://localhost:$HGPORT1/sni.hg
370 adding changesets
370 adding changesets
371 adding manifests
371 adding manifests
372 adding file changes
372 adding file changes
373 added 2 changesets with 2 changes to 2 files
373 added 2 changesets with 2 changes to 2 files
374 finished applying clone bundle
374 finished applying clone bundle
375 searching for changes
375 searching for changes
376 no changes found
376 no changes found
377 2 local changesets published
377 2 local changesets published
378
378
379 Stream clone bundles are supported
379 Stream clone bundles are supported
380
380
381 $ hg -R server debugcreatestreamclonebundle packed.hg
381 $ hg -R server debugcreatestreamclonebundle packed.hg
382 writing 613 bytes for 5 files (no-rust !)
382 writing 613 bytes for 5 files (no-rust !)
383 writing 739 bytes for 7 files (rust !)
383 writing 739 bytes for 7 files (rust !)
384 bundle requirements: generaldelta, revlogv1, sparserevlog (no-rust no-zstd !)
384 bundle requirements: generaldelta, revlogv1, sparserevlog (no-rust no-zstd !)
385 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog (no-rust zstd !)
385 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog (no-rust zstd !)
386 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog (rust !)
386 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog (rust !)
387
387
388 No bundle spec should work
388 No bundle spec should work
389
389
390 $ cat > server/.hg/clonebundles.manifest << EOF
390 $ cat > server/.hg/clonebundles.manifest << EOF
391 > http://localhost:$HGPORT1/packed.hg
391 > http://localhost:$HGPORT1/packed.hg
392 > EOF
392 > EOF
393
393
394 $ hg clone -U http://localhost:$HGPORT stream-clone-no-spec
394 $ hg clone -U http://localhost:$HGPORT stream-clone-no-spec
395 applying clone bundle from http://localhost:$HGPORT1/packed.hg
395 applying clone bundle from http://localhost:$HGPORT1/packed.hg
396 5 files to transfer, 613 bytes of data (no-rust !)
396 5 files to transfer, 613 bytes of data (no-rust !)
397 transferred 613 bytes in * seconds (* */sec) (glob) (no-rust !)
397 transferred 613 bytes in * seconds (* */sec) (glob) (no-rust !)
398 7 files to transfer, 739 bytes of data (rust !)
398 7 files to transfer, 739 bytes of data (rust !)
399 transferred 739 bytes in * seconds (* */sec) (glob) (rust !)
399 transferred 739 bytes in * seconds (* */sec) (glob) (rust !)
400 finished applying clone bundle
400 finished applying clone bundle
401 searching for changes
401 searching for changes
402 no changes found
402 no changes found
403
403
404 Bundle spec without parameters should work
404 Bundle spec without parameters should work
405
405
406 $ cat > server/.hg/clonebundles.manifest << EOF
406 $ cat > server/.hg/clonebundles.manifest << EOF
407 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
407 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
408 > EOF
408 > EOF
409
409
410 $ hg clone -U http://localhost:$HGPORT stream-clone-vanilla-spec
410 $ hg clone -U http://localhost:$HGPORT stream-clone-vanilla-spec
411 applying clone bundle from http://localhost:$HGPORT1/packed.hg
411 applying clone bundle from http://localhost:$HGPORT1/packed.hg
412 * files to transfer, * bytes of data (glob)
412 * files to transfer, * bytes of data (glob)
413 transferred * bytes in * seconds (* */sec) (glob)
413 transferred * bytes in * seconds (* */sec) (glob)
414 finished applying clone bundle
414 finished applying clone bundle
415 searching for changes
415 searching for changes
416 no changes found
416 no changes found
417
417
418 Bundle spec with format requirements should work
418 Bundle spec with format requirements should work
419
419
420 $ cat > server/.hg/clonebundles.manifest << EOF
420 $ cat > server/.hg/clonebundles.manifest << EOF
421 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
421 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
422 > EOF
422 > EOF
423
423
424 $ hg clone -U http://localhost:$HGPORT stream-clone-supported-requirements
424 $ hg clone -U http://localhost:$HGPORT stream-clone-supported-requirements
425 applying clone bundle from http://localhost:$HGPORT1/packed.hg
425 applying clone bundle from http://localhost:$HGPORT1/packed.hg
426 * files to transfer, * bytes of data (glob)
426 * files to transfer, * bytes of data (glob)
427 transferred * bytes in * seconds (* */sec) (glob)
427 transferred * bytes in * seconds (* */sec) (glob)
428 finished applying clone bundle
428 finished applying clone bundle
429 searching for changes
429 searching for changes
430 no changes found
430 no changes found
431
431
432 Stream bundle spec with unknown requirements should be filtered out
432 Stream bundle spec with unknown requirements should be filtered out
433
433
434 $ cat > server/.hg/clonebundles.manifest << EOF
434 $ cat > server/.hg/clonebundles.manifest << EOF
435 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
435 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
436 > EOF
436 > EOF
437
437
438 $ hg clone -U http://localhost:$HGPORT stream-clone-unsupported-requirements
438 $ hg clone -U http://localhost:$HGPORT stream-clone-unsupported-requirements
439 no compatible clone bundles available on server; falling back to regular clone
439 no compatible clone bundles available on server; falling back to regular clone
440 (you may want to report this to the server operator)
440 (you may want to report this to the server operator)
441 requesting all changes
441 requesting all changes
442 adding changesets
442 adding changesets
443 adding manifests
443 adding manifests
444 adding file changes
444 adding file changes
445 added 2 changesets with 2 changes to 2 files
445 added 2 changesets with 2 changes to 2 files
446 new changesets 53245c60e682:aaff8d2ffbbf
446 new changesets 53245c60e682:aaff8d2ffbbf
447
447
448 Set up manifest for testing preferences
448 Set up manifest for testing preferences
449 (Remember, the TYPE does not have to match reality - the URL is
449 (Remember, the TYPE does not have to match reality - the URL is
450 important)
450 important)
451
451
452 $ cp full.hg gz-a.hg
452 $ cp full.hg gz-a.hg
453 $ cp full.hg gz-b.hg
453 $ cp full.hg gz-b.hg
454 $ cp full.hg bz2-a.hg
454 $ cp full.hg bz2-a.hg
455 $ cp full.hg bz2-b.hg
455 $ cp full.hg bz2-b.hg
456 $ cat > server/.hg/clonebundles.manifest << EOF
456 $ cat > server/.hg/clonebundles.manifest << EOF
457 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 extra=a
457 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 extra=a
458 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2 extra=a
458 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2 extra=a
459 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
459 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
460 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
460 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
461 > EOF
461 > EOF
462
462
463 Preferring an undefined attribute will take first entry
463 Preferring an undefined attribute will take first entry
464
464
465 $ hg --config ui.clonebundleprefers=foo=bar clone -U http://localhost:$HGPORT prefer-foo
465 $ hg --config ui.clonebundleprefers=foo=bar clone -U http://localhost:$HGPORT prefer-foo
466 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
466 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
467 adding changesets
467 adding changesets
468 adding manifests
468 adding manifests
469 adding file changes
469 adding file changes
470 added 2 changesets with 2 changes to 2 files
470 added 2 changesets with 2 changes to 2 files
471 finished applying clone bundle
471 finished applying clone bundle
472 searching for changes
472 searching for changes
473 no changes found
473 no changes found
474 2 local changesets published
474 2 local changesets published
475
475
476 Preferring bz2 type will download first entry of that type
476 Preferring bz2 type will download first entry of that type
477
477
478 $ hg --config ui.clonebundleprefers=COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-bz
478 $ hg --config ui.clonebundleprefers=COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-bz
479 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
479 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
480 adding changesets
480 adding changesets
481 adding manifests
481 adding manifests
482 adding file changes
482 adding file changes
483 added 2 changesets with 2 changes to 2 files
483 added 2 changesets with 2 changes to 2 files
484 finished applying clone bundle
484 finished applying clone bundle
485 searching for changes
485 searching for changes
486 no changes found
486 no changes found
487 2 local changesets published
487 2 local changesets published
488
488
489 Preferring multiple values of an option works
489 Preferring multiple values of an option works
490
490
491 $ hg --config ui.clonebundleprefers=COMPRESSION=unknown,COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-multiple-bz
491 $ hg --config ui.clonebundleprefers=COMPRESSION=unknown,COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-multiple-bz
492 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
492 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
493 adding changesets
493 adding changesets
494 adding manifests
494 adding manifests
495 adding file changes
495 adding file changes
496 added 2 changesets with 2 changes to 2 files
496 added 2 changesets with 2 changes to 2 files
497 finished applying clone bundle
497 finished applying clone bundle
498 searching for changes
498 searching for changes
499 no changes found
499 no changes found
500 2 local changesets published
500 2 local changesets published
501
501
502 Sorting multiple values should get us back to original first entry
502 Sorting multiple values should get us back to original first entry
503
503
504 $ hg --config ui.clonebundleprefers=BUNDLESPEC=unknown,BUNDLESPEC=gzip-v2,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-multiple-gz
504 $ hg --config ui.clonebundleprefers=BUNDLESPEC=unknown,BUNDLESPEC=gzip-v2,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-multiple-gz
505 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
505 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
506 adding changesets
506 adding changesets
507 adding manifests
507 adding manifests
508 adding file changes
508 adding file changes
509 added 2 changesets with 2 changes to 2 files
509 added 2 changesets with 2 changes to 2 files
510 finished applying clone bundle
510 finished applying clone bundle
511 searching for changes
511 searching for changes
512 no changes found
512 no changes found
513 2 local changesets published
513 2 local changesets published
514
514
515 Preferring multiple attributes has correct order
515 Preferring multiple attributes has correct order
516
516
517 $ hg --config ui.clonebundleprefers=extra=b,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-separate-attributes
517 $ hg --config ui.clonebundleprefers=extra=b,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-separate-attributes
518 applying clone bundle from http://localhost:$HGPORT1/bz2-b.hg
518 applying clone bundle from http://localhost:$HGPORT1/bz2-b.hg
519 adding changesets
519 adding changesets
520 adding manifests
520 adding manifests
521 adding file changes
521 adding file changes
522 added 2 changesets with 2 changes to 2 files
522 added 2 changesets with 2 changes to 2 files
523 finished applying clone bundle
523 finished applying clone bundle
524 searching for changes
524 searching for changes
525 no changes found
525 no changes found
526 2 local changesets published
526 2 local changesets published
527
527
528 Test where attribute is missing from some entries
528 Test where attribute is missing from some entries
529
529
530 $ cat > server/.hg/clonebundles.manifest << EOF
530 $ cat > server/.hg/clonebundles.manifest << EOF
531 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
531 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
532 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2
532 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2
533 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
533 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
534 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
534 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
535 > EOF
535 > EOF
536
536
537 $ hg --config ui.clonebundleprefers=extra=b clone -U http://localhost:$HGPORT prefer-partially-defined-attribute
537 $ hg --config ui.clonebundleprefers=extra=b clone -U http://localhost:$HGPORT prefer-partially-defined-attribute
538 applying clone bundle from http://localhost:$HGPORT1/gz-b.hg
538 applying clone bundle from http://localhost:$HGPORT1/gz-b.hg
539 adding changesets
539 adding changesets
540 adding manifests
540 adding manifests
541 adding file changes
541 adding file changes
542 added 2 changesets with 2 changes to 2 files
542 added 2 changesets with 2 changes to 2 files
543 finished applying clone bundle
543 finished applying clone bundle
544 searching for changes
544 searching for changes
545 no changes found
545 no changes found
546 2 local changesets published
546 2 local changesets published
547
547
548 Test a bad attribute list
548 Test a bad attribute list
549
549
550 $ hg --config ui.clonebundleprefers=bad clone -U http://localhost:$HGPORT bad-input
550 $ hg --config ui.clonebundleprefers=bad clone -U http://localhost:$HGPORT bad-input
551 abort: invalid ui.clonebundleprefers item: bad
551 abort: invalid ui.clonebundleprefers item: bad
552 (each comma separated item should be key=value pairs)
552 (each comma separated item should be key=value pairs)
553 [255]
553 [255]
554 $ hg --config ui.clonebundleprefers=key=val,bad,key2=val2 clone \
554 $ hg --config ui.clonebundleprefers=key=val,bad,key2=val2 clone \
555 > -U http://localhost:$HGPORT bad-input
555 > -U http://localhost:$HGPORT bad-input
556 abort: invalid ui.clonebundleprefers item: bad
556 abort: invalid ui.clonebundleprefers item: bad
557 (each comma separated item should be key=value pairs)
557 (each comma separated item should be key=value pairs)
558 [255]
558 [255]
559
559
560
560
561 Test interaction between clone bundles and --stream
561 Test interaction between clone bundles and --stream
562
562
563 A manifest with just a gzip bundle
563 A manifest with just a gzip bundle
564
564
565 $ cat > server/.hg/clonebundles.manifest << EOF
565 $ cat > server/.hg/clonebundles.manifest << EOF
566 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
566 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
567 > EOF
567 > EOF
568
568
569 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip
569 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip
570 no compatible clone bundles available on server; falling back to regular clone
570 no compatible clone bundles available on server; falling back to regular clone
571 (you may want to report this to the server operator)
571 (you may want to report this to the server operator)
572 streaming all changes
572 streaming all changes
573 * files to transfer, * bytes of data (glob)
573 * files to transfer, * bytes of data (glob)
574 transferred * bytes in * seconds (* */sec) (glob)
574 transferred * bytes in * seconds (* */sec) (glob)
575
575
576 A manifest with a stream clone but no BUNDLESPEC
576 A manifest with a stream clone but no BUNDLESPEC
577
577
578 $ cat > server/.hg/clonebundles.manifest << EOF
578 $ cat > server/.hg/clonebundles.manifest << EOF
579 > http://localhost:$HGPORT1/packed.hg
579 > http://localhost:$HGPORT1/packed.hg
580 > EOF
580 > EOF
581
581
582 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-no-bundlespec
582 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-no-bundlespec
583 no compatible clone bundles available on server; falling back to regular clone
583 no compatible clone bundles available on server; falling back to regular clone
584 (you may want to report this to the server operator)
584 (you may want to report this to the server operator)
585 streaming all changes
585 streaming all changes
586 * files to transfer, * bytes of data (glob)
586 * files to transfer, * bytes of data (glob)
587 transferred * bytes in * seconds (* */sec) (glob)
587 transferred * bytes in * seconds (* */sec) (glob)
588
588
589 A manifest with a gzip bundle and a stream clone
589 A manifest with a gzip bundle and a stream clone
590
590
591 $ cat > server/.hg/clonebundles.manifest << EOF
591 $ cat > server/.hg/clonebundles.manifest << EOF
592 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
592 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
593 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
593 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
594 > EOF
594 > EOF
595
595
596 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed
596 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed
597 applying clone bundle from http://localhost:$HGPORT1/packed.hg
597 applying clone bundle from http://localhost:$HGPORT1/packed.hg
598 * files to transfer, * bytes of data (glob)
598 * files to transfer, * bytes of data (glob)
599 transferred * bytes in * seconds (* */sec) (glob)
599 transferred * bytes in * seconds (* */sec) (glob)
600 finished applying clone bundle
600 finished applying clone bundle
601 searching for changes
601 searching for changes
602 no changes found
602 no changes found
603
603
604 A manifest with a gzip bundle and stream clone with supported requirements
604 A manifest with a gzip bundle and stream clone with supported requirements
605
605
606 $ cat > server/.hg/clonebundles.manifest << EOF
606 $ cat > server/.hg/clonebundles.manifest << EOF
607 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
607 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
608 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
608 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
609 > EOF
609 > EOF
610
610
611 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-requirements
611 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-requirements
612 applying clone bundle from http://localhost:$HGPORT1/packed.hg
612 applying clone bundle from http://localhost:$HGPORT1/packed.hg
613 * files to transfer, * bytes of data (glob)
613 * files to transfer, * bytes of data (glob)
614 transferred * bytes in * seconds (* */sec) (glob)
614 transferred * bytes in * seconds (* */sec) (glob)
615 finished applying clone bundle
615 finished applying clone bundle
616 searching for changes
616 searching for changes
617 no changes found
617 no changes found
618
618
619 A manifest with a gzip bundle and a stream clone with unsupported requirements
619 A manifest with a gzip bundle and a stream clone with unsupported requirements
620
620
621 $ cat > server/.hg/clonebundles.manifest << EOF
621 $ cat > server/.hg/clonebundles.manifest << EOF
622 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
622 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
623 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
623 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
624 > EOF
624 > EOF
625
625
626 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-unsupported-requirements
626 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-unsupported-requirements
627 no compatible clone bundles available on server; falling back to regular clone
627 no compatible clone bundles available on server; falling back to regular clone
628 (you may want to report this to the server operator)
628 (you may want to report this to the server operator)
629 streaming all changes
629 streaming all changes
630 * files to transfer, * bytes of data (glob)
630 * files to transfer, * bytes of data (glob)
631 transferred * bytes in * seconds (* */sec) (glob)
631 transferred * bytes in * seconds (* */sec) (glob)
632
632
633 Test clone bundle retrieved through bundle2
633 Test clone bundle retrieved through bundle2
634
634
635 $ cat << EOF >> $HGRCPATH
635 $ cat << EOF >> $HGRCPATH
636 > [extensions]
636 > [extensions]
637 > largefiles=
637 > largefiles=
638 > EOF
638 > EOF
639 $ killdaemons.py
639 $ killdaemons.py
640 $ hg -R server serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
640 $ hg -R server serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
641 $ cat hg.pid >> $DAEMON_PIDS
641 $ cat hg.pid >> $DAEMON_PIDS
642
642
643 $ hg -R server debuglfput gz-a.hg
643 $ hg -R server debuglfput gz-a.hg
644 1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae
644 1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae
645
645
646 $ cat > server/.hg/clonebundles.manifest << EOF
646 $ cat > server/.hg/clonebundles.manifest << EOF
647 > largefile://1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae BUNDLESPEC=gzip-v2
647 > largefile://1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae BUNDLESPEC=gzip-v2
648 > EOF
648 > EOF
649
649
650 $ hg clone -U http://localhost:$HGPORT largefile-provided --traceback
650 $ hg clone -U http://localhost:$HGPORT largefile-provided --traceback
651 applying clone bundle from largefile://1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae
651 applying clone bundle from largefile://1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae
652 adding changesets
652 adding changesets
653 adding manifests
653 adding manifests
654 adding file changes
654 adding file changes
655 added 2 changesets with 2 changes to 2 files
655 added 2 changesets with 2 changes to 2 files
656 finished applying clone bundle
656 finished applying clone bundle
657 searching for changes
657 searching for changes
658 no changes found
658 no changes found
659 2 local changesets published
659 2 local changesets published
660 $ killdaemons.py
660 $ killdaemons.py
661
661
662 A manifest with a gzip bundle requiring too much memory for a 16MB system and working
662 A manifest with a gzip bundle requiring too much memory for a 16MB system and working
663 on a 32MB system.
663 on a 32MB system.
664
664
665 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
665 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
666 $ cat http.pid >> $DAEMON_PIDS
666 $ cat http.pid >> $DAEMON_PIDS
667 $ hg -R server serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
667 $ hg -R server serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
668 $ cat hg.pid >> $DAEMON_PIDS
668 $ cat hg.pid >> $DAEMON_PIDS
669
669
670 $ cat > server/.hg/clonebundles.manifest << EOF
670 $ cat > server/.hg/clonebundles.manifest << EOF
671 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 REQUIREDRAM=12MB
671 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 REQUIREDRAM=12MB
672 > EOF
672 > EOF
673
673
674 $ hg clone -U --debug --config ui.available-memory=16MB http://localhost:$HGPORT gzip-too-large
674 $ hg clone -U --debug --config ui.available-memory=16MB http://localhost:$HGPORT gzip-too-large
675 using http://localhost:$HGPORT/
675 using http://localhost:$HGPORT/
676 sending capabilities command
676 sending capabilities command
677 sending clonebundles_manifest command
677 sending clonebundles_manifest command
678 filtering http://localhost:$HGPORT1/gz-a.hg as it needs more than 2/3 of system memory
678 filtering http://localhost:$HGPORT1/gz-a.hg as it needs more than 2/3 of system memory
679 no compatible clone bundles available on server; falling back to regular clone
679 no compatible clone bundles available on server; falling back to regular clone
680 (you may want to report this to the server operator)
680 (you may want to report this to the server operator)
681 query 1; heads
681 query 1; heads
682 sending batch command
682 sending batch command
683 requesting all changes
683 requesting all changes
684 sending getbundle command
684 sending getbundle command
685 bundle2-input-bundle: with-transaction
685 bundle2-input-bundle: with-transaction
686 bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
686 bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
687 adding changesets
687 adding changesets
688 add changeset 53245c60e682
688 add changeset 53245c60e682
689 add changeset aaff8d2ffbbf
689 add changeset aaff8d2ffbbf
690 adding manifests
690 adding manifests
691 adding file changes
691 adding file changes
692 adding bar revisions
692 adding bar revisions
693 adding foo revisions
693 adding foo revisions
694 bundle2-input-part: total payload size 936
694 bundle2-input-part: total payload size 936
695 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
695 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
696 bundle2-input-part: "phase-heads" supported
696 bundle2-input-part: "phase-heads" supported
697 bundle2-input-part: total payload size 24
697 bundle2-input-part: total payload size 24
698 bundle2-input-bundle: 3 parts total
698 bundle2-input-bundle: 3 parts total
699 checking for updated bookmarks
699 checking for updated bookmarks
700 updating the branch cache
700 updating the branch cache
701 added 2 changesets with 2 changes to 2 files
701 added 2 changesets with 2 changes to 2 files
702 new changesets 53245c60e682:aaff8d2ffbbf
702 new changesets 53245c60e682:aaff8d2ffbbf
703 calling hook changegroup.lfiles: hgext.largefiles.reposetup.checkrequireslfiles
703 calling hook changegroup.lfiles: hgext.largefiles.reposetup.checkrequireslfiles
704 updating the branch cache
704 updating the branch cache
705 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
705 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
706
706
707 $ hg clone -U --debug --config ui.available-memory=32MB http://localhost:$HGPORT gzip-too-large2
707 $ hg clone -U --debug --config ui.available-memory=32MB http://localhost:$HGPORT gzip-too-large2
708 using http://localhost:$HGPORT/
708 using http://localhost:$HGPORT/
709 sending capabilities command
709 sending capabilities command
710 sending clonebundles_manifest command
710 sending clonebundles_manifest command
711 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
711 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
712 bundle2-input-bundle: 1 params with-transaction
712 bundle2-input-bundle: 1 params with-transaction
713 bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
713 bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
714 adding changesets
714 adding changesets
715 add changeset 53245c60e682
715 add changeset 53245c60e682
716 add changeset aaff8d2ffbbf
716 add changeset aaff8d2ffbbf
717 adding manifests
717 adding manifests
718 adding file changes
718 adding file changes
719 adding bar revisions
719 adding bar revisions
720 adding foo revisions
720 adding foo revisions
721 bundle2-input-part: total payload size 920
721 bundle2-input-part: total payload size 920
722 bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
722 bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
723 bundle2-input-part: total payload size 59
723 bundle2-input-part: total payload size 59
724 bundle2-input-bundle: 2 parts total
724 bundle2-input-bundle: 2 parts total
725 updating the branch cache
725 updating the branch cache
726 added 2 changesets with 2 changes to 2 files
726 added 2 changesets with 2 changes to 2 files
727 finished applying clone bundle
727 finished applying clone bundle
728 query 1; heads
728 query 1; heads
729 sending batch command
729 sending batch command
730 searching for changes
730 searching for changes
731 all remote heads known locally
731 all remote heads known locally
732 no changes found
732 no changes found
733 sending getbundle command
733 sending getbundle command
734 bundle2-input-bundle: with-transaction
734 bundle2-input-bundle: with-transaction
735 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
735 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
736 bundle2-input-part: "phase-heads" supported
736 bundle2-input-part: "phase-heads" supported
737 bundle2-input-part: total payload size 24
737 bundle2-input-part: total payload size 24
738 bundle2-input-bundle: 2 parts total
738 bundle2-input-bundle: 2 parts total
739 checking for updated bookmarks
739 checking for updated bookmarks
740 2 local changesets published
740 2 local changesets published
741 calling hook changegroup.lfiles: hgext.largefiles.reposetup.checkrequireslfiles
741 calling hook changegroup.lfiles: hgext.largefiles.reposetup.checkrequireslfiles
742 updating the branch cache
742 updating the branch cache
743 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
743 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
744 $ killdaemons.py
744 $ killdaemons.py
745
745
746 Testing a clone bundle with digest
747 ==================================
748
749 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
750 $ cat http.pid >> $DAEMON_PIDS
751 $ hg -R server serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
752 $ cat hg.pid >> $DAEMON_PIDS
753
754 $ digest=$("$PYTHON" -c "import hashlib; print (hashlib.sha256(open('gz-a.hg', 'rb').read()).hexdigest())")
755 $ cat > server/.hg/clonebundles.manifest << EOF
756 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 DIGEST=sha256:${digest}
757 > EOF
758 $ hg clone -U http://localhost:$HGPORT digest-valid
759 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
760 adding changesets
761 adding manifests
762 adding file changes
763 added 2 changesets with 2 changes to 2 files
764 finished applying clone bundle
765 searching for changes
766 no changes found
767 2 local changesets published
768 $ digest_bad=$("$PYTHON" -c "import hashlib; print (hashlib.sha256(open('gz-a.hg', 'rb').read()+b'.').hexdigest())")
769 $ cat > server/.hg/clonebundles.manifest << EOF
770 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 DIGEST=sha256:${digest_bad}
771 > EOF
772 $ hg clone -U http://localhost:$HGPORT digest-invalid
773 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
774 abort: file with digest [0-9a-f]* expected, but [0-9a-f]* found for [0-9]* bytes (re)
775 [150]
776 $ cat > server/.hg/clonebundles.manifest << EOF
777 > http://localhost:$HGPORT1/bad-a.hg BUNDLESPEC=gzip-v2 DIGEST=sha256:xx
778 > http://localhost:$HGPORT1/bad-b.hg BUNDLESPEC=gzip-v2 DIGEST=xxx:0000
779 > http://localhost:$HGPORT1/bad-c.hg BUNDLESPEC=gzip-v2 DIGEST=sha256:0000
780 > http://localhost:$HGPORT1/bad-d.hg BUNDLESPEC=gzip-v2 DIGEST=xxx:00,xxx:01
781 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 DIGEST=sha256:${digest_bad}
782 > EOF
783 $ hg clone --debug -U http://localhost:$HGPORT digest-malformed
784 using http://localhost:$HGPORT/
785 sending capabilities command
786 sending clonebundles_manifest command
787 filtering http://localhost:$HGPORT1/bad-a.hg due to a bad DIGEST attribute
788 filtering http://localhost:$HGPORT1/bad-b.hg due to lack of supported digest
789 filtering http://localhost:$HGPORT1/bad-c.hg due to a bad sha256 digest
790 filtering http://localhost:$HGPORT1/bad-d.hg due to conflicting xxx digests
791 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
792 bundle2-input-bundle: 1 params with-transaction
793 bundle2-input-bundle: 0 parts total
794 \(sent [0-9]* HTTP requests and [0-9]* bytes; received [0-9]* bytes in responses\) (re)
795 abort: file with digest [0-9a-f]* expected, but [0-9a-f]* found for [0-9]* bytes (re)
796 [150]
797 $ cat > server/.hg/clonebundles.manifest << EOF
798 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 DIGEST=sha512:00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000,sha256:0000000000000000000000000000000000000000000000000000000000000000
799 > EOF
800 $ hg clone -U http://localhost:$HGPORT digest-preference
801 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
802 abort: file with digest 0{64} expected, but [0-9a-f]+ found for [0-9]+ bytes (re)
803 [150]
804 $ killdaemons.py
805
746 Testing a clone bundle that involves revlog splitting (issue6811)
806 Testing a clone bundle that involves revlog splitting (issue6811)
747 ==================================================================
807 ==================================================================
748
808
749 $ cat >> $HGRCPATH << EOF
809 $ cat >> $HGRCPATH << EOF
750 > [format]
810 > [format]
751 > revlog-compression=none
811 > revlog-compression=none
752 > use-persistent-nodemap=no
812 > use-persistent-nodemap=no
753 > EOF
813 > EOF
754
814
755 $ hg init server-revlog-split/
815 $ hg init server-revlog-split/
756 $ cd server-revlog-split
816 $ cd server-revlog-split
757 $ cat >> .hg/hgrc << EOF
817 $ cat >> .hg/hgrc << EOF
758 > [extensions]
818 > [extensions]
759 > clonebundles =
819 > clonebundles =
760 > EOF
820 > EOF
761 $ echo foo > A
821 $ echo foo > A
762 $ hg add A
822 $ hg add A
763 $ hg commit -m 'initial commit'
823 $ hg commit -m 'initial commit'
764 IMPORTANT: the revlogs must not be split
824 IMPORTANT: the revlogs must not be split
765 $ ls -1 .hg/store/00manifest.*
825 $ ls -1 .hg/store/00manifest.*
766 .hg/store/00manifest.i
826 .hg/store/00manifest.i
767 $ ls -1 .hg/store/data/_a.*
827 $ ls -1 .hg/store/data/_a.*
768 .hg/store/data/_a.i
828 .hg/store/data/_a.i
769
829
770 do big enough update to split the revlogs
830 do big enough update to split the revlogs
771
831
772 $ $TESTDIR/seq.py 100000 > A
832 $ $TESTDIR/seq.py 100000 > A
773 $ mkdir foo
833 $ mkdir foo
774 $ cd foo
834 $ cd foo
775 $ touch `$TESTDIR/seq.py 10000`
835 $ touch `$TESTDIR/seq.py 10000`
776 $ cd ..
836 $ cd ..
777 $ hg add -q foo
837 $ hg add -q foo
778 $ hg commit -m 'split the manifest and one filelog'
838 $ hg commit -m 'split the manifest and one filelog'
779
839
780 IMPORTANT: now the revlogs must be split
840 IMPORTANT: now the revlogs must be split
781 $ ls -1 .hg/store/00manifest.*
841 $ ls -1 .hg/store/00manifest.*
782 .hg/store/00manifest.d
842 .hg/store/00manifest.d
783 .hg/store/00manifest.i
843 .hg/store/00manifest.i
784 $ ls -1 .hg/store/data/_a.*
844 $ ls -1 .hg/store/data/_a.*
785 .hg/store/data/_a.d
845 .hg/store/data/_a.d
786 .hg/store/data/_a.i
846 .hg/store/data/_a.i
787
847
788 Add an extra commit on top of that
848 Add an extra commit on top of that
789
849
790 $ echo foo >> A
850 $ echo foo >> A
791 $ hg commit -m 'one extra commit'
851 $ hg commit -m 'one extra commit'
792
852
793 $ cd ..
853 $ cd ..
794
854
795 Do a bundle that contains the split, but not the update
855 Do a bundle that contains the split, but not the update
796
856
797 $ hg bundle --exact --rev '::(default~1)' -R server-revlog-split/ --type gzip-v2 split-test.hg
857 $ hg bundle --exact --rev '::(default~1)' -R server-revlog-split/ --type gzip-v2 split-test.hg
798 2 changesets found
858 2 changesets found
799
859
800 $ cat > server-revlog-split/.hg/clonebundles.manifest << EOF
860 $ cat > server-revlog-split/.hg/clonebundles.manifest << EOF
801 > http://localhost:$HGPORT1/split-test.hg BUNDLESPEC=gzip-v2
861 > http://localhost:$HGPORT1/split-test.hg BUNDLESPEC=gzip-v2
802 > EOF
862 > EOF
803
863
804 start the necessary server
864 start the necessary server
805
865
806 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
866 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
807 $ cat http.pid >> $DAEMON_PIDS
867 $ cat http.pid >> $DAEMON_PIDS
808 $ hg -R server-revlog-split serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
868 $ hg -R server-revlog-split serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
809 $ cat hg.pid >> $DAEMON_PIDS
869 $ cat hg.pid >> $DAEMON_PIDS
810
870
811 Check that clone works fine
871 Check that clone works fine
812 ===========================
872 ===========================
813
873
814 Here, the initial clone will trigger a revlog split (which is a bit clowny in
874 Here, the initial clone will trigger a revlog split (which is a bit clowny in
815 itself, but whatever). The split revlogs will see additional data added to
875 itself, but whatever). The split revlogs will see additional data added to
816 them in the subsequent pull. This should not be a problem.
876 them in the subsequent pull. This should not be a problem.
817
877
818 $ hg clone http://localhost:$HGPORT revlog-split-in-the-bundle
878 $ hg clone http://localhost:$HGPORT revlog-split-in-the-bundle
819 applying clone bundle from http://localhost:$HGPORT1/split-test.hg
879 applying clone bundle from http://localhost:$HGPORT1/split-test.hg
820 adding changesets
880 adding changesets
821 adding manifests
881 adding manifests
822 adding file changes
882 adding file changes
823 added 2 changesets with 10002 changes to 10001 files
883 added 2 changesets with 10002 changes to 10001 files
824 finished applying clone bundle
884 finished applying clone bundle
825 searching for changes
885 searching for changes
826 adding changesets
886 adding changesets
827 adding manifests
887 adding manifests
828 adding file changes
888 adding file changes
829 added 1 changesets with 1 changes to 1 files
889 added 1 changesets with 1 changes to 1 files
830 new changesets e3879eaa1db7
890 new changesets e3879eaa1db7
831 2 local changesets published
891 2 local changesets published
832 updating to branch default
892 updating to branch default
833 10001 files updated, 0 files merged, 0 files removed, 0 files unresolved
893 10001 files updated, 0 files merged, 0 files removed, 0 files unresolved
834
894
835 check the results
895 check the results
836
896
837 $ cd revlog-split-in-the-bundle
897 $ cd revlog-split-in-the-bundle
838 $ f --size .hg/store/00manifest.*
898 $ f --size .hg/store/00manifest.*
839 .hg/store/00manifest.d: size=499037
899 .hg/store/00manifest.d: size=499037
840 .hg/store/00manifest.i: size=192
900 .hg/store/00manifest.i: size=192
841 $ f --size .hg/store/data/_a.*
901 $ f --size .hg/store/data/_a.*
842 .hg/store/data/_a.d: size=588917
902 .hg/store/data/_a.d: size=588917
843 .hg/store/data/_a.i: size=192
903 .hg/store/data/_a.i: size=192
844
904
845 manifest should work
905 manifest should work
846
906
847 $ hg files -r tip | wc -l
907 $ hg files -r tip | wc -l
848 \s*10001 (re)
908 \s*10001 (re)
849
909
850 file content should work
910 file content should work
851
911
852 $ hg cat -r tip A | wc -l
912 $ hg cat -r tip A | wc -l
853 \s*100001 (re)
913 \s*100001 (re)
854
914
855
915
General Comments 0
You need to be logged in to leave comments. Login now