clonebundles: allow manifest to specify sha256 digest of bundles
Joerg Sonnenberger
r52875:aa7f4a45 default
@@ -1,570 +1,612
1 1 # bundlecaches.py - utility to deal with pre-computed bundle for servers
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 from __future__ import annotations
7 7
8 8 import collections
9 import re
9 10 import typing
10 11
11 12 from typing import (
12 13 Dict,
13 14 Union,
14 15 cast,
15 16 )
16 17
17 18 from .i18n import _
18 19
19 20 from .thirdparty import attr
20 21
21 22 # Force pytype to use the non-vendored package
22 23 if typing.TYPE_CHECKING:
23 24 # noinspection PyPackageRequirements
24 25 import attr
25 26
26 27 from . import (
27 28 error,
28 29 requirements as requirementsmod,
29 30 sslutil,
31 url as urlmod,
30 32 util,
31 33 )
32 34 from .utils import stringutil
33 35
34 36 urlreq = util.urlreq
35 37
36 38 BUNDLE_CACHE_DIR = b'bundle-cache'
37 39 CB_MANIFEST_FILE = b'clonebundles.manifest'
38 40 CLONEBUNDLESCHEME = b"peer-bundle-cache://"
39 41
40 42
41 43 def get_manifest(repo) -> bytes:
42 44 """get the bundle manifest to be served to a client from a server"""
43 45 raw_text = repo.vfs.tryread(CB_MANIFEST_FILE)
44 46 entries = [e.split(b' ', 1) for e in raw_text.splitlines()]
45 47
46 48 new_lines = []
47 49 for e in entries:
48 50 url = alter_bundle_url(repo, e[0])
49 51 if len(e) == 1:
50 52 line = url + b'\n'
51 53 else:
52 54 line = b"%s %s\n" % (url, e[1])
53 55 new_lines.append(line)
54 56 return b''.join(new_lines)
55 57
56 58
57 59 def alter_bundle_url(repo, url: bytes) -> bytes:
58 60 """a function that exist to help extension and hosting to alter the url
59 61
60 62 This will typically be used to inject authentication information in the url
61 63 of cached bundles."""
62 64 return url
63 65
64 66
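
For illustration, a clonebundles.manifest entry is one line per bundle: a URL followed by optional space-separated key=value attributes, e.g. (hypothetical URL) "https://cdn.example.com/full.hg BUNDLESPEC=zstd-v2". An extension can wrap alter_bundle_url to inject authentication when the manifest is served; a minimal sketch, with a hypothetical token parameter:

    from mercurial import bundlecaches, extensions

    def _add_token(orig, repo, url):
        # append a (hypothetical) short-lived access token to each bundle URL
        return orig(repo, url) + b'?token=...'

    def extsetup(ui):
        extensions.wrapfunction(bundlecaches, 'alter_bundle_url', _add_token)
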
65 67 SUPPORTED_CLONEBUNDLE_SCHEMES = [
66 68 b"http://",
67 69 b"https://",
68 70 b"largefile://",
69 71 CLONEBUNDLESCHEME,
70 72 ]
71 73
72 74
73 75 @attr.s
74 76 class bundlespec:
75 77 compression = attr.ib()
76 78 wirecompression = attr.ib()
77 79 version = attr.ib()
78 80 wireversion = attr.ib()
79 81 # parameters explicitly overwritten by the config or the specification
80 82 _explicit_params = attr.ib()
81 83 # default parameter for the version
82 84 #
83 85 # Keeping it separated is useful to check what was actually overwritten.
84 86 _default_opts = attr.ib()
85 87
86 88 @property
87 89 def params(self):
88 90 return collections.ChainMap(self._explicit_params, self._default_opts)
89 91
90 92 @property
91 93 def contentopts(self):
92 94 # kept for backward compatibility concerns.
93 95 return self.params
94 96
95 97 def set_param(self, key, value, overwrite=True):
96 98 """Set a bundle parameter value.
97 99
98 100 Will only overwrite if overwrite is true"""
99 101 if overwrite or key not in self._explicit_params:
100 102 self._explicit_params[key] = value
101 103
102 104 def as_spec(self):
103 105 parts = [b"%s-%s" % (self.compression, self.version)]
104 106 for param in sorted(self._explicit_params.items()):
105 107 parts.append(b'%s=%s' % param)
106 108 return b';'.join(parts)
107 109
108 110
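
The params property layers the explicit parameters over the per-version defaults, so explicit values win and unset keys fall through. A small sketch of that behavior, with illustrative values:

    from mercurial import bundlecaches

    spec = bundlecaches.bundlespec(
        b'zstd', b'ZS', b'v2', b'02',
        {b'phases': True},                         # explicit parameters
        {b'phases': False, b'cg.version': b'02'},  # version defaults
    )
    assert spec.params[b'phases'] is True       # explicit layer wins
    assert spec.params[b'cg.version'] == b'02'  # falls through to defaults
    spec.set_param(b'phases', False, overwrite=False)  # no-op: already explicit
    assert spec.params[b'phases'] is True
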
109 111 # Maps bundle version human names to changegroup versions.
110 112 _bundlespeccgversions = {
111 113 b'v1': b'01',
112 114 b'v2': b'02',
113 115 b'v3': b'03',
114 116 b'packed1': b's1',
115 117 b'bundle2': b'02', # legacy
116 118 }
117 119
118 120 # Maps bundle version with content opts to choose which part to bundle
119 121 _bundlespeccontentopts: Dict[bytes, Dict[bytes, Union[bool, bytes]]] = {
120 122 b'v1': {
121 123 b'changegroup': True,
122 124 b'cg.version': b'01',
123 125 b'obsolescence': False,
124 126 b'phases': False,
125 127 b'tagsfnodescache': False,
126 128 b'revbranchcache': False,
127 129 },
128 130 b'v2': {
129 131 b'changegroup': True,
130 132 b'cg.version': b'02',
131 133 b'obsolescence': False,
132 134 b'phases': False,
133 135 b'tagsfnodescache': True,
134 136 b'revbranchcache': True,
135 137 },
136 138 b'v3': {
137 139 b'changegroup': True,
138 140 b'cg.version': b'03',
139 141 b'obsolescence': False,
140 142 b'phases': True,
141 143 b'tagsfnodescache': True,
142 144 b'revbranchcache': True,
143 145 },
144 146 b'streamv2': {
145 147 b'changegroup': False,
146 148 b'cg.version': b'02',
147 149 b'obsolescence': False,
148 150 b'phases': False,
149 151 b"stream": b"v2",
150 152 b'tagsfnodescache': False,
151 153 b'revbranchcache': False,
152 154 },
153 155 b'streamv3-exp': {
154 156 b'changegroup': False,
155 157 b'cg.version': b'03',
156 158 b'obsolescence': False,
157 159 b'phases': False,
158 160 b"stream": b"v3-exp",
159 161 b'tagsfnodescache': False,
160 162 b'revbranchcache': False,
161 163 },
162 164 b'packed1': {
163 165 b'cg.version': b's1',
164 166 },
165 167 b'bundle2': { # legacy
166 168 b'cg.version': b'02',
167 169 },
168 170 }
169 171 _bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']
170 172
171 173 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
172 174 _bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
173 175
174 176
175 177 def param_bool(key, value):
176 178 """make a boolean out of a parameter value"""
177 179 b = stringutil.parsebool(value)
178 180 if b is None:
179 181 msg = _(b"parameter %s should be a boolean ('%s')")
180 182 msg %= (key, value)
181 183 raise error.InvalidBundleSpecification(msg)
182 184 return b
183 185
184 186
186 188 # mapping of known parameter names that need their value processed
186 188 bundle_spec_param_processing = {
187 189 b"obsolescence": param_bool,
188 190 b"obsolescence-mandatory": param_bool,
189 191 b"phases": param_bool,
190 192 b"changegroup": param_bool,
191 193 b"tagsfnodescache": param_bool,
192 194 b"revbranchcache": param_bool,
193 195 }
194 196
195 197
196 198 def _parseparams(s):
197 199 """parse bundlespec parameter section
198 200
199 201 input: "comp-version;params" string
200 202
201 203 return: (spec, {param_key: param_value})
202 204 """
203 205 if b';' not in s:
204 206 return s, {}
205 207
206 208 params = {}
207 209 version, paramstr = s.split(b';', 1)
208 210
209 211 err = _(b'invalid bundle specification: missing "=" in parameter: %s')
210 212 for p in paramstr.split(b';'):
211 213 if b'=' not in p:
212 214 msg = err % p
213 215 raise error.InvalidBundleSpecification(msg)
214 216
215 217 key, value = p.split(b'=', 1)
216 218 key = urlreq.unquote(key)
217 219 value = urlreq.unquote(value)
218 220 process = bundle_spec_param_processing.get(key)
219 221 if process is not None:
220 222 value = process(key, value)
221 223 params[key] = value
222 224
223 225 return version, params
224 226
225 227
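
A quick sketch of what _parseparams returns; keys listed in bundle_spec_param_processing are coerced (here to a boolean) and keys/values are URI-decoded:

    from mercurial import bundlecaches

    bundlecaches._parseparams(b'v2;obsolescence=true;foo=bar%2Cbaz')
    # -> (b'v2', {b'obsolescence': True, b'foo': b'bar,baz'})
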
226 228 def parsebundlespec(repo, spec, strict=True):
227 229 """Parse a bundle string specification into parts.
228 230
229 231 Bundle specifications denote a well-defined bundle/exchange format.
230 232 The content of a given specification should not change over time in
231 233 order to ensure that bundles produced by a newer version of Mercurial are
232 234 readable from an older version.
233 235
234 236 The string currently has the form:
235 237
236 238 <compression>-<type>[;<parameter0>[;<parameter1>]]
237 239
238 240 Where <compression> is one of the supported compression formats
239 241 and <type> is (currently) a version string. A ";" can follow the type and
240 242 all text afterwards is interpreted as URI encoded, ";" delimited key=value
241 243 pairs.
242 244
243 245 If ``strict`` is True (the default) <compression> is required. Otherwise,
244 246 it is optional.
245 247
246 248 Returns a bundlespec object of (compression, version, parameters).
247 249 Compression will be ``None`` if not in strict mode and a compression isn't
248 250 defined.
249 251
250 252 An ``InvalidBundleSpecification`` is raised when the specification is
251 253 not syntactically well formed.
252 254
253 255 An ``UnsupportedBundleSpecification`` is raised when the compression or
254 256 bundle type/version is not recognized.
255 257
256 258 Note: this function will likely eventually return a more complex data
257 259 structure, including bundle2 part information.
258 260 """
259 261 if strict and b'-' not in spec:
260 262 raise error.InvalidBundleSpecification(
261 263 _(
262 264 b'invalid bundle specification; '
263 265 b'must be prefixed with compression: %s'
264 266 )
265 267 % spec
266 268 )
267 269
268 270 pre_args = spec.split(b';', 1)[0]
269 271 if b'-' in pre_args:
270 272 compression, version = spec.split(b'-', 1)
271 273
272 274 if compression not in util.compengines.supportedbundlenames:
273 275 raise error.UnsupportedBundleSpecification(
274 276 _(b'%s compression is not supported') % compression
275 277 )
276 278
277 279 version, params = _parseparams(version)
278 280
279 281 if version not in _bundlespeccontentopts:
280 282 raise error.UnsupportedBundleSpecification(
281 283 _(b'%s is not a recognized bundle version') % version
282 284 )
283 285 else:
284 286 # Value could be just the compression or just the version, in which
285 287 # case some defaults are assumed (but only when not in strict mode).
286 288 assert not strict
287 289
288 290 spec, params = _parseparams(spec)
289 291
290 292 if spec in util.compengines.supportedbundlenames:
291 293 compression = spec
292 294 version = b'v1'
293 295 # Generaldelta repos require v2.
294 296 if requirementsmod.GENERALDELTA_REQUIREMENT in repo.requirements:
295 297 version = b'v2'
296 298 elif requirementsmod.REVLOGV2_REQUIREMENT in repo.requirements:
297 299 version = b'v2'
298 300 # Modern compression engines require v2.
299 301 if compression not in _bundlespecv1compengines:
300 302 version = b'v2'
301 303 elif spec in _bundlespeccontentopts:
302 304 if spec == b'packed1':
303 305 compression = b'none'
304 306 else:
305 307 compression = b'bzip2'
306 308 version = spec
307 309 else:
308 310 raise error.UnsupportedBundleSpecification(
309 311 _(b'%s is not a recognized bundle specification') % spec
310 312 )
311 313
312 314 # Bundle version 1 only supports a known set of compression engines.
313 315 if version == b'v1' and compression not in _bundlespecv1compengines:
314 316 raise error.UnsupportedBundleSpecification(
315 317 _(b'compression engine %s is not supported on v1 bundles')
316 318 % compression
317 319 )
318 320
319 321 # The specification for packed1 can optionally declare the data formats
320 322 # required to apply it. If we see this metadata, compare against what the
321 323 # repo supports and error if the bundle isn't compatible.
322 324 if version == b'packed1' and b'requirements' in params:
323 325 requirements = set(cast(bytes, params[b'requirements']).split(b','))
324 326 missingreqs = requirements - requirementsmod.STREAM_FIXED_REQUIREMENTS
325 327 if missingreqs:
326 328 raise error.UnsupportedBundleSpecification(
327 329 _(b'missing support for repository features: %s')
328 330 % b', '.join(sorted(missingreqs))
329 331 )
330 332
331 333 # Compute contentopts based on the version
332 334 if b"stream" in params:
333 335 # This case is fishy as this mostly derails the version selection
334 336 # mechanism. `stream` bundles are quite specific and used differently
335 337 # from "normal" bundles.
336 338 #
337 339 # (we should probably define a cleaner way to do this and raise a
338 340 # warning when the old way is encountered)
339 341 if params[b"stream"] == b"v2":
340 342 version = b"streamv2"
341 343 if params[b"stream"] == b"v3-exp":
342 344 version = b"streamv3-exp"
343 345 contentopts = _bundlespeccontentopts.get(version, {}).copy()
344 346 if version == b"streamv2" or version == b"streamv3-exp":
346 348 # streamv2 has been reported as "v2" for a while.
346 348 version = b"v2"
347 349
348 350 engine = util.compengines.forbundlename(compression)
349 351 compression, wirecompression = engine.bundletype()
350 352 wireversion = _bundlespeccontentopts[version][b'cg.version']
351 353
352 354 return bundlespec(
353 355 compression, wirecompression, version, wireversion, params, contentopts
354 356 )
355 357
356 358
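
In strict mode the repo argument is not consulted, so a spec can be parsed standalone. A sketch, assuming a build with zstd support:

    from mercurial import bundlecaches

    spec = bundlecaches.parsebundlespec(None, b'zstd-v2;phases=1')
    # spec.compression == b'zstd', spec.wirecompression == b'ZS'
    # spec.version == b'v2', spec.wireversion == b'02'
    # spec.params[b'phases'] is True; spec.params[b'cg.version'] == b'02'
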
357 359 def parseclonebundlesmanifest(repo, s):
358 360 """Parses the raw text of a clone bundles manifest.
359 361
360 362 Returns a list of dicts. The dicts have a ``URL`` key corresponding
361 363 to the URL and other keys are the attributes for the entry.
362 364 """
363 365 m = []
364 366 for line in s.splitlines():
365 367 fields = line.split()
366 368 if not fields:
367 369 continue
368 370 attrs = {b'URL': fields[0]}
369 371 for rawattr in fields[1:]:
370 372 key, value = rawattr.split(b'=', 1)
371 373 key = util.urlreq.unquote(key)
372 374 value = util.urlreq.unquote(value)
373 375 attrs[key] = value
374 376
375 377 # Parse BUNDLESPEC into components. This makes client-side
376 378 # preferences easier to specify since you can prefer a single
377 379 # component of the BUNDLESPEC.
378 380 if key == b'BUNDLESPEC':
379 381 try:
380 382 bundlespec = parsebundlespec(repo, value)
381 383 attrs[b'COMPRESSION'] = bundlespec.compression
382 384 attrs[b'VERSION'] = bundlespec.version
383 385 except error.InvalidBundleSpecification:
384 386 pass
385 387 except error.UnsupportedBundleSpecification:
386 388 pass
387 389
388 390 m.append(attrs)
389 391
390 392 return m
391 393
392 394
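
A sketch of the resulting structure for a one-line manifest (hypothetical URL); note the BUNDLESPEC attribute is additionally broken out into COMPRESSION and VERSION keys:

    from mercurial import bundlecaches

    raw = b'https://cdn.example.com/full.hg BUNDLESPEC=zstd-v2 REQUIREDRAM=2GB\n'
    bundlecaches.parseclonebundlesmanifest(None, raw)
    # -> [{b'URL': b'https://cdn.example.com/full.hg',
    #      b'BUNDLESPEC': b'zstd-v2', b'REQUIREDRAM': b'2GB',
    #      b'COMPRESSION': b'zstd', b'VERSION': b'v2'}]
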
393 395 def isstreamclonespec(bundlespec):
394 396 # Stream clone v1
395 397 if bundlespec.wirecompression == b'UN' and bundlespec.wireversion == b's1':
396 398 return True
397 399
398 400 # Stream clone v2
399 401 if (
400 402 bundlespec.wirecompression == b'UN'
401 403 and bundlespec.wireversion == b'02'
402 404 and bundlespec.contentopts.get(b'stream', None) in (b"v2", b"v3-exp")
403 405 ):
404 406 return True
405 407
406 408 return False
407 409
408 410
411 digest_regex = re.compile(b'^[a-z0-9]+:[0-9a-f]+(,[a-z0-9]+:[0-9a-f]+)*$')
412
413
409 414 def filterclonebundleentries(
410 415 repo, entries, streamclonerequested=False, pullbundles=False
411 416 ):
412 417 """Remove incompatible clone bundle manifest entries.
413 418
414 419 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
415 420 and returns a new list consisting of only the entries that this client
416 421 should be able to apply.
417 422
418 423 There is no guarantee we'll be able to apply all returned entries because
419 424 the metadata we use to filter on may be missing or wrong.
420 425 """
421 426 newentries = []
422 427 for entry in entries:
423 428 url = entry.get(b'URL')
424 429 if not pullbundles and not any(
425 430 [url.startswith(scheme) for scheme in SUPPORTED_CLONEBUNDLE_SCHEMES]
426 431 ):
427 432 repo.ui.debug(
428 433 b'filtering %s because not a supported clonebundle scheme\n'
429 434 % url
430 435 )
431 436 continue
432 437
433 438 spec = entry.get(b'BUNDLESPEC')
434 439 if spec:
435 440 try:
436 441 bundlespec = parsebundlespec(repo, spec, strict=True)
437 442
438 443 # If a stream clone was requested, filter out non-streamclone
439 444 # entries.
440 445 if streamclonerequested and not isstreamclonespec(bundlespec):
441 446 repo.ui.debug(
442 447 b'filtering %s because not a stream clone\n' % url
443 448 )
444 449 continue
445 450
446 451 except error.InvalidBundleSpecification as e:
447 452 repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
448 453 continue
449 454 except error.UnsupportedBundleSpecification as e:
450 455 repo.ui.debug(
451 456 b'filtering %s because unsupported bundle '
452 457 b'spec: %s\n' % (url, stringutil.forcebytestr(e))
453 458 )
454 459 continue
455 460 # If we don't have a spec and requested a stream clone, we don't know
456 461 # what the entry is so don't attempt to apply it.
457 462 elif streamclonerequested:
458 463 repo.ui.debug(
459 464 b'filtering %s because cannot determine if a stream '
460 465 b'clone bundle\n' % url
461 466 )
462 467 continue
463 468
464 469 if b'REQUIRESNI' in entry and not sslutil.hassni:
465 470 repo.ui.debug(b'filtering %s because SNI not supported\n' % url)
466 471 continue
467 472
468 473 if b'REQUIREDRAM' in entry:
469 474 try:
470 475 requiredram = util.sizetoint(entry[b'REQUIREDRAM'])
471 476 except error.ParseError:
472 477 repo.ui.debug(
473 478 b'filtering %s due to a bad REQUIREDRAM attribute\n' % url
474 479 )
475 480 continue
476 481 actualram = repo.ui.estimatememory()
477 482 if actualram is not None and actualram * 0.66 < requiredram:
478 483 repo.ui.debug(
479 484 b'filtering %s as it needs more than 2/3 of system memory\n'
480 485 % url
481 486 )
482 487 continue
483 488
489 if b'DIGEST' in entry:
490 if not digest_regex.match(entry[b'DIGEST']):
491 repo.ui.debug(
492 b'filtering %s due to a bad DIGEST attribute\n' % url
493 )
494 continue
495 supported = 0
496 seen = {}
497 for digest_entry in entry[b'DIGEST'].split(b','):
498 algo, digest = digest_entry.split(b':')
499 if algo not in seen:
500 seen[algo] = digest
501 elif seen[algo] != digest:
502 repo.ui.debug(
503 b'filtering %s due to conflicting %s digests\n'
504 % (url, algo)
505 )
506 supported = 0
507 break
508 digester = urlmod.digesthandler.digest_algorithms.get(algo)
509 if digester is None:
510 continue
511 if len(digest) != digester().digest_size * 2:
512 repo.ui.debug(
513 b'filtering %s due to a bad %s digest\n' % (url, algo)
514 )
515 supported = 0
516 break
517 supported += 1
518 else:
519 if supported == 0:
520 repo.ui.debug(
521 b'filtering %s due to lack of supported digest\n' % url
522 )
523 if supported == 0:
524 continue
525
484 526 newentries.append(entry)
485 527
486 528 return newentries
487 529
488 530
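
The DIGEST attribute is the new piece in this change: a comma-separated list of algo:hexdigest pairs matching digest_regex, with the actual content verification delegated to the digest handling in the url module when the bundle is downloaded. A server-side sketch of producing such an entry (path and URL hypothetical):

    import hashlib

    with open('/srv/bundles/full.hg', 'rb') as f:
        digest = hashlib.sha256(f.read()).hexdigest().encode('ascii')
    line = (b'https://cdn.example.com/full.hg BUNDLESPEC=zstd-v2'
            b' DIGEST=sha256:%s\n' % digest)
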
489 531 class clonebundleentry:
490 532 """Represents an item in a clone bundles manifest.
491 533
492 534 This rich class is needed to support sorting since sorted() in Python 3
493 535 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
494 536 won't work.
495 537 """
496 538
497 539 def __init__(self, value, prefers):
498 540 self.value = value
499 541 self.prefers = prefers
500 542
501 543 def _cmp(self, other):
502 544 for prefkey, prefvalue in self.prefers:
503 545 avalue = self.value.get(prefkey)
504 546 bvalue = other.value.get(prefkey)
505 547
506 548 # Special case for b missing attribute and a matches exactly.
507 549 if avalue is not None and bvalue is None and avalue == prefvalue:
508 550 return -1
509 551
510 552 # Special case for a missing attribute and b matches exactly.
511 553 if bvalue is not None and avalue is None and bvalue == prefvalue:
512 554 return 1
513 555
514 556 # We can't compare unless attribute present on both.
515 557 if avalue is None or bvalue is None:
516 558 continue
517 559
518 560 # Same values should fall back to next attribute.
519 561 if avalue == bvalue:
520 562 continue
521 563
522 564 # Exact matches come first.
523 565 if avalue == prefvalue:
524 566 return -1
525 567 if bvalue == prefvalue:
526 568 return 1
527 569
528 570 # Fall back to next attribute.
529 571 continue
530 572
531 573 # If we got here we couldn't sort by attributes and prefers. Fall
532 574 # back to index order.
533 575 return 0
534 576
535 577 def __lt__(self, other):
536 578 return self._cmp(other) < 0
537 579
538 580 def __gt__(self, other):
539 581 return self._cmp(other) > 0
540 582
541 583 def __eq__(self, other):
542 584 return self._cmp(other) == 0
543 585
544 586 def __le__(self, other):
545 587 return self._cmp(other) <= 0
546 588
547 589 def __ge__(self, other):
548 590 return self._cmp(other) >= 0
549 591
550 592 def __ne__(self, other):
551 593 return self._cmp(other) != 0
552 594
553 595
554 596 def sortclonebundleentries(ui, entries):
555 597 prefers = ui.configlist(b'ui', b'clonebundleprefers')
556 598 if not prefers:
557 599 return list(entries)
558 600
559 601 def _split(p):
560 602 if b'=' not in p:
561 603 hint = _(b"each comma separated item should be key=value pairs")
562 604 raise error.Abort(
563 605 _(b"invalid ui.clonebundleprefers item: %s") % p, hint=hint
564 606 )
565 607 return p.split(b'=', 1)
566 608
567 609 prefers = [_split(p) for p in prefers]
568 610
569 611 items = sorted(clonebundleentry(v, prefers) for v in entries)
570 612 return [i.value for i in items]
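
Sorting honors the ui.clonebundleprefers list in order, so an entry matching an earlier preference sorts first. A sketch:

    from mercurial import bundlecaches, ui as uimod

    ui = uimod.ui.load()
    ui.setconfig(b'ui', b'clonebundleprefers', b'COMPRESSION=zstd, VERSION=v2')
    entries = [
        {b'URL': b'a', b'COMPRESSION': b'gzip'},
        {b'URL': b'b', b'COMPRESSION': b'zstd'},
    ]
    bundlecaches.sortclonebundleentries(ui, entries)
    # -> the b'b' entry sorts first: it matches COMPRESSION=zstd
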
@@ -1,2959 +1,2974
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import annotations
9 9
10 10 import collections
11 11 import weakref
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullrev,
17 17 )
18 18 from . import (
19 19 bookmarks as bookmod,
20 20 bundle2,
21 21 bundlecaches,
22 22 changegroup,
23 23 discovery,
24 24 error,
25 25 lock as lockmod,
26 26 logexchange,
27 27 narrowspec,
28 28 obsolete,
29 29 obsutil,
30 30 phases,
31 31 pushkey,
32 32 pycompat,
33 33 requirements,
34 34 scmutil,
35 35 streamclone,
36 36 url as urlmod,
37 37 util,
38 38 wireprototypes,
39 39 )
40 40 from .utils import (
41 41 hashutil,
42 42 stringutil,
43 43 urlutil,
44 44 )
45 45 from .interfaces import repository
46 46
47 47 urlerr = util.urlerr
48 48 urlreq = util.urlreq
49 49
50 50 _NARROWACL_SECTION = b'narrowacl'
51 51
52 52
53 53 def readbundle(ui, fh, fname, vfs=None):
54 54 header = changegroup.readexactly(fh, 4)
55 55
56 56 alg = None
57 57 if not fname:
58 58 fname = b"stream"
59 59 if not header.startswith(b'HG') and header.startswith(b'\0'):
60 60 fh = changegroup.headerlessfixup(fh, header)
61 61 header = b"HG10"
62 62 alg = b'UN'
63 63 elif vfs:
64 64 fname = vfs.join(fname)
65 65
66 66 magic, version = header[0:2], header[2:4]
67 67
68 68 if magic != b'HG':
69 69 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
70 70 if version == b'10':
71 71 if alg is None:
72 72 alg = changegroup.readexactly(fh, 2)
73 73 return changegroup.cg1unpacker(fh, alg)
74 74 elif version.startswith(b'2'):
75 75 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
76 76 elif version == b'S1':
77 77 return streamclone.streamcloneapplier(fh)
78 78 else:
79 79 raise error.Abort(
80 80 _(b'%s: unknown bundle version %s') % (fname, version)
81 81 )
82 82
83 83
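
readbundle dispatches on the 4-byte header: HG10 goes to the changegroup-v1 unpacker (reading the compression marker when needed), HG2x to the bundle2 unbundler, and HGS1 to the stream clone applier. A usage sketch (file name hypothetical):

    from mercurial import exchange, ui as uimod

    fh = open('bundle.hg', 'rb')
    unbundler = exchange.readbundle(uimod.ui.load(), fh, b'bundle.hg')
    # -> cg1unpacker, bundle2.unbundle20 or streamclone.streamcloneapplier
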
84 84 def _format_params(params):
85 85 parts = []
86 86 for key, value in sorted(params.items()):
87 87 value = urlreq.quote(value)
88 88 parts.append(b"%s=%s" % (key, value))
89 89 return b';'.join(parts)
90 90
91 91
92 92 def getbundlespec(ui, fh):
93 93 """Infer the bundlespec from a bundle file handle.
94 94
95 95 The input file handle is seeked and the original seek position is not
96 96 restored.
97 97 """
98 98
99 99 def speccompression(alg):
100 100 try:
101 101 return util.compengines.forbundletype(alg).bundletype()[0]
102 102 except KeyError:
103 103 return None
104 104
105 105 params = {}
106 106
107 107 b = readbundle(ui, fh, None)
108 108 if isinstance(b, changegroup.cg1unpacker):
109 109 alg = b._type
110 110 if alg == b'_truncatedBZ':
111 111 alg = b'BZ'
112 112 comp = speccompression(alg)
113 113 if not comp:
114 114 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
115 115 return b'%s-v1' % comp
116 116 elif isinstance(b, bundle2.unbundle20):
117 117 if b'Compression' in b.params:
118 118 comp = speccompression(b.params[b'Compression'])
119 119 if not comp:
120 120 raise error.Abort(
121 121 _(b'unknown compression algorithm: %s') % comp
122 122 )
123 123 else:
124 124 comp = b'none'
125 125
126 126 version = None
127 127 for part in b.iterparts():
128 128 if part.type == b'changegroup':
129 129 cgversion = part.params[b'version']
130 130 if cgversion in (b'01', b'02'):
131 131 version = b'v2'
132 132 elif cgversion in (b'03',):
133 133 version = b'v2'
134 134 params[b'cg.version'] = cgversion
135 135 else:
136 136 raise error.Abort(
137 137 _(
138 138 b'changegroup version %s does not have '
139 139 b'a known bundlespec'
140 140 )
141 141 % cgversion,
142 142 hint=_(b'try upgrading your Mercurial client'),
143 143 )
144 144 elif part.type == b'stream2' and version is None:
145 145 # A stream2 part must be part of a v2 bundle
146 146 requirements = urlreq.unquote(part.params[b'requirements'])
147 147 splitted = requirements.split()
148 148 params = bundle2._formatrequirementsparams(splitted)
149 149 return b'none-v2;stream=v2;%s' % params
150 150 elif part.type == b'stream3-exp' and version is None:
151 151 # A stream3 part must be part of a v2 bundle
152 152 requirements = urlreq.unquote(part.params[b'requirements'])
153 153 splitted = requirements.split()
154 154 params = bundle2._formatrequirementsparams(splitted)
155 155 return b'none-v2;stream=v3-exp;%s' % params
156 156 elif part.type == b'obsmarkers':
157 157 params[b'obsolescence'] = b'yes'
158 158 if not part.mandatory:
159 159 params[b'obsolescence-mandatory'] = b'no'
160 160
161 161 if not version:
162 162 params[b'changegroup'] = b'no'
163 163 version = b'v2'
164 164 spec = b'%s-%s' % (comp, version)
165 165 if params:
166 166 spec += b';'
167 167 spec += _format_params(params)
168 168 return spec
169 169
170 170 elif isinstance(b, streamclone.streamcloneapplier):
171 171 requirements = streamclone.readbundle1header(fh)[2]
172 172 formatted = bundle2._formatrequirementsparams(requirements)
173 173 return b'none-packed1;%s' % formatted
174 174 else:
175 175 raise error.Abort(_(b'unknown bundle type: %s') % b)
176 176
177 177
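
getbundlespec re-derives a spec string from a bundle's actual contents, e.g. b'zstd-v2' for a zstd bundle2 carrying a changegroup, or b'none-v2;stream=v2;requirements=...' for a stream bundle. A usage sketch (file name hypothetical):

    from mercurial import exchange, ui as uimod

    fh = open('bundle.hg', 'rb')
    spec = exchange.getbundlespec(uimod.ui.load(), fh)  # e.g. b'zstd-v2'
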
178 178 def _computeoutgoing(repo, heads, common):
179 179 """Computes which revs are outgoing given a set of common
180 180 and a set of heads.
181 181
182 182 This is a separate function so extensions can have access to
183 183 the logic.
184 184
185 185 Returns a discovery.outgoing object.
186 186 """
187 187 cl = repo.changelog
188 188 if common:
189 189 hasnode = cl.hasnode
190 190 common = [n for n in common if hasnode(n)]
191 191 else:
192 192 common = [repo.nullid]
193 193 if not heads:
194 194 heads = cl.heads()
195 195 return discovery.outgoing(repo, common, heads)
196 196
197 197
198 198 def _checkpublish(pushop):
199 199 repo = pushop.repo
200 200 ui = repo.ui
201 201 behavior = ui.config(b'experimental', b'auto-publish')
202 202 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
203 203 return
204 204 remotephases = listkeys(pushop.remote, b'phases')
205 205 if not remotephases.get(b'publishing', False):
206 206 return
207 207
208 208 if pushop.revs is None:
209 209 published = repo.filtered(b'served').revs(b'not public()')
210 210 else:
211 211 published = repo.revs(b'::%ln - public()', pushop.revs)
212 212 # we want to use pushop.revs in the revset even if they themselves are
213 213 # secret, but we don't want to have anything that the server won't see
214 214 # in the result of this expression
215 215 published &= repo.filtered(b'served')
216 216 if published:
217 217 if behavior == b'warn':
218 218 ui.warn(
219 219 _(b'%i changesets about to be published\n') % len(published)
220 220 )
221 221 elif behavior == b'confirm':
222 222 if ui.promptchoice(
223 223 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
224 224 % len(published)
225 225 ):
226 226 raise error.CanceledError(_(b'user quit'))
227 227 elif behavior == b'abort':
228 228 msg = _(b'push would publish %i changesets') % len(published)
229 229 hint = _(
230 230 b"use --publish or adjust 'experimental.auto-publish'"
231 231 b" config"
232 232 )
233 233 raise error.Abort(msg, hint=hint)
234 234
235 235
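
The check is driven by the experimental.auto-publish config; warn, confirm and abort are the recognized values. A sketch of enabling the abort behavior programmatically, equivalent to "auto-publish = abort" in the [experimental] hgrc section:

    from mercurial import ui as uimod

    ui = uimod.ui.load()
    ui.setconfig(b'experimental', b'auto-publish', b'abort', b'example')
    assert ui.config(b'experimental', b'auto-publish') == b'abort'
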
236 236 def _forcebundle1(op):
237 237 """return true if a pull/push must use bundle1
238 238
239 239 This function is used to allow testing of the older bundle version"""
240 240 ui = op.repo.ui
241 241 # The goal of this config is to allow developers to choose the bundle
242 242 # version used during exchange. This is especially handy during tests.
243 243 # Value is a list of bundle version to be picked from, highest version
244 244 # should be used.
245 245 #
246 246 # developer config: devel.legacy.exchange
247 247 exchange = ui.configlist(b'devel', b'legacy.exchange')
248 248 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
249 249 return forcebundle1 or not op.remote.capable(b'bundle2')
250 250
251 251
252 252 class pushoperation:
253 253 """A object that represent a single push operation
254 254
255 255 Its purpose is to carry push related state and very common operations.
256 256
257 257 A new pushoperation should be created at the beginning of each push and
258 258 discarded afterward.
259 259 """
260 260
261 261 def __init__(
262 262 self,
263 263 repo,
264 264 remote,
265 265 force=False,
266 266 revs=None,
267 267 newbranch=False,
268 268 bookmarks=(),
269 269 publish=False,
270 270 pushvars=None,
271 271 ):
272 272 # repo we push from
273 273 self.repo = repo
274 274 self.ui = repo.ui
275 275 # repo we push to
276 276 self.remote = remote
277 277 # force option provided
278 278 self.force = force
279 279 # revs to be pushed (None is "all")
280 280 self.revs = revs
281 281 # bookmark explicitly pushed
282 282 self.bookmarks = bookmarks
283 283 # allow push of new branch
284 284 self.newbranch = newbranch
285 285 # steps already performed
286 286 # (used to check what steps have been already performed through bundle2)
287 287 self.stepsdone = set()
288 288 # Integer version of the changegroup push result
289 289 # - None means nothing to push
290 290 # - 0 means HTTP error
291 291 # - 1 means we pushed and remote head count is unchanged *or*
292 292 # we have outgoing changesets but refused to push
293 293 # - other values as described by addchangegroup()
294 294 self.cgresult = None
295 295 # Boolean value for the bookmark push
296 296 self.bkresult = None
297 297 # discover.outgoing object (contains common and outgoing data)
298 298 self.outgoing = None
299 299 # all remote topological heads before the push
300 300 self.remoteheads = None
301 301 # Details of the remote branch pre and post push
302 302 #
303 303 # mapping: {'branch': ([remoteheads],
304 304 # [newheads],
305 305 # [unsyncedheads],
306 306 # [discardedheads])}
307 307 # - branch: the branch name
308 308 # - remoteheads: the list of remote heads known locally
309 309 # None if the branch is new
310 310 # - newheads: the new remote heads (known locally) with outgoing pushed
311 311 # - unsyncedheads: the list of remote heads unknown locally.
312 312 # - discardedheads: the list of remote heads made obsolete by the push
313 313 self.pushbranchmap = None
314 314 # testable as a boolean indicating if any nodes are missing locally.
315 315 self.incoming = None
316 316 # summary of the remote phase situation
317 317 self.remotephases = None
318 318 # phases changes that must be pushed along side the changesets
319 319 self.outdatedphases = None
320 320 # phases changes that must be pushed if changeset push fails
321 321 self.fallbackoutdatedphases = None
322 322 # outgoing obsmarkers
323 323 self.outobsmarkers = set()
324 324 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
325 325 self.outbookmarks = []
326 326 # transaction manager
327 327 self.trmanager = None
328 328 # map { pushkey partid -> callback handling failure}
329 329 # used to handle exception from mandatory pushkey part failure
330 330 self.pkfailcb = {}
331 331 # an iterable of pushvars or None
332 332 self.pushvars = pushvars
333 333 # publish pushed changesets
334 334 self.publish = publish
335 335
336 336 @util.propertycache
337 337 def futureheads(self):
338 338 """future remote heads if the changeset push succeeds"""
339 339 return self.outgoing.ancestorsof
340 340
341 341 @util.propertycache
342 342 def fallbackheads(self):
343 343 """future remote heads if the changeset push fails"""
344 344 if self.revs is None:
345 345 # no target to push, all common heads are relevant
346 346 return self.outgoing.commonheads
347 347 unfi = self.repo.unfiltered()
348 348 # I want cheads = heads(::push_heads and ::commonheads)
349 349 #
350 350 # To push, we already computed
351 351 # common = (::commonheads)
352 352 # missing = ((commonheads::push_heads) - commonheads)
353 353 #
354 354 # So we basically search
355 355 #
356 356 # almost_heads = heads((parents(missing) + push_heads) & common)
357 357 #
358 358 # We use "almost" here as this can return revision that are ancestors
359 359 # of other in the set and we need to explicitly turn it into an
360 360 # antichain later. We can do so using:
361 361 #
362 362 # cheads = heads(almost_heads::almost_heads)
363 363 #
364 364 # In practice the code is a bit more convoluted to avoid some extra
365 365 # computation. It aims at doing the same computation as highlighted
366 366 # above however.
367 367 common = self.outgoing.common
368 368 unfi = self.repo.unfiltered()
369 369 cl = unfi.changelog
370 370 to_rev = cl.index.rev
371 371 to_node = cl.node
372 372 parent_revs = cl.parentrevs
373 373 unselected = []
374 374 cheads = set()
375 375 # XXX-perf: `self.revs` and `outgoing.missing` could hold revs directly
376 376 for n in self.revs:
377 377 r = to_rev(n)
378 378 if r in common:
379 379 cheads.add(r)
380 380 else:
381 381 unselected.append(r)
382 382 known_non_heads = cl.ancestors(cheads, inclusive=True)
383 383 if unselected:
384 384 missing_revs = {to_rev(n) for n in self.outgoing.missing}
385 385 missing_revs.add(nullrev)
386 386 root_points = set()
387 387 for r in missing_revs:
388 388 p1, p2 = parent_revs(r)
389 389 if p1 not in missing_revs and p1 not in known_non_heads:
390 390 root_points.add(p1)
391 391 if p2 not in missing_revs and p2 not in known_non_heads:
392 392 root_points.add(p2)
393 393 if root_points:
394 394 heads = unfi.revs('heads(%ld::%ld)', root_points, root_points)
395 395 cheads.update(heads)
396 396 # XXX-perf: could this be a set of revisions?
397 397 return [to_node(r) for r in sorted(cheads)]
398 398
399 399 @property
400 400 def commonheads(self):
401 401 """set of all common heads after changeset bundle push"""
402 402 if self.cgresult:
403 403 return self.futureheads
404 404 else:
405 405 return self.fallbackheads
406 406
407 407
408 408 # mapping of message used when pushing bookmark
409 409 bookmsgmap = {
410 410 b'update': (
411 411 _(b"updating bookmark %s\n"),
412 412 _(b'updating bookmark %s failed\n'),
413 413 ),
414 414 b'export': (
415 415 _(b"exporting bookmark %s\n"),
416 416 _(b'exporting bookmark %s failed\n'),
417 417 ),
418 418 b'delete': (
419 419 _(b"deleting remote bookmark %s\n"),
420 420 _(b'deleting remote bookmark %s failed\n'),
421 421 ),
422 422 }
423 423
424 424
425 425 def push(
426 426 repo,
427 427 remote,
428 428 force=False,
429 429 revs=None,
430 430 newbranch=False,
431 431 bookmarks=(),
432 432 publish=False,
433 433 opargs=None,
434 434 ):
435 435 """Push outgoing changesets (limited by revs) from a local
436 436 repository to remote. Return an integer:
437 437 - None means nothing to push
438 438 - 0 means HTTP error
439 439 - 1 means we pushed and remote head count is unchanged *or*
440 440 we have outgoing changesets but refused to push
441 441 - other values as described by addchangegroup()
442 442 """
443 443 if opargs is None:
444 444 opargs = {}
445 445 pushop = pushoperation(
446 446 repo,
447 447 remote,
448 448 force,
449 449 revs,
450 450 newbranch,
451 451 bookmarks,
452 452 publish,
453 453 **pycompat.strkwargs(opargs),
454 454 )
455 455 if pushop.remote.local():
456 456 missing = (
457 457 set(pushop.repo.requirements) - pushop.remote.local().supported
458 458 )
459 459 if missing:
460 460 msg = _(
461 461 b"required features are not"
462 462 b" supported in the destination:"
463 463 b" %s"
464 464 ) % (b', '.join(sorted(missing)))
465 465 raise error.Abort(msg)
466 466
467 467 if not pushop.remote.canpush():
468 468 raise error.Abort(_(b"destination does not support push"))
469 469
470 470 if not pushop.remote.capable(b'unbundle'):
471 471 raise error.Abort(
472 472 _(
473 473 b'cannot push: destination does not support the '
474 474 b'unbundle wire protocol command'
475 475 )
476 476 )
477 477 for category in sorted(bundle2.read_remote_wanted_sidedata(pushop.remote)):
478 478 # Check that a computer is registered for that category for at least
479 479 # one revlog kind.
480 480 for kind, computers in repo._sidedata_computers.items():
481 481 if computers.get(category):
482 482 break
483 483 else:
484 484 raise error.Abort(
485 485 _(
486 486 b'cannot push: required sidedata category not supported'
487 487 b" by this client: '%s'"
488 488 )
489 489 % pycompat.bytestr(category)
490 490 )
491 491 # get lock as we might write phase data
492 492 wlock = lock = None
493 493 try:
494 494 try:
495 495 # bundle2 push may receive a reply bundle touching bookmarks
496 496 # requiring the wlock. Take it now to ensure proper ordering.
497 497 maypushback = pushop.ui.configbool(
498 498 b'experimental',
499 499 b'bundle2.pushback',
500 500 )
501 501 if (
502 502 (not _forcebundle1(pushop))
503 503 and maypushback
504 504 and not bookmod.bookmarksinstore(repo)
505 505 ):
506 506 wlock = pushop.repo.wlock()
507 507 lock = pushop.repo.lock()
508 508 pushop.trmanager = transactionmanager(
509 509 pushop.repo, b'push-response', pushop.remote.url()
510 510 )
511 511 except error.LockUnavailable as err:
512 512 # source repo cannot be locked.
513 513 # We do not abort the push, but just disable the local phase
514 514 # synchronisation.
515 515 msg = b'cannot lock source repository: %s\n'
516 516 msg %= stringutil.forcebytestr(err)
517 517 pushop.ui.debug(msg)
518 518
519 519 pushop.repo.checkpush(pushop)
520 520 _checkpublish(pushop)
521 521 _pushdiscovery(pushop)
522 522 if not pushop.force:
523 523 _checksubrepostate(pushop)
524 524 if not _forcebundle1(pushop):
525 525 _pushbundle2(pushop)
526 526 _pushchangeset(pushop)
527 527 _pushsyncphase(pushop)
528 528 _pushobsolete(pushop)
529 529 _pushbookmark(pushop)
530 530 if pushop.trmanager is not None:
531 531 pushop.trmanager.close()
532 532 finally:
533 533 lockmod.release(pushop.trmanager, lock, wlock)
534 534
535 535 if repo.ui.configbool(b'experimental', b'remotenames'):
536 536 logexchange.pullremotenames(repo, remote)
537 537
538 538 return pushop
539 539
540 540
541 541 # list of steps to perform discovery before push
542 542 pushdiscoveryorder = []
543 543
544 544 # Mapping between step name and function
545 545 #
546 546 # This exists to help extensions wrap steps if necessary
547 547 pushdiscoverymapping = {}
548 548
549 549
550 550 def pushdiscovery(stepname):
551 551 """decorator for function performing discovery before push
552 552
553 553 The function is added to the step -> function mapping and appended to the
554 554 list of steps. Beware that decorated functions will be added in order (this
555 555 may matter).
556 556
557 557 You can only use this decorator for a new step; if you want to wrap a step
558 558 from an extension, change the pushdiscoverymapping dictionary directly."""
559 559
560 560 def dec(func):
561 561 assert stepname not in pushdiscoverymapping
562 562 pushdiscoverymapping[stepname] = func
563 563 pushdiscoveryorder.append(stepname)
564 564 return func
565 565
566 566 return dec
567 567
568 568
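
An extension can register an extra discovery step with this decorator at load time; it then runs on every push after the built-in steps. A minimal sketch (step name hypothetical):

    from mercurial import exchange

    @exchange.pushdiscovery(b'example-step')
    def _discoverexample(pushop):
        # inspect or annotate the in-flight push operation
        pushop.ui.debug(b'example discovery step ran\n')
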
569 569 def _pushdiscovery(pushop):
570 570 """Run all discovery steps"""
571 571 for stepname in pushdiscoveryorder:
572 572 step = pushdiscoverymapping[stepname]
573 573 step(pushop)
574 574
575 575
576 576 def _checksubrepostate(pushop):
577 577 """Ensure all outgoing referenced subrepo revisions are present locally"""
578 578
579 579 repo = pushop.repo
580 580
581 581 # If the repository does not use subrepos, skip the expensive
582 582 # manifest checks.
583 583 if not len(repo.file(b'.hgsub')) or not len(repo.file(b'.hgsubstate')):
584 584 return
585 585
586 586 for n in pushop.outgoing.missing:
587 587 ctx = repo[n]
588 588
589 589 if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
590 590 for subpath in sorted(ctx.substate):
591 591 sub = ctx.sub(subpath)
592 592 sub.verify(onpush=True)
593 593
594 594
595 595 @pushdiscovery(b'changeset')
596 596 def _pushdiscoverychangeset(pushop):
597 597 """discover the changeset that need to be pushed"""
598 598 fci = discovery.findcommonincoming
599 599 if pushop.revs:
600 600 commoninc = fci(
601 601 pushop.repo,
602 602 pushop.remote,
603 603 force=pushop.force,
604 604 ancestorsof=pushop.revs,
605 605 )
606 606 else:
607 607 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
608 608 common, inc, remoteheads = commoninc
609 609 fco = discovery.findcommonoutgoing
610 610 outgoing = fco(
611 611 pushop.repo,
612 612 pushop.remote,
613 613 onlyheads=pushop.revs,
614 614 commoninc=commoninc,
615 615 force=pushop.force,
616 616 )
617 617 pushop.outgoing = outgoing
618 618 pushop.remoteheads = remoteheads
619 619 pushop.incoming = inc
620 620
621 621
622 622 @pushdiscovery(b'phase')
623 623 def _pushdiscoveryphase(pushop):
624 624 """discover the phase that needs to be pushed
625 625
626 626 (computed for both success and failure case for changesets push)"""
627 627 outgoing = pushop.outgoing
628 628 repo = pushop.repo
629 629 unfi = repo.unfiltered()
630 630 cl = unfi.changelog
631 631 to_rev = cl.index.rev
632 632 remotephases = listkeys(pushop.remote, b'phases')
633 633
634 634 if (
635 635 pushop.ui.configbool(b'ui', b'_usedassubrepo')
636 636 and remotephases # server supports phases
637 637 and not pushop.outgoing.missing # no changesets to be pushed
638 638 and remotephases.get(b'publishing', False)
639 639 ):
640 640 # When:
641 641 # - this is a subrepo push
642 642 # - and remote support phase
643 643 # - and no changeset are to be pushed
644 644 # - and remote is publishing
645 645 # We may be in issue 3781 case!
646 646 # We drop the possible phase synchronisation done by
647 647 # courtesy to publish changesets possibly locally draft
648 648 # on the remote.
649 649 pushop.outdatedphases = []
650 650 pushop.fallbackoutdatedphases = []
651 651 return
652 652
653 653 fallbackheads_rev = {to_rev(n) for n in pushop.fallbackheads}
654 654 pushop.remotephases = phases.RemotePhasesSummary(
655 655 pushop.repo,
656 656 fallbackheads_rev,
657 657 remotephases,
658 658 )
659 659 droots = set(pushop.remotephases.draft_roots)
660 660
661 661 fallback_publishing = pushop.remotephases.publishing
662 662 push_publishing = pushop.remotephases.publishing or pushop.publish
663 663 missing_revs = {to_rev(n) for n in outgoing.missing}
664 664 drafts = unfi._phasecache.get_raw_set(unfi, phases.draft)
665 665
666 666 if fallback_publishing:
667 667 fallback_roots = droots - missing_revs
668 668 revset = b'heads(%ld::%ld)'
669 669 else:
670 670 fallback_roots = droots - drafts
671 671 fallback_roots -= missing_revs
672 672 # Get the list of all revs draft on remote but public here.
673 673 revset = b'heads((%ld::%ld) and public())'
674 674 if not fallback_roots:
675 675 fallback = fallback_rev = []
676 676 else:
677 677 fallback_rev = unfi.revs(revset, fallback_roots, fallbackheads_rev)
678 678 fallback = [repo[r] for r in fallback_rev]
679 679
680 680 if push_publishing:
681 681 published = missing_revs.copy()
682 682 else:
683 683 published = missing_revs - drafts
684 684 if pushop.publish:
685 685 published.update(fallbackheads_rev & drafts)
686 686 elif fallback:
687 687 published.update(fallback_rev)
688 688
689 689 pushop.outdatedphases = [repo[r] for r in cl.headrevs(published)]
690 690 pushop.fallbackoutdatedphases = fallback
691 691
692 692
693 693 @pushdiscovery(b'obsmarker')
694 694 def _pushdiscoveryobsmarkers(pushop):
695 695 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
696 696 return
697 697
698 698 if not pushop.repo.obsstore:
699 699 return
700 700
701 701 if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
702 702 return
703 703
704 704 repo = pushop.repo
705 705 # very naive computation, that can be quite expensive on big repo.
706 706 # However: evolution is currently slow on them anyway.
707 707 revs = repo.revs(b'::%ln', pushop.futureheads)
708 708 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(revs=revs)
709 709
710 710
711 711 @pushdiscovery(b'bookmarks')
712 712 def _pushdiscoverybookmarks(pushop):
713 713 ui = pushop.ui
714 714 repo = pushop.repo.unfiltered()
715 715 remote = pushop.remote
716 716 ui.debug(b"checking for updated bookmarks\n")
717 717 ancestors = ()
718 718 if pushop.revs:
719 719 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
720 720 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
721 721
722 722 remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
723 723
724 724 explicit = {
725 725 repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
726 726 }
727 727
728 728 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
729 729 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
730 730
731 731
732 732 def _processcompared(pushop, pushed, explicit, remotebms, comp):
733 733 """take decision on bookmarks to push to the remote repo
734 734
735 735 Exists to help extensions alter this behavior.
736 736 """
737 737 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
738 738
739 739 repo = pushop.repo
740 740
741 741 for b, scid, dcid in advsrc:
742 742 if b in explicit:
743 743 explicit.remove(b)
744 744 if not pushed or repo[scid].rev() in pushed:
745 745 pushop.outbookmarks.append((b, dcid, scid))
746 746 # search added bookmark
747 747 for b, scid, dcid in addsrc:
748 748 if b in explicit:
749 749 explicit.remove(b)
750 750 if bookmod.isdivergent(b):
751 751 pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b)
752 752 pushop.bkresult = 2
753 753 elif pushed and repo[scid].rev() not in pushed:
754 754 # in case of race or secret
755 755 msg = _(b'cannot push bookmark X without its revision: %s!\n')
756 756 pushop.ui.warn(msg % b)
757 757 pushop.bkresult = 2
758 758 else:
759 759 pushop.outbookmarks.append((b, b'', scid))
760 760 # search for overwritten bookmark
761 761 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
762 762 if b in explicit:
763 763 explicit.remove(b)
764 764 if not pushed or repo[scid].rev() in pushed:
765 765 pushop.outbookmarks.append((b, dcid, scid))
766 766 # search for bookmark to delete
767 767 for b, scid, dcid in adddst:
768 768 if b in explicit:
769 769 explicit.remove(b)
770 770 # treat as "deleted locally"
771 771 pushop.outbookmarks.append((b, dcid, b''))
772 772 # identical bookmarks shouldn't get reported
773 773 for b, scid, dcid in same:
774 774 if b in explicit:
775 775 explicit.remove(b)
776 776
777 777 if explicit:
778 778 explicit = sorted(explicit)
779 779 # we should probably list all of them
780 780 pushop.ui.warn(
781 781 _(
782 782 b'bookmark %s does not exist on the local '
783 783 b'or remote repository!\n'
784 784 )
785 785 % explicit[0]
786 786 )
787 787 pushop.bkresult = 2
788 788
789 789 pushop.outbookmarks.sort()
790 790
791 791
792 792 def _pushcheckoutgoing(pushop):
793 793 outgoing = pushop.outgoing
794 794 unfi = pushop.repo.unfiltered()
795 795 if not outgoing.missing:
796 796 # nothing to push
797 797 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
798 798 return False
799 799 # something to push
800 800 if not pushop.force:
801 801 # if repo.obsstore == False --> no obsolete
802 802 # then, save the iteration
803 803 if unfi.obsstore:
804 804 # these messages are here for 80 char limit reasons
805 805 mso = _(b"push includes obsolete changeset: %s!")
806 806 mspd = _(b"push includes phase-divergent changeset: %s!")
807 807 mscd = _(b"push includes content-divergent changeset: %s!")
808 808 mst = {
809 809 b"orphan": _(b"push includes orphan changeset: %s!"),
810 810 b"phase-divergent": mspd,
811 811 b"content-divergent": mscd,
812 812 }
813 813 # If we are to push and there is at least one
814 814 # obsolete or unstable changeset in missing, at
815 815 # least one of the missing heads will be obsolete or
816 816 # unstable. So checking heads only is ok.
817 817 for node in outgoing.ancestorsof:
818 818 ctx = unfi[node]
819 819 if ctx.obsolete():
820 820 raise error.Abort(mso % ctx)
821 821 elif ctx.isunstable():
822 822 # TODO print more than one instability in the abort
823 823 # message
824 824 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
825 825
826 826 discovery.checkheads(pushop)
827 827 return True
828 828
829 829
830 830 # List of names of steps to perform for an outgoing bundle2, order matters.
831 831 b2partsgenorder = []
832 832
833 833 # Mapping between step name and function
834 834 #
835 835 # This exists to help extensions wrap steps if necessary
836 836 b2partsgenmapping = {}
837 837
838 838
839 839 def b2partsgenerator(stepname, idx=None):
840 840 """decorator for function generating bundle2 part
841 841
842 842 The function is added to the step -> function mapping and appended to the
843 843 list of steps. Beware that decorated functions will be added in order
844 844 (this may matter).
845 845
846 846 You can only use this decorator for new steps; if you want to wrap a step
847 847 from an extension, change the b2partsgenmapping dictionary directly."""
848 848
849 849 def dec(func):
850 850 assert stepname not in b2partsgenmapping
851 851 b2partsgenmapping[stepname] = func
852 852 if idx is None:
853 853 b2partsgenorder.append(stepname)
854 854 else:
855 855 b2partsgenorder.insert(idx, stepname)
856 856 return func
857 857
858 858 return dec
859 859
860 860
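
As the docstring says, wrapping an existing part generator is done by replacing its entry in b2partsgenmapping rather than re-registering the step. A sketch:

    from mercurial import exchange

    _origcs = exchange.b2partsgenmapping[b'changeset']

    def _wrappedcs(pushop, bundler):
        pushop.ui.debug(b'about to generate the changeset part\n')
        return _origcs(pushop, bundler)

    exchange.b2partsgenmapping[b'changeset'] = _wrappedcs
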
861 861 def _pushb2ctxcheckheads(pushop, bundler):
862 862 """Generate race condition checking parts
863 863
864 864 Exists as an independent function to aid extensions
865 865 """
866 866 # * 'force' does not check for push race,
867 867 # * if we don't push anything, there is nothing to check.
868 868 if not pushop.force and pushop.outgoing.ancestorsof:
869 869 allowunrelated = b'related' in bundler.capabilities.get(
870 870 b'checkheads', ()
871 871 )
872 872 emptyremote = pushop.pushbranchmap is None
873 873 if not allowunrelated or emptyremote:
874 874 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
875 875 else:
876 876 affected = set()
877 877 for branch, heads in pushop.pushbranchmap.items():
878 878 remoteheads, newheads, unsyncedheads, discardedheads = heads
879 879 if remoteheads is not None:
880 880 remote = set(remoteheads)
881 881 affected |= set(discardedheads) & remote
882 882 affected |= remote - set(newheads)
883 883 if affected:
884 884 data = iter(sorted(affected))
885 885 bundler.newpart(b'check:updated-heads', data=data)
886 886
887 887
888 888 def _pushing(pushop):
889 889 """return True if we are pushing anything"""
890 890 return bool(
891 891 pushop.outgoing.missing
892 892 or pushop.outdatedphases
893 893 or pushop.outobsmarkers
894 894 or pushop.outbookmarks
895 895 )
896 896
897 897
898 898 @b2partsgenerator(b'check-bookmarks')
899 899 def _pushb2checkbookmarks(pushop, bundler):
900 900 """insert bookmark move checking"""
901 901 if not _pushing(pushop) or pushop.force:
902 902 return
903 903 b2caps = bundle2.bundle2caps(pushop.remote)
904 904 hasbookmarkcheck = b'bookmarks' in b2caps
905 905 if not (pushop.outbookmarks and hasbookmarkcheck):
906 906 return
907 907 data = []
908 908 for book, old, new in pushop.outbookmarks:
909 909 data.append((book, old))
910 910 checkdata = bookmod.binaryencode(pushop.repo, data)
911 911 bundler.newpart(b'check:bookmarks', data=checkdata)
912 912
913 913
914 914 @b2partsgenerator(b'check-phases')
915 915 def _pushb2checkphases(pushop, bundler):
916 916 """insert phase move checking"""
917 917 if not _pushing(pushop) or pushop.force:
918 918 return
919 919 b2caps = bundle2.bundle2caps(pushop.remote)
920 920 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
921 921 if pushop.remotephases is not None and hasphaseheads:
922 922 # check that the remote phase has not changed
923 923 checks = {p: [] for p in phases.allphases}
924 924 to_node = pushop.repo.unfiltered().changelog.node
925 925 checks[phases.public].extend(
926 926 to_node(r) for r in pushop.remotephases.public_heads
927 927 )
928 928 checks[phases.draft].extend(
929 929 to_node(r) for r in pushop.remotephases.draft_roots
930 930 )
931 931 if any(checks.values()):
932 932 for phase in checks:
933 933 checks[phase].sort()
934 934 checkdata = phases.binaryencode(checks)
935 935 bundler.newpart(b'check:phases', data=checkdata)
936 936
937 937
938 938 @b2partsgenerator(b'changeset')
939 939 def _pushb2ctx(pushop, bundler):
940 940 """handle changegroup push through bundle2
941 941
942 942 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
943 943 """
944 944 if b'changesets' in pushop.stepsdone:
945 945 return
946 946 pushop.stepsdone.add(b'changesets')
947 947 # Send known heads to the server for race detection.
948 948 if not _pushcheckoutgoing(pushop):
949 949 return
950 950 pushop.repo.prepushoutgoinghooks(pushop)
951 951
952 952 _pushb2ctxcheckheads(pushop, bundler)
953 953
954 954 b2caps = bundle2.bundle2caps(pushop.remote)
955 955 version = b'01'
956 956 cgversions = b2caps.get(b'changegroup')
957 957 if cgversions: # 3.1 and 3.2 ship with an empty value
958 958 cgversions = [
959 959 v
960 960 for v in cgversions
961 961 if v in changegroup.supportedoutgoingversions(pushop.repo)
962 962 ]
963 963 if not cgversions:
964 964 raise error.Abort(_(b'no common changegroup version'))
965 965 version = max(cgversions)
966 966
967 967 remote_sidedata = bundle2.read_remote_wanted_sidedata(pushop.remote)
968 968 cgstream = changegroup.makestream(
969 969 pushop.repo,
970 970 pushop.outgoing,
971 971 version,
972 972 b'push',
973 973 bundlecaps=b2caps,
974 974 remote_sidedata=remote_sidedata,
975 975 )
976 976 cgpart = bundler.newpart(b'changegroup', data=cgstream)
977 977 if cgversions:
978 978 cgpart.addparam(b'version', version)
979 979 if scmutil.istreemanifest(pushop.repo):
980 980 cgpart.addparam(b'treemanifest', b'1')
981 981 if repository.REPO_FEATURE_SIDE_DATA in pushop.repo.features:
982 982 cgpart.addparam(b'exp-sidedata', b'1')
983 983
984 984 def handlereply(op):
985 985 """extract addchangegroup returns from server reply"""
986 986 cgreplies = op.records.getreplies(cgpart.id)
987 987 assert len(cgreplies[b'changegroup']) == 1
988 988 pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
989 989
990 990 return handlereply
991 991
992 992
993 993 @b2partsgenerator(b'phase')
994 994 def _pushb2phases(pushop, bundler):
995 995 """handle phase push through bundle2"""
996 996 if b'phases' in pushop.stepsdone:
997 997 return
998 998 b2caps = bundle2.bundle2caps(pushop.remote)
999 999 ui = pushop.repo.ui
1000 1000
1001 1001 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1002 1002 haspushkey = b'pushkey' in b2caps
1003 1003 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1004 1004
1005 1005 if hasphaseheads and not legacyphase:
1006 1006 return _pushb2phaseheads(pushop, bundler)
1007 1007 elif haspushkey:
1008 1008 return _pushb2phasespushkey(pushop, bundler)
1009 1009
1010 1010
1011 1011 def _pushb2phaseheads(pushop, bundler):
1012 1012 """push phase information through a bundle2 - binary part"""
1013 1013 pushop.stepsdone.add(b'phases')
1014 1014 if pushop.outdatedphases:
1015 1015 updates = {p: [] for p in phases.allphases}
1016 1016 updates[0].extend(h.node() for h in pushop.outdatedphases)
1017 1017 phasedata = phases.binaryencode(updates)
1018 1018 bundler.newpart(b'phase-heads', data=phasedata)
1019 1019
1020 1020
1021 1021 def _pushb2phasespushkey(pushop, bundler):
1022 1022 """push phase information through a bundle2 - pushkey part"""
1023 1023 pushop.stepsdone.add(b'phases')
1024 1024 part2node = []
1025 1025
1026 1026 def handlefailure(pushop, exc):
1027 1027 targetid = int(exc.partid)
1028 1028 for partid, node in part2node:
1029 1029 if partid == targetid:
1030 1030 raise error.Abort(_(b'updating %s to public failed') % node)
1031 1031
1032 1032 enc = pushkey.encode
1033 1033 for newremotehead in pushop.outdatedphases:
1034 1034 part = bundler.newpart(b'pushkey')
1035 1035 part.addparam(b'namespace', enc(b'phases'))
1036 1036 part.addparam(b'key', enc(newremotehead.hex()))
1037 1037 part.addparam(b'old', enc(b'%d' % phases.draft))
1038 1038 part.addparam(b'new', enc(b'%d' % phases.public))
1039 1039 part2node.append((part.id, newremotehead))
1040 1040 pushop.pkfailcb[part.id] = handlefailure
1041 1041
1042 1042 def handlereply(op):
1043 1043 for partid, node in part2node:
1044 1044 partrep = op.records.getreplies(partid)
1045 1045 results = partrep[b'pushkey']
1046 1046 assert len(results) <= 1
1047 1047 msg = None
1048 1048 if not results:
1049 1049 msg = _(b'server ignored update of %s to public!\n') % node
1050 1050 elif not int(results[0][b'return']):
1051 1051 msg = _(b'updating %s to public failed!\n') % node
1052 1052 if msg is not None:
1053 1053 pushop.ui.warn(msg)
1054 1054
1055 1055 return handlereply
1056 1056
1057 1057
1058 1058 @b2partsgenerator(b'obsmarkers')
1059 1059 def _pushb2obsmarkers(pushop, bundler):
1060 1060 if b'obsmarkers' in pushop.stepsdone:
1061 1061 return
1062 1062 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
1063 1063 if obsolete.commonversion(remoteversions) is None:
1064 1064 return
1065 1065 pushop.stepsdone.add(b'obsmarkers')
1066 1066 if pushop.outobsmarkers:
1067 1067 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1068 1068 bundle2.buildobsmarkerspart(bundler, markers)
1069 1069
1070 1070
1071 1071 @b2partsgenerator(b'bookmarks')
1072 1072 def _pushb2bookmarks(pushop, bundler):
1073 1073 """handle bookmark push through bundle2"""
1074 1074 if b'bookmarks' in pushop.stepsdone:
1075 1075 return
1076 1076 b2caps = bundle2.bundle2caps(pushop.remote)
1077 1077
1078 1078 legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
1079 1079 legacybooks = b'bookmarks' in legacy
1080 1080
1081 1081 if not legacybooks and b'bookmarks' in b2caps:
1082 1082 return _pushb2bookmarkspart(pushop, bundler)
1083 1083 elif b'pushkey' in b2caps:
1084 1084 return _pushb2bookmarkspushkey(pushop, bundler)
1085 1085
1086 1086
1087 1087 def _bmaction(old, new):
1088 1088 """small utility for bookmark pushing"""
1089 1089 if not old:
1090 1090 return b'export'
1091 1091 elif not new:
1092 1092 return b'delete'
1093 1093 return b'update'
1094 1094
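# For instance (a sketch of the mapping above, `node` standing for any
# truthy bookmark node):
#
#   _bmaction(None, node)  -> b'export'  # bookmark does not exist remotely yet
#   _bmaction(node, None)  -> b'delete'  # bookmark was removed locally
#   _bmaction(node, node2) -> b'update'  # bookmark moved to a new node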
1095 1095
1096 1096 def _abortonsecretctx(pushop, node, b):
1097 1097 """abort if a given bookmark points to a secret changeset"""
1098 1098 if node and pushop.repo[node].phase() == phases.secret:
1099 1099 raise error.Abort(
1100 1100 _(b'cannot push bookmark %s as it points to a secret changeset') % b
1101 1101 )
1102 1102
1103 1103
1104 1104 def _pushb2bookmarkspart(pushop, bundler):
1105 1105 pushop.stepsdone.add(b'bookmarks')
1106 1106 if not pushop.outbookmarks:
1107 1107 return
1108 1108
1109 1109 allactions = []
1110 1110 data = []
1111 1111 for book, old, new in pushop.outbookmarks:
1112 1112 _abortonsecretctx(pushop, new, book)
1113 1113 data.append((book, new))
1114 1114 allactions.append((book, _bmaction(old, new)))
1115 1115 checkdata = bookmod.binaryencode(pushop.repo, data)
1116 1116 bundler.newpart(b'bookmarks', data=checkdata)
1117 1117
1118 1118 def handlereply(op):
1119 1119 ui = pushop.ui
1120 1120 # if success
1121 1121 for book, action in allactions:
1122 1122 ui.status(bookmsgmap[action][0] % book)
1123 1123
1124 1124 return handlereply
1125 1125
1126 1126
1127 1127 def _pushb2bookmarkspushkey(pushop, bundler):
1128 1128 pushop.stepsdone.add(b'bookmarks')
1129 1129 part2book = []
1130 1130 enc = pushkey.encode
1131 1131
1132 1132 def handlefailure(pushop, exc):
1133 1133 targetid = int(exc.partid)
1134 1134 for partid, book, action in part2book:
1135 1135 if partid == targetid:
1136 1136 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1137 1137 # we should not be called for parts we did not generate
1138 1138 assert False
1139 1139
1140 1140 for book, old, new in pushop.outbookmarks:
1141 1141 _abortonsecretctx(pushop, new, book)
1142 1142 part = bundler.newpart(b'pushkey')
1143 1143 part.addparam(b'namespace', enc(b'bookmarks'))
1144 1144 part.addparam(b'key', enc(book))
1145 1145 part.addparam(b'old', enc(hex(old)))
1146 1146 part.addparam(b'new', enc(hex(new)))
1147 1147 action = b'update'
1148 1148 if not old:
1149 1149 action = b'export'
1150 1150 elif not new:
1151 1151 action = b'delete'
1152 1152 part2book.append((part.id, book, action))
1153 1153 pushop.pkfailcb[part.id] = handlefailure
1154 1154
1155 1155 def handlereply(op):
1156 1156 ui = pushop.ui
1157 1157 for partid, book, action in part2book:
1158 1158 partrep = op.records.getreplies(partid)
1159 1159 results = partrep[b'pushkey']
1160 1160 assert len(results) <= 1
1161 1161 if not results:
1162 1162 pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
1163 1163 else:
1164 1164 ret = int(results[0][b'return'])
1165 1165 if ret:
1166 1166 ui.status(bookmsgmap[action][0] % book)
1167 1167 else:
1168 1168 ui.warn(bookmsgmap[action][1] % book)
1169 1169 if pushop.bkresult is not None:
1170 1170 pushop.bkresult = 1
1171 1171
1172 1172 return handlereply
1173 1173
1174 1174
1175 1175 @b2partsgenerator(b'pushvars', idx=0)
1176 1176 def _getbundlesendvars(pushop, bundler):
1177 1177 '''send shellvars via bundle2'''
1178 1178 pushvars = pushop.pushvars
1179 1179 if pushvars:
1180 1180 shellvars = {}
1181 1181 for raw in pushvars:
1182 1182 if b'=' not in raw:
1183 1183 msg = (
1184 1184 b"unable to parse variable '%s', should follow "
1185 1185 b"'KEY=VALUE' or 'KEY=' format"
1186 1186 )
1187 1187 raise error.Abort(msg % raw)
1188 1188 k, v = raw.split(b'=', 1)
1189 1189 shellvars[k] = v
1190 1190
1191 1191 part = bundler.newpart(b'pushvars')
1192 1192
1193 1193 for key, value in shellvars.items():
1194 1194 part.addparam(key, value, mandatory=False)
1195 1195
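# A sketch of the client-side input this consumes: each pushvar is a raw
# 'KEY=VALUE' (or 'KEY=') bytestring, typically from a command line such
# as the following (hypothetical variable names):
#
#   hg push --pushvars "DEBUG=1" --pushvars "BYPASS_REVIEW="
#
# which would yield the non-mandatory part parameters
# {b'DEBUG': b'1', b'BYPASS_REVIEW': b''} on the 'pushvars' part.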
1196 1196
1197 1197 def _pushbundle2(pushop):
1198 1198 """push data to the remote using bundle2
1199 1199
1200 1200 The only currently supported type of data is changegroup but this will
1201 1201 evolve in the future."""
1202 1202 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1203 1203 pushback = pushop.trmanager and pushop.ui.configbool(
1204 1204 b'experimental', b'bundle2.pushback'
1205 1205 )
1206 1206
1207 1207 # create reply capability
1208 1208 capsblob = bundle2.encodecaps(
1209 1209 bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
1210 1210 )
1211 1211 bundler.newpart(b'replycaps', data=capsblob)
1212 1212 replyhandlers = []
1213 1213 for partgenname in b2partsgenorder:
1214 1214 partgen = b2partsgenmapping[partgenname]
1215 1215 ret = partgen(pushop, bundler)
1216 1216 if callable(ret):
1217 1217 replyhandlers.append(ret)
1218 1218 # do not push if nothing to push
1219 1219 if bundler.nbparts <= 1:
1220 1220 return
1221 1221 stream = util.chunkbuffer(bundler.getchunks())
1222 1222 try:
1223 1223 try:
1224 1224 with pushop.remote.commandexecutor() as e:
1225 1225 reply = e.callcommand(
1226 1226 b'unbundle',
1227 1227 {
1228 1228 b'bundle': stream,
1229 1229 b'heads': [b'force'],
1230 1230 b'url': pushop.remote.url(),
1231 1231 },
1232 1232 ).result()
1233 1233 except error.BundleValueError as exc:
1234 1234 raise error.RemoteError(_(b'missing support for %s') % exc)
1235 1235 try:
1236 1236 trgetter = None
1237 1237 if pushback:
1238 1238 trgetter = pushop.trmanager.transaction
1239 1239 op = bundle2.processbundle(
1240 1240 pushop.repo,
1241 1241 reply,
1242 1242 trgetter,
1243 1243 remote=pushop.remote,
1244 1244 )
1245 1245 except error.BundleValueError as exc:
1246 1246 raise error.RemoteError(_(b'missing support for %s') % exc)
1247 1247 except bundle2.AbortFromPart as exc:
1248 1248 pushop.ui.error(_(b'remote: %s\n') % exc)
1249 1249 if exc.hint is not None:
1250 1250 pushop.ui.error(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1251 1251 raise error.RemoteError(_(b'push failed on remote'))
1252 1252 except error.PushkeyFailed as exc:
1253 1253 partid = int(exc.partid)
1254 1254 if partid not in pushop.pkfailcb:
1255 1255 raise
1256 1256 pushop.pkfailcb[partid](pushop, exc)
1257 1257 for rephand in replyhandlers:
1258 1258 rephand(op)
1259 1259
1260 1260
1261 1261 def _pushchangeset(pushop):
1262 1262 """Make the actual push of changeset bundle to remote repo"""
1263 1263 if b'changesets' in pushop.stepsdone:
1264 1264 return
1265 1265 pushop.stepsdone.add(b'changesets')
1266 1266 if not _pushcheckoutgoing(pushop):
1267 1267 return
1268 1268
1269 1269 # Should have verified this in push().
1270 1270 assert pushop.remote.capable(b'unbundle')
1271 1271
1272 1272 pushop.repo.prepushoutgoinghooks(pushop)
1273 1273 outgoing = pushop.outgoing
1274 1274 # TODO: get bundlecaps from remote
1275 1275 bundlecaps = None
1276 1276 # create a changegroup from local
1277 1277 if pushop.revs is None and not (
1278 1278 outgoing.excluded or pushop.repo.changelog.filteredrevs
1279 1279 ):
1280 1280 # push everything,
1281 1281 # use the fast path, no race possible on push
1282 1282 fastpath = True
1283 1283 else:
1284 1284 fastpath = False
1285 1285
1286 1286 cg = changegroup.makechangegroup(
1287 1287 pushop.repo,
1288 1288 outgoing,
1289 1289 b'01',
1290 1290 b'push',
1291 1291 fastpath=fastpath,
1292 1292 bundlecaps=bundlecaps,
1293 1293 )
1294 1294
1295 1295 # apply changegroup to remote
1296 1296 # local repo finds heads on server, finds out what
1297 1297 # revs it must push. once revs are transferred, if the server
1298 1298 # finds it has different heads (someone else won
1299 1299 # commit/push race), server aborts.
1300 1300 if pushop.force:
1301 1301 remoteheads = [b'force']
1302 1302 else:
1303 1303 remoteheads = pushop.remoteheads
1304 1304 # ssh: return remote's addchangegroup()
1305 1305 # http: return remote's addchangegroup() or 0 for error
1306 1306 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1307 1307
1308 1308
1309 1309 def _pushsyncphase(pushop):
1310 1310 """synchronise phase information locally and remotely"""
1311 1311 cheads = pushop.commonheads
1312 1312 # even when we don't push, exchanging phase data is useful
1313 1313 remotephases = listkeys(pushop.remote, b'phases')
1314 1314 if (
1315 1315 pushop.ui.configbool(b'ui', b'_usedassubrepo')
1316 1316 and remotephases # server supports phases
1317 1317 and pushop.cgresult is None # nothing was pushed
1318 1318 and remotephases.get(b'publishing', False)
1319 1319 ):
1320 1320 # When:
1321 1321 # - this is a subrepo push
1322 1322 # - and the remote supports phases
1323 1323 # - and no changeset was pushed
1324 1324 # - and the remote is publishing
1325 1325 # We may be in the issue 3871 case!
1326 1326 # We drop the phase synchronisation that would otherwise be done as
1327 1327 # a courtesy to publish changesets that are possibly draft locally
1328 1328 # on the remote.
1329 1329 remotephases = {b'publishing': b'True'}
1330 1330 if not remotephases: # old server, or public-only reply from a non-publishing one
1331 1331 _localphasemove(pushop, cheads)
1332 1332 # don't push any phase data as there is nothing to push
1333 1333 else:
1334 1334 unfi = pushop.repo.unfiltered()
1335 1335 to_rev = unfi.changelog.index.rev
1336 1336 to_node = unfi.changelog.node
1337 1337 cheads_revs = [to_rev(n) for n in cheads]
1338 1338 pheads_revs, _dr = phases.analyze_remote_phases(
1339 1339 pushop.repo,
1340 1340 cheads_revs,
1341 1341 remotephases,
1342 1342 )
1343 1343 pheads = [to_node(r) for r in pheads_revs]
1344 1344 ### Apply remote phase on local
1345 1345 if remotephases.get(b'publishing', False):
1346 1346 _localphasemove(pushop, cheads)
1347 1347 else: # publish = False
1348 1348 _localphasemove(pushop, pheads)
1349 1349 _localphasemove(pushop, cheads, phases.draft)
1350 1350 ### Apply local phase on remote
1351 1351
1352 1352 if pushop.cgresult:
1353 1353 if b'phases' in pushop.stepsdone:
1354 1354 # phases already pushed though bundle2
1355 1355 return
1356 1356 outdated = pushop.outdatedphases
1357 1357 else:
1358 1358 outdated = pushop.fallbackoutdatedphases
1359 1359
1360 1360 pushop.stepsdone.add(b'phases')
1361 1361
1362 1362 # filter heads already turned public by the push
1363 1363 outdated = [c for c in outdated if c.node() not in pheads]
1364 1364 # fallback to independent pushkey command
1365 1365 for newremotehead in outdated:
1366 1366 with pushop.remote.commandexecutor() as e:
1367 1367 r = e.callcommand(
1368 1368 b'pushkey',
1369 1369 {
1370 1370 b'namespace': b'phases',
1371 1371 b'key': newremotehead.hex(),
1372 1372 b'old': b'%d' % phases.draft,
1373 1373 b'new': b'%d' % phases.public,
1374 1374 },
1375 1375 ).result()
1376 1376
1377 1377 if not r:
1378 1378 pushop.ui.warn(
1379 1379 _(b'updating %s to public failed!\n') % newremotehead
1380 1380 )
1381 1381
1382 1382
1383 1383 def _localphasemove(pushop, nodes, phase=phases.public):
1384 1384 """move <nodes> to <phase> in the local source repo"""
1385 1385 if pushop.trmanager:
1386 1386 phases.advanceboundary(
1387 1387 pushop.repo, pushop.trmanager.transaction(), phase, nodes
1388 1388 )
1389 1389 else:
1390 1390 # repo is not locked, do not change any phases!
1391 1391 # Inform the user that phases should have been moved when
1392 1392 # applicable.
1393 1393 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1394 1394 phasestr = phases.phasenames[phase]
1395 1395 if actualmoves:
1396 1396 pushop.ui.status(
1397 1397 _(
1398 1398 b'cannot lock source repo, skipping '
1399 1399 b'local %s phase update\n'
1400 1400 )
1401 1401 % phasestr
1402 1402 )
1403 1403
1404 1404
1405 1405 def _pushobsolete(pushop):
1406 1406 """utility function to push obsolete markers to a remote"""
1407 1407 if b'obsmarkers' in pushop.stepsdone:
1408 1408 return
1409 1409 repo = pushop.repo
1410 1410 remote = pushop.remote
1411 1411 pushop.stepsdone.add(b'obsmarkers')
1412 1412 if pushop.outobsmarkers:
1413 1413 pushop.ui.debug(b'try to push obsolete markers to remote\n')
1414 1414 rslts = []
1415 1415 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1416 1416 remotedata = obsolete._pushkeyescape(markers)
1417 1417 for key in sorted(remotedata, reverse=True):
1418 1418 # reverse sort to ensure we end with dump0
1419 1419 data = remotedata[key]
1420 1420 rslts.append(remote.pushkey(b'obsolete', key, b'', data))
1421 1421 if [r for r in rslts if not r]:
1422 1422 msg = _(b'failed to push some obsolete markers!\n')
1423 1423 repo.ui.warn(msg)
1424 1424
1425 1425
1426 1426 def _pushbookmark(pushop):
1427 1427 """Update bookmark position on remote"""
1428 1428 if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
1429 1429 return
1430 1430 pushop.stepsdone.add(b'bookmarks')
1431 1431 ui = pushop.ui
1432 1432 remote = pushop.remote
1433 1433
1434 1434 for b, old, new in pushop.outbookmarks:
1435 1435 action = b'update'
1436 1436 if not old:
1437 1437 action = b'export'
1438 1438 elif not new:
1439 1439 action = b'delete'
1440 1440
1441 1441 with remote.commandexecutor() as e:
1442 1442 r = e.callcommand(
1443 1443 b'pushkey',
1444 1444 {
1445 1445 b'namespace': b'bookmarks',
1446 1446 b'key': b,
1447 1447 b'old': hex(old),
1448 1448 b'new': hex(new),
1449 1449 },
1450 1450 ).result()
1451 1451
1452 1452 if r:
1453 1453 ui.status(bookmsgmap[action][0] % b)
1454 1454 else:
1455 1455 ui.warn(bookmsgmap[action][1] % b)
1456 1456 # discovery may have set the value from an invalid entry
1457 1457 if pushop.bkresult is not None:
1458 1458 pushop.bkresult = 1
1459 1459
1460 1460
1461 1461 class pulloperation:
1462 1462 """A object that represent a single pull operation
1463 1463
1464 1464 It purpose is to carry pull related state and very common operation.
1465 1465
1466 1466 A new should be created at the beginning of each pull and discarded
1467 1467 afterward.
1468 1468 """
1469 1469
1470 1470 def __init__(
1471 1471 self,
1472 1472 repo,
1473 1473 remote,
1474 1474 heads=None,
1475 1475 force=False,
1476 1476 bookmarks=(),
1477 1477 remotebookmarks=None,
1478 1478 streamclonerequested=None,
1479 1479 includepats=None,
1480 1480 excludepats=None,
1481 1481 depth=None,
1482 1482 path=None,
1483 1483 ):
1484 1484 # repo we pull into
1485 1485 self.repo = repo
1486 1486 # repo we pull from
1487 1487 self.remote = remote
1488 1488 # path object used to build this remote
1489 1489 #
1490 1490 # Ideally, the remote peer would carry that directly.
1491 1491 self.remote_path = path
1492 1492 # revisions we try to pull (None means "all")
1493 1493 self.heads = heads
1494 1494 # bookmarks pulled explicitly
1495 1495 self.explicitbookmarks = [
1496 1496 repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
1497 1497 ]
1498 1498 # do we force pull?
1499 1499 self.force = force
1500 1500 # whether a streaming clone was requested
1501 1501 self.streamclonerequested = streamclonerequested
1502 1502 # transaction manager
1503 1503 self.trmanager = None
1504 1504 # set of common changesets between local and remote before pull
1505 1505 self.common = None
1506 1506 # set of pulled heads
1507 1507 self.rheads = None
1508 1508 # list of missing changesets to fetch remotely
1509 1509 self.fetch = None
1510 1510 # remote bookmarks data
1511 1511 self.remotebookmarks = remotebookmarks
1512 1512 # result of changegroup pulling (used as return code by pull)
1513 1513 self.cgresult = None
1514 1514 # list of steps already done
1515 1515 self.stepsdone = set()
1516 1516 # Whether we attempted a clone from pre-generated bundles.
1517 1517 self.clonebundleattempted = False
1518 1518 # Set of file patterns to include.
1519 1519 self.includepats = includepats
1520 1520 # Set of file patterns to exclude.
1521 1521 self.excludepats = excludepats
1522 1522 # Number of ancestor changesets to pull from each pulled head.
1523 1523 self.depth = depth
1524 1524
1525 1525 @util.propertycache
1526 1526 def pulledsubset(self):
1527 1527 """heads of the set of changeset target by the pull"""
1528 1528 # compute target subset
1529 1529 if self.heads is None:
1530 1530 # We pulled every thing possible
1531 1531 # sync on everything common
1532 1532 c = set(self.common)
1533 1533 ret = list(self.common)
1534 1534 for n in self.rheads:
1535 1535 if n not in c:
1536 1536 ret.append(n)
1537 1537 return ret
1538 1538 else:
1539 1539 # We pulled a specific subset
1540 1540 # sync on this subset
1541 1541 return self.heads
1542 1542
1543 1543 @util.propertycache
1544 1544 def canusebundle2(self):
1545 1545 return not _forcebundle1(self)
1546 1546
1547 1547 @util.propertycache
1548 1548 def remotebundle2caps(self):
1549 1549 return bundle2.bundle2caps(self.remote)
1550 1550
1551 1551 def gettransaction(self):
1552 1552 # deprecated; talk to trmanager directly
1553 1553 return self.trmanager.transaction()
1554 1554
1555 1555
1556 1556 class transactionmanager(util.transactional):
1557 1557 """An object to manage the life cycle of a transaction
1558 1558
1559 1559 It creates the transaction on demand and calls the appropriate hooks when
1560 1560 closing the transaction."""
1561 1561
1562 1562 def __init__(self, repo, source, url):
1563 1563 self.repo = repo
1564 1564 self.source = source
1565 1565 self.url = url
1566 1566 self._tr = None
1567 1567
1568 1568 def transaction(self):
1569 1569 """Return an open transaction object, constructing if necessary"""
1570 1570 if not self._tr:
1571 1571 trname = b'%s\n%s' % (self.source, urlutil.hidepassword(self.url))
1572 1572 self._tr = self.repo.transaction(trname)
1573 1573 self._tr.hookargs[b'source'] = self.source
1574 1574 self._tr.hookargs[b'url'] = self.url
1575 1575 return self._tr
1576 1576
1577 1577 def close(self):
1578 1578 """close transaction if created"""
1579 1579 if self._tr is not None:
1580 1580 self._tr.close()
1581 1581
1582 1582 def release(self):
1583 1583 """release transaction if created"""
1584 1584 if self._tr is not None:
1585 1585 self._tr.release()
1586 1586
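# Being a util.transactional, a transactionmanager can drive the whole
# transaction life cycle as a context manager. A minimal sketch, mirroring
# how pull() below uses pullop.trmanager:
#
#   trmanager = transactionmanager(repo, b'pull', remote.url())
#   with repo.lock(), trmanager:
#       tr = trmanager.transaction()  # created lazily on first use
#       ...                           # apply incoming data under `tr`
#   # normal exit closes the transaction; an exception releases it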
1587 1587
1588 1588 def listkeys(remote, namespace):
1589 1589 with remote.commandexecutor() as e:
1590 1590 return e.callcommand(b'listkeys', {b'namespace': namespace}).result()
1591 1591
1592 1592
1593 1593 def _fullpullbundle2(repo, pullop):
1594 1594 # The server may send a partial reply, i.e. when inlining
1595 1595 # pre-computed bundles. In that case, update the common
1596 1596 # set based on the results and pull another bundle.
1597 1597 #
1598 1598 # There are two indicators that the process is finished:
1599 1599 # - no changeset has been added, or
1600 1600 # - all remote heads are known locally.
1601 1601 # The head check must use the unfiltered view as obsolescence
1602 1602 # markers can hide heads.
1603 1603 unfi = repo.unfiltered()
1604 1604 unficl = unfi.changelog
1605 1605
1606 1606 def headsofdiff(h1, h2):
1607 1607 """Returns heads(h1 % h2)"""
1608 1608 res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
1609 1609 return {ctx.node() for ctx in res}
1610 1610
1611 1611 def headsofunion(h1, h2):
1612 1612 """Returns heads((h1 + h2) - null)"""
1613 1613 res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
1614 1614 return {ctx.node() for ctx in res}
1615 1615
1616 1616 while True:
1617 1617 old_heads = unficl.heads()
1618 1618 clstart = len(unficl)
1619 1619 _pullbundle2(pullop)
1620 1620 if requirements.NARROW_REQUIREMENT in repo.requirements:
1621 1621 # XXX narrow clones filter the heads on the server side during
1622 1622 # XXX getbundle and result in partial replies as well.
1623 1623 # XXX Disable pull bundles in this case as band aid to avoid
1624 1624 # XXX extra round trips.
1625 1625 break
1626 1626 if clstart == len(unficl):
1627 1627 break
1628 1628 if all(unficl.hasnode(n) for n in pullop.rheads):
1629 1629 break
1630 1630 new_heads = headsofdiff(unficl.heads(), old_heads)
1631 1631 pullop.common = headsofunion(new_heads, pullop.common)
1632 1632 pullop.rheads = set(pullop.rheads) - pullop.common
1633 1633
1634 1634
1635 1635 def add_confirm_callback(repo, pullop):
1636 1636 """adds a finalize callback to transaction which can be used to show stats
1637 1637 to user and confirm the pull before committing transaction"""
1638 1638
1639 1639 tr = pullop.trmanager.transaction()
1640 1640 scmutil.registersummarycallback(
1641 1641 repo, tr, txnname=b'pull', as_validator=True
1642 1642 )
1643 1643 reporef = weakref.ref(repo.unfiltered())
1644 1644
1645 1645 def prompt(tr):
1646 1646 repo = reporef()
1647 1647 cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No')
1648 1648 if repo.ui.promptchoice(cm):
1649 1649 raise error.Abort(b"user aborted")
1650 1650
1651 1651 tr.addvalidator(b'900-pull-prompt', prompt)
1652 1652
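# The same prompt can be enabled unconditionally from configuration (see
# the configbool lookup in pull() below). A minimal hgrc sketch:
#
#   [pull]
#   confirm = yes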
1653 1653
1654 1654 def pull(
1655 1655 repo,
1656 1656 remote,
1657 1657 path=None,
1658 1658 heads=None,
1659 1659 force=False,
1660 1660 bookmarks=(),
1661 1661 opargs=None,
1662 1662 streamclonerequested=None,
1663 1663 includepats=None,
1664 1664 excludepats=None,
1665 1665 depth=None,
1666 1666 confirm=None,
1667 1667 ):
1668 1668 """Fetch repository data from a remote.
1669 1669
1670 1670 This is the main function used to retrieve data from a remote repository.
1671 1671
1672 1672 ``repo`` is the local repository to clone into.
1673 1673 ``remote`` is a peer instance.
1674 1674 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1675 1675 default) means to pull everything from the remote.
1676 1676 ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
1677 1677 default, all remote bookmarks are pulled.
1678 1678 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1679 1679 initialization.
1680 1680 ``streamclonerequested`` is a boolean indicating whether a "streaming
1681 1681 clone" is requested. A "streaming clone" is essentially a raw file copy
1682 1682 of revlogs from the server. This only works when the local repository is
1683 1683 empty. The default value of ``None`` means to respect the server
1684 1684 configuration for preferring stream clones.
1685 1685 ``includepats`` and ``excludepats`` define explicit file patterns to
1686 1686 include and exclude in storage, respectively. If not defined, narrow
1687 1687 patterns from the repo instance are used, if available.
1688 1688 ``depth`` is an integer indicating the DAG depth of history we're
1689 1689 interested in. If defined, for each revision specified in ``heads``, we
1690 1690 will fetch up to this many of its ancestors and data associated with them.
1691 1691 ``confirm`` is a boolean indicating whether the pull should be confirmed
1692 1692 before committing the transaction. This overrides HGPLAIN.
1693 1693
1694 1694 Returns the ``pulloperation`` created for this pull.
1695 1695 """
1696 1696 if opargs is None:
1697 1697 opargs = {}
1698 1698
1699 1699 # We allow the narrow patterns to be passed in explicitly to provide more
1700 1700 # flexibility for API consumers.
1701 1701 if includepats is not None or excludepats is not None:
1702 1702 includepats = includepats or set()
1703 1703 excludepats = excludepats or set()
1704 1704 else:
1705 1705 includepats, excludepats = repo.narrowpats
1706 1706
1707 1707 narrowspec.validatepatterns(includepats)
1708 1708 narrowspec.validatepatterns(excludepats)
1709 1709
1710 1710 pullop = pulloperation(
1711 1711 repo,
1712 1712 remote,
1713 1713 path=path,
1714 1714 heads=heads,
1715 1715 force=force,
1716 1716 bookmarks=bookmarks,
1717 1717 streamclonerequested=streamclonerequested,
1718 1718 includepats=includepats,
1719 1719 excludepats=excludepats,
1720 1720 depth=depth,
1721 1721 **pycompat.strkwargs(opargs),
1722 1722 )
1723 1723
1724 1724 peerlocal = pullop.remote.local()
1725 1725 if peerlocal:
1726 1726 missing = set(peerlocal.requirements) - pullop.repo.supported
1727 1727 if missing:
1728 1728 msg = _(
1729 1729 b"required features are not"
1730 1730 b" supported in the destination:"
1731 1731 b" %s"
1732 1732 ) % (b', '.join(sorted(missing)))
1733 1733 raise error.Abort(msg)
1734 1734
1735 1735 for category in repo._wanted_sidedata:
1736 1736 # Check that a computer is registered for that category for at least
1737 1737 # one revlog kind.
1738 1738 for kind, computers in repo._sidedata_computers.items():
1739 1739 if computers.get(category):
1740 1740 break
1741 1741 else:
1742 1742 # This should never happen since repos are supposed to be able to
1743 1743 # generate the sidedata they require.
1744 1744 raise error.ProgrammingError(
1745 1745 _(
1746 1746 b'sidedata category requested by local side without local'
1747 1747 b"support: '%s'"
1748 1748 )
1749 1749 % pycompat.bytestr(category)
1750 1750 )
1751 1751
1752 1752 pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
1753 1753 wlock = util.nullcontextmanager
1754 1754 if not bookmod.bookmarksinstore(repo):
1755 1755 wlock = repo.wlock
1756 1756 with wlock(), repo.lock(), pullop.trmanager:
1757 1757 if confirm or (
1758 1758 repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain()
1759 1759 ):
1760 1760 add_confirm_callback(repo, pullop)
1761 1761
1762 1762 # This should ideally be in _pullbundle2(). However, it needs to run
1763 1763 # before discovery to avoid extra work.
1764 1764 _maybeapplyclonebundle(pullop)
1765 1765 streamclone.maybeperformlegacystreamclone(pullop)
1766 1766 _pulldiscovery(pullop)
1767 1767 if pullop.canusebundle2:
1768 1768 _fullpullbundle2(repo, pullop)
1769 1769 _pullchangeset(pullop)
1770 1770 _pullphase(pullop)
1771 1771 _pullbookmarks(pullop)
1772 1772 _pullobsolete(pullop)
1773 1773
1774 1774 # storing remotenames
1775 1775 if repo.ui.configbool(b'experimental', b'remotenames'):
1776 1776 logexchange.pullremotenames(repo, remote)
1777 1777
1778 1778 return pullop
1779 1779
1780 1780
1781 1781 # list of steps to perform discovery before pull
1782 1782 pulldiscoveryorder = []
1783 1783
1784 1784 # Mapping between step name and function
1785 1785 #
1786 1786 # This exists to help extensions wrap steps if necessary
1787 1787 pulldiscoverymapping = {}
1788 1788
1789 1789
1790 1790 def pulldiscovery(stepname):
1791 1791 """decorator for function performing discovery before pull
1792 1792
1793 1793 The function is added to the step -> function mapping and appended to the
1794 1794 list of steps. Beware that decorated functions will be added in order (this
1795 1795 may matter).
1796 1796
1797 1797 You can only use this decorator for a new step; if you want to wrap a step
1798 1798 from an extension, change the pulldiscoverymapping dictionary directly."""
1799 1799
1800 1800 def dec(func):
1801 1801 assert stepname not in pulldiscoverymapping
1802 1802 pulldiscoverymapping[stepname] = func
1803 1803 pulldiscoveryorder.append(stepname)
1804 1804 return func
1805 1805
1806 1806 return dec
1807 1807
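# A sketch of how a new discovery step would be registered (hypothetical
# step name and body; the built-in steps below use the same pattern):
#
#   @pulldiscovery(b'my-extra-data')
#   def _pulldiscoverymyextradata(pullop):
#       """fetch hypothetical extra data before the pull proper"""
#       ...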
1808 1808
1809 1809 def _pulldiscovery(pullop):
1810 1810 """Run all discovery steps"""
1811 1811 for stepname in pulldiscoveryorder:
1812 1812 step = pulldiscoverymapping[stepname]
1813 1813 step(pullop)
1814 1814
1815 1815
1816 1816 @pulldiscovery(b'b1:bookmarks')
1817 1817 def _pullbookmarkbundle1(pullop):
1818 1818 """fetch bookmark data in bundle1 case
1819 1819
1820 1820 If not using bundle2, we have to fetch bookmarks before changeset
1821 1821 discovery to reduce the chance and impact of race conditions."""
1822 1822 if pullop.remotebookmarks is not None:
1823 1823 return
1824 1824 if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
1825 1825 # all known bundle2 servers now support listkeys, but let's be nice to
1826 1826 # new implementations.
1827 1827 return
1828 1828 books = listkeys(pullop.remote, b'bookmarks')
1829 1829 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1830 1830
1831 1831
1832 1832 @pulldiscovery(b'changegroup')
1833 1833 def _pulldiscoverychangegroup(pullop):
1834 1834 """discovery phase for the pull
1835 1835
1836 1836 Currently handles changeset discovery only; will change to handle all
1837 1837 discovery at some point."""
1838 1838 tmp = discovery.findcommonincoming(
1839 1839 pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
1840 1840 )
1841 1841 common, fetch, rheads = tmp
1842 1842 has_node = pullop.repo.unfiltered().changelog.index.has_node
1843 1843 if fetch and rheads:
1844 1844 # If a remote head is filtered locally, put it back in common.
1845 1845 #
1846 1846 # This is a hackish solution to catch most of the "common but locally
1847 1847 # hidden" situations. We do not perform discovery on the unfiltered
1848 1848 # repository because it ends up doing a pathological number of round
1849 1849 # trips for a huge amount of changesets we do not care about.
1850 1850 #
1851 1851 # If a set of such "common but filtered" changesets exists on the server
1852 1852 # but does not include a remote head, we will not be able to detect it.
1853 1853 scommon = set(common)
1854 1854 for n in rheads:
1855 1855 if has_node(n):
1856 1856 if n not in scommon:
1857 1857 common.append(n)
1858 1858 if set(rheads).issubset(set(common)):
1859 1859 fetch = []
1860 1860 pullop.common = common
1861 1861 pullop.fetch = fetch
1862 1862 pullop.rheads = rheads
1863 1863
1864 1864
1865 1865 def _pullbundle2(pullop):
1866 1866 """pull data using bundle2
1867 1867
1868 1868 For now, the only supported data are changegroup."""
1869 1869 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1870 1870
1871 1871 # make ui easier to access
1872 1872 ui = pullop.repo.ui
1873 1873
1874 1874 # At the moment we don't do stream clones over bundle2. If that is
1875 1875 # implemented then here's where the check for that will go.
1876 1876 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1877 1877
1878 1878 # declare pull perimeters
1879 1879 kwargs[b'common'] = pullop.common
1880 1880 kwargs[b'heads'] = pullop.heads or pullop.rheads
1881 1881
1882 1882 # check that the server supports narrow, then add includepats and excludepats
1883 1883 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1884 1884 if servernarrow and pullop.includepats:
1885 1885 kwargs[b'includepats'] = pullop.includepats
1886 1886 if servernarrow and pullop.excludepats:
1887 1887 kwargs[b'excludepats'] = pullop.excludepats
1888 1888
1889 1889 if streaming:
1890 1890 kwargs[b'cg'] = False
1891 1891 kwargs[b'stream'] = True
1892 1892 pullop.stepsdone.add(b'changegroup')
1893 1893 pullop.stepsdone.add(b'phases')
1894 1894
1895 1895 else:
1896 1896 # pulling changegroup
1897 1897 pullop.stepsdone.add(b'changegroup')
1898 1898
1899 1899 kwargs[b'cg'] = pullop.fetch
1900 1900
1901 1901 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1902 1902 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1903 1903 if not legacyphase and hasbinaryphase:
1904 1904 kwargs[b'phases'] = True
1905 1905 pullop.stepsdone.add(b'phases')
1906 1906
1907 1907 if b'listkeys' in pullop.remotebundle2caps:
1908 1908 if b'phases' not in pullop.stepsdone:
1909 1909 kwargs[b'listkeys'] = [b'phases']
1910 1910
1911 1911 bookmarksrequested = False
1912 1912 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1913 1913 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1914 1914
1915 1915 if pullop.remotebookmarks is not None:
1916 1916 pullop.stepsdone.add(b'request-bookmarks')
1917 1917
1918 1918 if (
1919 1919 b'request-bookmarks' not in pullop.stepsdone
1920 1920 and pullop.remotebookmarks is None
1921 1921 and not legacybookmark
1922 1922 and hasbinarybook
1923 1923 ):
1924 1924 kwargs[b'bookmarks'] = True
1925 1925 bookmarksrequested = True
1926 1926
1927 1927 if b'listkeys' in pullop.remotebundle2caps:
1928 1928 if b'request-bookmarks' not in pullop.stepsdone:
1929 1929 # make sure to always include bookmark data when migrating
1930 1930 # `hg incoming --bundle` to using this function.
1931 1931 pullop.stepsdone.add(b'request-bookmarks')
1932 1932 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1933 1933
1934 1934 # If this is a full pull / clone and the server supports the clone bundles
1935 1935 # feature, tell the server whether we attempted a clone bundle. The
1936 1936 # presence of this flag indicates the client supports clone bundles. This
1937 1937 # will enable the server to treat clients that support clone bundles
1938 1938 # differently from those that don't.
1939 1939 if (
1940 1940 pullop.remote.capable(b'clonebundles')
1941 1941 and pullop.heads is None
1942 1942 and list(pullop.common) == [pullop.repo.nullid]
1943 1943 ):
1944 1944 kwargs[b'cbattempted'] = pullop.clonebundleattempted
1945 1945
1946 1946 if streaming:
1947 1947 pullop.repo.ui.status(_(b'streaming all changes\n'))
1948 1948 elif not pullop.fetch:
1949 1949 pullop.repo.ui.status(_(b"no changes found\n"))
1950 1950 pullop.cgresult = 0
1951 1951 else:
1952 1952 if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
1953 1953 pullop.repo.ui.status(_(b"requesting all changes\n"))
1954 1954 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1955 1955 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1956 1956 if obsolete.commonversion(remoteversions) is not None:
1957 1957 kwargs[b'obsmarkers'] = True
1958 1958 pullop.stepsdone.add(b'obsmarkers')
1959 1959 _pullbundle2extraprepare(pullop, kwargs)
1960 1960
1961 1961 remote_sidedata = bundle2.read_remote_wanted_sidedata(pullop.remote)
1962 1962 if remote_sidedata:
1963 1963 kwargs[b'remote_sidedata'] = remote_sidedata
1964 1964
1965 1965 with pullop.remote.commandexecutor() as e:
1966 1966 args = dict(kwargs)
1967 1967 args[b'source'] = b'pull'
1968 1968 bundle = e.callcommand(b'getbundle', args).result()
1969 1969
1970 1970 try:
1971 1971 op = bundle2.bundleoperation(
1972 1972 pullop.repo,
1973 1973 pullop.gettransaction,
1974 1974 source=b'pull',
1975 1975 remote=pullop.remote,
1976 1976 )
1977 1977 op.modes[b'bookmarks'] = b'records'
1978 1978 bundle2.processbundle(
1979 1979 pullop.repo,
1980 1980 bundle,
1981 1981 op=op,
1982 1982 remote=pullop.remote,
1983 1983 )
1984 1984 except bundle2.AbortFromPart as exc:
1985 1985 pullop.repo.ui.error(_(b'remote: abort: %s\n') % exc)
1986 1986 raise error.RemoteError(_(b'pull failed on remote'), hint=exc.hint)
1987 1987 except error.BundleValueError as exc:
1988 1988 raise error.RemoteError(_(b'missing support for %s') % exc)
1989 1989
1990 1990 if pullop.fetch:
1991 1991 pullop.cgresult = bundle2.combinechangegroupresults(op)
1992 1992
1993 1993 # processing phases change
1994 1994 for namespace, value in op.records[b'listkeys']:
1995 1995 if namespace == b'phases':
1996 1996 _pullapplyphases(pullop, value)
1997 1997
1998 1998 # processing bookmark update
1999 1999 if bookmarksrequested:
2000 2000 books = {}
2001 2001 for record in op.records[b'bookmarks']:
2002 2002 books[record[b'bookmark']] = record[b"node"]
2003 2003 pullop.remotebookmarks = books
2004 2004 else:
2005 2005 for namespace, value in op.records[b'listkeys']:
2006 2006 if namespace == b'bookmarks':
2007 2007 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
2008 2008
2009 2009 # bookmark data were either already there or pulled in the bundle
2010 2010 if pullop.remotebookmarks is not None:
2011 2011 _pullbookmarks(pullop)
2012 2012
2013 2013
2014 2014 def _pullbundle2extraprepare(pullop, kwargs):
2015 2015 """hook function so that extensions can extend the getbundle call"""
2016 2016
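# A sketch of how an extension could use this hook (hypothetical wrapper
# and argument name, using the usual wrapfunction pattern):
#
#   from mercurial import exchange, extensions
#
#   def _myextraprepare(orig, pullop, kwargs):
#       kwargs[b'my-extra-arg'] = b'1'
#       return orig(pullop, kwargs)
#
#   def extsetup(ui):
#       extensions.wrapfunction(
#           exchange, '_pullbundle2extraprepare', _myextraprepare
#       )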
2017 2017
2018 2018 def _pullchangeset(pullop):
2019 2019 """pull changeset from unbundle into the local repo"""
2020 2020 # We delay opening the transaction as late as possible so we
2021 2021 # don't open a transaction for nothing and don't break future useful
2022 2022 # rollback calls
2023 2023 if b'changegroup' in pullop.stepsdone:
2024 2024 return
2025 2025 pullop.stepsdone.add(b'changegroup')
2026 2026 if not pullop.fetch:
2027 2027 pullop.repo.ui.status(_(b"no changes found\n"))
2028 2028 pullop.cgresult = 0
2029 2029 return
2030 2030 tr = pullop.gettransaction()
2031 2031 if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
2032 2032 pullop.repo.ui.status(_(b"requesting all changes\n"))
2033 2033 elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
2034 2034 # issue1320, avoid a race if remote changed after discovery
2035 2035 pullop.heads = pullop.rheads
2036 2036
2037 2037 if pullop.remote.capable(b'getbundle'):
2038 2038 # TODO: get bundlecaps from remote
2039 2039 cg = pullop.remote.getbundle(
2040 2040 b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
2041 2041 )
2042 2042 elif pullop.heads is None:
2043 2043 with pullop.remote.commandexecutor() as e:
2044 2044 cg = e.callcommand(
2045 2045 b'changegroup',
2046 2046 {
2047 2047 b'nodes': pullop.fetch,
2048 2048 b'source': b'pull',
2049 2049 },
2050 2050 ).result()
2051 2051
2052 2052 elif not pullop.remote.capable(b'changegroupsubset'):
2053 2053 raise error.Abort(
2054 2054 _(
2055 2055 b"partial pull cannot be done because "
2056 2056 b"other repository doesn't support "
2057 2057 b"changegroupsubset."
2058 2058 )
2059 2059 )
2060 2060 else:
2061 2061 with pullop.remote.commandexecutor() as e:
2062 2062 cg = e.callcommand(
2063 2063 b'changegroupsubset',
2064 2064 {
2065 2065 b'bases': pullop.fetch,
2066 2066 b'heads': pullop.heads,
2067 2067 b'source': b'pull',
2068 2068 },
2069 2069 ).result()
2070 2070
2071 2071 bundleop = bundle2.applybundle(
2072 2072 pullop.repo,
2073 2073 cg,
2074 2074 tr,
2075 2075 b'pull',
2076 2076 pullop.remote.url(),
2077 2077 remote=pullop.remote,
2078 2078 )
2079 2079 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2080 2080
2081 2081
2082 2082 def _pullphase(pullop):
2083 2083 # Get remote phases data from remote
2084 2084 if b'phases' in pullop.stepsdone:
2085 2085 return
2086 2086 remotephases = listkeys(pullop.remote, b'phases')
2087 2087 _pullapplyphases(pullop, remotephases)
2088 2088
2089 2089
2090 2090 def _pullapplyphases(pullop, remotephases):
2091 2091 """apply phase movement from observed remote state"""
2092 2092 if b'phases' in pullop.stepsdone:
2093 2093 return
2094 2094 pullop.stepsdone.add(b'phases')
2095 2095 publishing = bool(remotephases.get(b'publishing', False))
2096 2096 if remotephases and not publishing:
2097 2097 unfi = pullop.repo.unfiltered()
2098 2098 to_rev = unfi.changelog.index.rev
2099 2099 to_node = unfi.changelog.node
2100 2100 pulledsubset_revs = [to_rev(n) for n in pullop.pulledsubset]
2101 2101 # remote is new and non-publishing
2102 2102 pheads_revs, _dr = phases.analyze_remote_phases(
2103 2103 pullop.repo,
2104 2104 pulledsubset_revs,
2105 2105 remotephases,
2106 2106 )
2107 2107 pheads = [to_node(r) for r in pheads_revs]
2108 2108 dheads = pullop.pulledsubset
2109 2109 else:
2110 2110 # Remote is old or publishing; all common changesets
2111 2111 # should be seen as public
2112 2112 pheads = pullop.pulledsubset
2113 2113 dheads = []
2114 2114 unfi = pullop.repo.unfiltered()
2115 2115 phase = unfi._phasecache.phase
2116 2116 rev = unfi.changelog.index.get_rev
2117 2117 public = phases.public
2118 2118 draft = phases.draft
2119 2119
2120 2120 # exclude changesets already public locally and update the others
2121 2121 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
2122 2122 if pheads:
2123 2123 tr = pullop.gettransaction()
2124 2124 phases.advanceboundary(pullop.repo, tr, public, pheads)
2125 2125
2126 2126 # exclude changesets already draft locally and update the others
2127 2127 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
2128 2128 if dheads:
2129 2129 tr = pullop.gettransaction()
2130 2130 phases.advanceboundary(pullop.repo, tr, draft, dheads)
2131 2131
2132 2132
2133 2133 def _pullbookmarks(pullop):
2134 2134 """process the remote bookmark information to update the local one"""
2135 2135 if b'bookmarks' in pullop.stepsdone:
2136 2136 return
2137 2137 pullop.stepsdone.add(b'bookmarks')
2138 2138 repo = pullop.repo
2139 2139 remotebookmarks = pullop.remotebookmarks
2140 2140 bookmarks_mode = None
2141 2141 if pullop.remote_path is not None:
2142 2142 bookmarks_mode = pullop.remote_path.bookmarks_mode
2143 2143 bookmod.updatefromremote(
2144 2144 repo.ui,
2145 2145 repo,
2146 2146 remotebookmarks,
2147 2147 pullop.remote.url(),
2148 2148 pullop.gettransaction,
2149 2149 explicit=pullop.explicitbookmarks,
2150 2150 mode=bookmarks_mode,
2151 2151 )
2152 2152
2153 2153
2154 2154 def _pullobsolete(pullop):
2155 2155 """utility function to pull obsolete markers from a remote
2156 2156
2157 2157 `gettransaction` is a function that returns the pull transaction, creating
2158 2158 one if necessary. We return the transaction to inform the calling code that
2159 2159 a new transaction has been created (when applicable).
2160 2160
2161 2161 Exists mostly to allow overriding for experimentation purposes"""
2162 2162 if b'obsmarkers' in pullop.stepsdone:
2163 2163 return
2164 2164 pullop.stepsdone.add(b'obsmarkers')
2165 2165 tr = None
2166 2166 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2167 2167 pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
2168 2168 remoteobs = listkeys(pullop.remote, b'obsolete')
2169 2169 if b'dump0' in remoteobs:
2170 2170 tr = pullop.gettransaction()
2171 2171 markers = []
2172 2172 for key in sorted(remoteobs, reverse=True):
2173 2173 if key.startswith(b'dump'):
2174 2174 data = util.b85decode(remoteobs[key])
2175 2175 version, newmarks = obsolete._readmarkers(data)
2176 2176 markers += newmarks
2177 2177 if markers:
2178 2178 pullop.repo.obsstore.add(tr, markers)
2179 2179 pullop.repo.invalidatevolatilesets()
2180 2180 return tr
2181 2181
2182 2182
2183 2183 def applynarrowacl(repo, kwargs):
2184 2184 """Apply narrow fetch access control.
2185 2185
2186 2186 This massages the named arguments for getbundle wire protocol commands
2187 2187 so requested data is filtered through access control rules.
2188 2188 """
2189 2189 ui = repo.ui
2190 2190 # TODO this assumes existence of HTTP and is a layering violation.
2191 2191 username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
2192 2192 user_includes = ui.configlist(
2193 2193 _NARROWACL_SECTION,
2194 2194 username + b'.includes',
2195 2195 ui.configlist(_NARROWACL_SECTION, b'default.includes'),
2196 2196 )
2197 2197 user_excludes = ui.configlist(
2198 2198 _NARROWACL_SECTION,
2199 2199 username + b'.excludes',
2200 2200 ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
2201 2201 )
2202 2202 if not user_includes:
2203 2203 raise error.Abort(
2204 2204 _(b"%s configuration for user %s is empty")
2205 2205 % (_NARROWACL_SECTION, username)
2206 2206 )
2207 2207
2208 2208 user_includes = [
2209 2209 b'path:.' if p == b'*' else b'path:' + p for p in user_includes
2210 2210 ]
2211 2211 user_excludes = [
2212 2212 b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
2213 2213 ]
2214 2214
2215 2215 req_includes = set(kwargs.get('includepats', []))
2216 2216 req_excludes = set(kwargs.get('excludepats', []))
2217 2217
2218 2218 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
2219 2219 req_includes, req_excludes, user_includes, user_excludes
2220 2220 )
2221 2221
2222 2222 if invalid_includes:
2223 2223 raise error.Abort(
2224 2224 _(b"The following includes are not accessible for %s: %s")
2225 2225 % (username, stringutil.pprint(invalid_includes))
2226 2226 )
2227 2227
2228 2228 new_args = {}
2229 2229 new_args.update(kwargs)
2230 2230 new_args['narrow'] = True
2231 2231 new_args['narrow_acl'] = True
2232 2232 new_args['includepats'] = req_includes
2233 2233 if req_excludes:
2234 2234 new_args['excludepats'] = req_excludes
2235 2235
2236 2236 return new_args
2237 2237
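# A sketch of the matching server-side configuration (the section name is
# whatever _NARROWACL_SECTION holds; '*' expands to 'path:.' as above,
# user and path names are hypothetical):
#
#   [<_NARROWACL_SECTION>]
#   default.includes = *
#   alice.includes = dir1, dir2
#   alice.excludes = dir1/secret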
2238 2238
2239 2239 def _computeellipsis(repo, common, heads, known, match, depth=None):
2240 2240 """Compute the shape of a narrowed DAG.
2241 2241
2242 2242 Args:
2243 2243 repo: The repository we're transferring.
2244 2244 common: The roots of the DAG range we're transferring.
2245 2245 May be just [nullid], which means all ancestors of heads.
2246 2246 heads: The heads of the DAG range we're transferring.
2247 2247 match: The narrowmatcher that allows us to identify relevant changes.
2248 2248 depth: If not None, only consider nodes to be full nodes if they are at
2249 2249 most depth changesets away from one of heads.
2250 2250
2251 2251 Returns:
2252 2252 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
2253 2253
2254 2254 visitnodes: The list of nodes (either full or ellipsis) which
2255 2255 need to be sent to the client.
2256 2256 relevant_nodes: The set of changelog nodes which change a file inside
2257 2257 the narrowspec. The client needs these as non-ellipsis nodes.
2258 2258 ellipsisroots: A dict of {rev: parents} that is used in
2259 2259 narrowchangegroup to produce ellipsis nodes with the
2260 2260 correct parents.
2261 2261 """
2262 2262 cl = repo.changelog
2263 2263 mfl = repo.manifestlog
2264 2264
2265 2265 clrev = cl.rev
2266 2266
2267 2267 commonrevs = {clrev(n) for n in common} | {nullrev}
2268 2268 headsrevs = {clrev(n) for n in heads}
2269 2269
2270 2270 if depth:
2271 2271 revdepth = {h: 0 for h in headsrevs}
2272 2272
2273 2273 ellipsisheads = collections.defaultdict(set)
2274 2274 ellipsisroots = collections.defaultdict(set)
2275 2275
2276 2276 def addroot(head, curchange):
2277 2277 """Add a root to an ellipsis head, splitting heads with 3 roots."""
2278 2278 ellipsisroots[head].add(curchange)
2279 2279 # Recursively split ellipsis heads with 3 roots by finding the
2280 2280 # roots' youngest common descendant which is an elided merge commit.
2281 2281 # That descendant takes 2 of the 3 roots as its own, and becomes a
2282 2282 # root of the head.
2283 2283 while len(ellipsisroots[head]) > 2:
2284 2284 child, roots = splithead(head)
2285 2285 splitroots(head, child, roots)
2286 2286 head = child # Recurse in case we just added a 3rd root
2287 2287
2288 2288 def splitroots(head, child, roots):
2289 2289 ellipsisroots[head].difference_update(roots)
2290 2290 ellipsisroots[head].add(child)
2291 2291 ellipsisroots[child].update(roots)
2292 2292 ellipsisroots[child].discard(child)
2293 2293
2294 2294 def splithead(head):
2295 2295 r1, r2, r3 = sorted(ellipsisroots[head])
2296 2296 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
2297 2297 mid = repo.revs(
2298 2298 b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
2299 2299 )
2300 2300 for j in mid:
2301 2301 if j == nr2:
2302 2302 return nr2, (nr1, nr2)
2303 2303 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
2304 2304 return j, (nr1, nr2)
2305 2305 raise error.Abort(
2306 2306 _(
2307 2307 b'Failed to split up ellipsis node! head: %d, '
2308 2308 b'roots: %d %d %d'
2309 2309 )
2310 2310 % (head, r1, r2, r3)
2311 2311 )
2312 2312
2313 2313 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
2314 2314 visit = reversed(missing)
2315 2315 relevant_nodes = set()
2316 2316 visitnodes = [cl.node(m) for m in missing]
2317 2317 required = set(headsrevs) | known
2318 2318 for rev in visit:
2319 2319 clrev = cl.changelogrevision(rev)
2320 2320 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
2321 2321 if depth is not None:
2322 2322 curdepth = revdepth[rev]
2323 2323 for p in ps:
2324 2324 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
2325 2325 needed = False
2326 2326 shallow_enough = depth is None or revdepth[rev] <= depth
2327 2327 if shallow_enough:
2328 2328 curmf = mfl[clrev.manifest].read()
2329 2329 if ps:
2330 2330 # We choose to not trust the changed files list in
2331 2331 # changesets because it's not always correct. TODO: could
2332 2332 # we trust it for the non-merge case?
2333 2333 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
2334 2334 needed = bool(curmf.diff(p1mf, match))
2335 2335 if not needed and len(ps) > 1:
2336 2336 # For merge changes, the list of changed files is not
2337 2337 # helpful, since we need to emit the merge if a file
2338 2338 # in the narrow spec has changed on either side of the
2339 2339 # merge. As a result, we do a manifest diff to check.
2340 2340 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2341 2341 needed = bool(curmf.diff(p2mf, match))
2342 2342 else:
2343 2343 # For a root node, we need to include the node if any
2344 2344 # files in the node match the narrowspec.
2345 2345 needed = any(curmf.walk(match))
2346 2346
2347 2347 if needed:
2348 2348 for head in ellipsisheads[rev]:
2349 2349 addroot(head, rev)
2350 2350 for p in ps:
2351 2351 required.add(p)
2352 2352 relevant_nodes.add(cl.node(rev))
2353 2353 else:
2354 2354 if not ps:
2355 2355 ps = [nullrev]
2356 2356 if rev in required:
2357 2357 for head in ellipsisheads[rev]:
2358 2358 addroot(head, rev)
2359 2359 for p in ps:
2360 2360 ellipsisheads[p].add(rev)
2361 2361 else:
2362 2362 for p in ps:
2363 2363 ellipsisheads[p] |= ellipsisheads[rev]
2364 2364
2365 2365 # add common changesets as roots of their reachable ellipsis heads
2366 2366 for c in commonrevs:
2367 2367 for head in ellipsisheads[c]:
2368 2368 addroot(head, c)
2369 2369 return visitnodes, relevant_nodes, ellipsisroots
2370 2370
2371 2371
2372 2372 def caps20to10(repo, role):
2373 2373 """return a set with appropriate options to use bundle20 during getbundle"""
2374 2374 caps = {b'HG20'}
2375 2375 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2376 2376 caps.add(b'bundle2=' + urlreq.quote(capsblob))
2377 2377 return caps
2378 2378
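# The returned set holds two capability strings, roughly of this shape
# (blob content abbreviated and illustrative only):
#
#   {b'HG20', b'bundle2=HG20%0Achangegroup%3D01%2C02...'}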
2379 2379
2380 2380 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2381 2381 getbundle2partsorder = []
2382 2382
2383 2383 # Mapping between step name and function
2384 2384 #
2385 2385 # This exists to help extensions wrap steps if necessary
2386 2386 getbundle2partsmapping = {}
2387 2387
2388 2388
2389 2389 def getbundle2partsgenerator(stepname, idx=None):
2390 2390 """decorator for function generating bundle2 part for getbundle
2391 2391
2392 2392 The function is added to the step -> function mapping and appended to the
2393 2393 list of steps. Beware that decorated functions will be added in order
2394 2394 (this may matter).
2395 2395
2396 2396 You can only use this decorator for new steps; if you want to wrap a step
2397 2397 from an extension, modify the getbundle2partsmapping dictionary directly."""
2398 2398
2399 2399 def dec(func):
2400 2400 assert stepname not in getbundle2partsmapping
2401 2401 getbundle2partsmapping[stepname] = func
2402 2402 if idx is None:
2403 2403 getbundle2partsorder.append(stepname)
2404 2404 else:
2405 2405 getbundle2partsorder.insert(idx, stepname)
2406 2406 return func
2407 2407
2408 2408 return dec
2409 2409
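# A sketch of registering a custom part generator (hypothetical part
# name; the built-in generators below are registered the same way):
#
#   @getbundle2partsgenerator(b'my-part')
#   def _getbundlemypart(
#       bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
#   ):
#       """add a hypothetical part to the requested bundle"""
#       bundler.newpart(b'my-part', data=b'...')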
2410 2410
2411 2411 def bundle2requested(bundlecaps):
2412 2412 if bundlecaps is not None:
2413 2413 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2414 2414 return False
2415 2415
2416 2416
2417 2417 def getbundlechunks(
2418 2418 repo,
2419 2419 source,
2420 2420 heads=None,
2421 2421 common=None,
2422 2422 bundlecaps=None,
2423 2423 remote_sidedata=None,
2424 2424 **kwargs,
2425 2425 ):
2426 2426 """Return chunks constituting a bundle's raw data.
2427 2427
2428 2428 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2429 2429 passed.
2430 2430
2431 2431 Returns a 2-tuple of a dict with metadata about the generated bundle
2432 2432 and an iterator over raw chunks (of varying sizes).
2433 2433 """
2434 2434 kwargs = pycompat.byteskwargs(kwargs)
2435 2435 info = {}
2436 2436 usebundle2 = bundle2requested(bundlecaps)
2437 2437 # bundle10 case
2438 2438 if not usebundle2:
2439 2439 if bundlecaps and not kwargs.get(b'cg', True):
2440 2440 raise ValueError(
2441 2441 _(b'request for bundle10 must include changegroup')
2442 2442 )
2443 2443
2444 2444 if kwargs:
2445 2445 raise ValueError(
2446 2446 _(b'unsupported getbundle arguments: %s')
2447 2447 % b', '.join(sorted(kwargs.keys()))
2448 2448 )
2449 2449 outgoing = _computeoutgoing(repo, heads, common)
2450 2450 info[b'bundleversion'] = 1
2451 2451 return (
2452 2452 info,
2453 2453 changegroup.makestream(
2454 2454 repo,
2455 2455 outgoing,
2456 2456 b'01',
2457 2457 source,
2458 2458 bundlecaps=bundlecaps,
2459 2459 remote_sidedata=remote_sidedata,
2460 2460 ),
2461 2461 )
2462 2462
2463 2463 # bundle20 case
2464 2464 info[b'bundleversion'] = 2
2465 2465 b2caps = {}
2466 2466 for bcaps in bundlecaps:
2467 2467 if bcaps.startswith(b'bundle2='):
2468 2468 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2469 2469 b2caps.update(bundle2.decodecaps(blob))
2470 2470 bundler = bundle2.bundle20(repo.ui, b2caps)
2471 2471
2472 2472 kwargs[b'heads'] = heads
2473 2473 kwargs[b'common'] = common
2474 2474
2475 2475 for name in getbundle2partsorder:
2476 2476 func = getbundle2partsmapping[name]
2477 2477 func(
2478 2478 bundler,
2479 2479 repo,
2480 2480 source,
2481 2481 bundlecaps=bundlecaps,
2482 2482 b2caps=b2caps,
2483 2483 remote_sidedata=remote_sidedata,
2484 2484 **pycompat.strkwargs(kwargs),
2485 2485 )
2486 2486
2487 2487 info[b'prefercompressed'] = bundler.prefercompressed
2488 2488
2489 2489 return info, bundler.getchunks()
2490 2490
2491 2491
2492 2492 @getbundle2partsgenerator(b'stream')
2493 2493 def _getbundlestream2(bundler, repo, *args, **kwargs):
2494 2494 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2495 2495
2496 2496
2497 2497 @getbundle2partsgenerator(b'changegroup')
2498 2498 def _getbundlechangegrouppart(
2499 2499 bundler,
2500 2500 repo,
2501 2501 source,
2502 2502 bundlecaps=None,
2503 2503 b2caps=None,
2504 2504 heads=None,
2505 2505 common=None,
2506 2506 remote_sidedata=None,
2507 2507 **kwargs,
2508 2508 ):
2509 2509 """add a changegroup part to the requested bundle"""
2510 2510 if not kwargs.get('cg', True) or not b2caps:
2511 2511 return
2512 2512
2513 2513 version = b'01'
2514 2514 cgversions = b2caps.get(b'changegroup')
2515 2515 if cgversions: # 3.1 and 3.2 ship with an empty value
2516 2516 cgversions = [
2517 2517 v
2518 2518 for v in cgversions
2519 2519 if v in changegroup.supportedoutgoingversions(repo)
2520 2520 ]
2521 2521 if not cgversions:
2522 2522 raise error.Abort(_(b'no common changegroup version'))
2523 2523 version = max(cgversions)
2524 2524
2525 2525 outgoing = _computeoutgoing(repo, heads, common)
2526 2526 if not outgoing.missing:
2527 2527 return
2528 2528
2529 2529 if kwargs.get('narrow', False):
2530 2530 include = sorted(filter(bool, kwargs.get('includepats', [])))
2531 2531 exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
2532 2532 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2533 2533 else:
2534 2534 matcher = None
2535 2535
2536 2536 cgstream = changegroup.makestream(
2537 2537 repo,
2538 2538 outgoing,
2539 2539 version,
2540 2540 source,
2541 2541 bundlecaps=bundlecaps,
2542 2542 matcher=matcher,
2543 2543 remote_sidedata=remote_sidedata,
2544 2544 )
2545 2545
2546 2546 part = bundler.newpart(b'changegroup', data=cgstream)
2547 2547 if cgversions:
2548 2548 part.addparam(b'version', version)
2549 2549
2550 2550 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2551 2551
2552 2552 if scmutil.istreemanifest(repo):
2553 2553 part.addparam(b'treemanifest', b'1')
2554 2554
2555 2555 if repository.REPO_FEATURE_SIDE_DATA in repo.features:
2556 2556 part.addparam(b'exp-sidedata', b'1')
2557 2557 sidedata = bundle2.format_remote_wanted_sidedata(repo)
2558 2558 part.addparam(b'exp-wanted-sidedata', sidedata)
2559 2559
2560 2560 if (
2561 2561 kwargs.get('narrow', False)
2562 2562 and kwargs.get('narrow_acl', False)
2563 2563 and (include or exclude)
2564 2564 ):
2565 2565 # this is mandatory because otherwise ACL clients won't work
2566 2566 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2567 2567 narrowspecpart.data = b'%s\0%s' % (
2568 2568 b'\n'.join(include),
2569 2569 b'\n'.join(exclude),
2570 2570 )
2571 2571
2572 2572
2573 2573 @getbundle2partsgenerator(b'bookmarks')
2574 2574 def _getbundlebookmarkpart(
2575 2575 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2576 2576 ):
2577 2577 """add a bookmark part to the requested bundle"""
2578 2578 if not kwargs.get('bookmarks', False):
2579 2579 return
2580 2580 if not b2caps or b'bookmarks' not in b2caps:
2581 2581 raise error.Abort(_(b'no common bookmarks exchange method'))
2582 2582 books = bookmod.listbinbookmarks(repo)
2583 2583 data = bookmod.binaryencode(repo, books)
2584 2584 if data:
2585 2585 bundler.newpart(b'bookmarks', data=data)
2586 2586
2587 2587
2588 2588 @getbundle2partsgenerator(b'listkeys')
2589 2589 def _getbundlelistkeysparts(
2590 2590 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2591 2591 ):
2592 2592 """add parts containing listkeys namespaces to the requested bundle"""
2593 2593 listkeys = kwargs.get('listkeys', ())
2594 2594 for namespace in listkeys:
2595 2595 part = bundler.newpart(b'listkeys')
2596 2596 part.addparam(b'namespace', namespace)
2597 2597 keys = repo.listkeys(namespace).items()
2598 2598 part.data = pushkey.encodekeys(keys)
2599 2599
2600 2600
2601 2601 @getbundle2partsgenerator(b'obsmarkers')
2602 2602 def _getbundleobsmarkerpart(
2603 2603 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2604 2604 ):
2605 2605 """add an obsolescence markers part to the requested bundle"""
2606 2606 if kwargs.get('obsmarkers', False):
2607 2607 unfi_cl = repo.unfiltered().changelog
2608 2608 if heads is None:
2609 2609 headrevs = repo.changelog.headrevs()
2610 2610 else:
2611 2611 get_rev = unfi_cl.index.get_rev
2612 2612 headrevs = [get_rev(node) for node in heads]
2613 2613 headrevs = [rev for rev in headrevs if rev is not None]
2614 2614 revs = unfi_cl.ancestors(headrevs, inclusive=True)
2615 2615 markers = repo.obsstore.relevantmarkers(revs=revs)
2616 2616 markers = obsutil.sortedmarkers(markers)
2617 2617 bundle2.buildobsmarkerspart(bundler, markers)
2618 2618
2619 2619
2620 2620 @getbundle2partsgenerator(b'phases')
2621 2621 def _getbundlephasespart(
2622 2622 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2623 2623 ):
2624 2624 """add phase heads part to the requested bundle"""
2625 2625 if kwargs.get('phases', False):
2626 2626 if not b2caps or b'heads' not in b2caps.get(b'phases', ()):
2627 2627 raise error.Abort(_(b'no common phases exchange method'))
2628 2628 if heads is None:
2629 2629 heads = repo.heads()
2630 2630
2631 2631 headsbyphase = collections.defaultdict(set)
2632 2632 if repo.publishing():
2633 2633 headsbyphase[phases.public] = heads
2634 2634 else:
2635 2635 # find the appropriate heads to move
2636 2636
2637 2637 phase = repo._phasecache.phase
2638 2638 node = repo.changelog.node
2639 2639 rev = repo.changelog.rev
2640 2640 for h in heads:
2641 2641 headsbyphase[phase(repo, rev(h))].add(h)
2642 2642 seenphases = list(headsbyphase.keys())
2643 2643
2644 2644 # We do not handle anything but public and draft phases for now
2645 2645 if seenphases:
2646 2646 assert max(seenphases) <= phases.draft
2647 2647
2648 2648 # if client is pulling non-public changesets, we need to find
2649 2649 # intermediate public heads.
2650 2650 draftheads = headsbyphase.get(phases.draft, set())
2651 2651 if draftheads:
2652 2652 publicheads = headsbyphase.get(phases.public, set())
2653 2653
2654 2654 revset = b'heads(only(%ln, %ln) and public())'
2655 2655 extraheads = repo.revs(revset, draftheads, publicheads)
2656 2656 for r in extraheads:
2657 2657 headsbyphase[phases.public].add(node(r))
2658 2658
2659 2659 # transform data into the format used by the encoding function
2660 2660 phasemapping = {
2661 2661 phase: sorted(headsbyphase[phase]) for phase in phases.allphases
2662 2662 }
2663 2663
2664 2664 # generate the actual part
2665 2665 phasedata = phases.binaryencode(phasemapping)
2666 2666 bundler.newpart(b'phase-heads', data=phasedata)
2667 2667
2668 2668
2669 2669 @getbundle2partsgenerator(b'hgtagsfnodes')
2670 2670 def _getbundletagsfnodes(
2671 2671 bundler,
2672 2672 repo,
2673 2673 source,
2674 2674 bundlecaps=None,
2675 2675 b2caps=None,
2676 2676 heads=None,
2677 2677 common=None,
2678 2678 **kwargs,
2679 2679 ):
2680 2680 """Transfer the .hgtags filenodes mapping.
2681 2681
2682 2682 Only values for heads in this bundle will be transferred.
2683 2683
2684 2684 The part data consists of pairs of 20-byte changeset nodes and raw
2685 2685 .hgtags filenode values.
2686 2686 """
2687 2687 # Don't send unless:
2688 2688 # - changesets are being exchanged,
2689 2689 # - the client supports it.
2690 2690 if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
2691 2691 return
2692 2692
2693 2693 outgoing = _computeoutgoing(repo, heads, common)
2694 2694 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2695 2695
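
The docstring above pins down the payload layout: a flat sequence of fixed-width
pairs. A minimal decoding sketch follows (illustrative only; iter_tagsfnodes is
a hypothetical helper, not part of the bundle2 API), assuming 20-byte SHA-1
hashes as described:

    def iter_tagsfnodes(data):
        # Each record is a 20-byte changeset node followed by the
        # 20-byte .hgtags filenode for that changeset.
        assert len(data) % 40 == 0
        for off in range(0, len(data), 40):
            yield data[off:off + 20], data[off + 20:off + 40]
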
2696 2696
2697 2697 @getbundle2partsgenerator(b'cache:rev-branch-cache')
2698 2698 def _getbundlerevbranchcache(
2699 2699 bundler,
2700 2700 repo,
2701 2701 source,
2702 2702 bundlecaps=None,
2703 2703 b2caps=None,
2704 2704 heads=None,
2705 2705 common=None,
2706 2706 **kwargs,
2707 2707 ):
2708 2708 """Transfer the rev-branch-cache mapping
2709 2709
2710 2710 The payload is a series of data blocks, one per branch:
2711 2711
2712 2712 1) branch name length
2713 2713 2) number of open heads
2714 2714 3) number of closed heads
2715 2715 4) open heads nodes
2716 2716 5) closed heads nodes
2717 2717 """
2718 2718 # Don't send unless:
2719 2719 # - changesets are being exchanged,
2720 2720 # - the client supports it,
2721 2721 # - narrow bundle isn't in play (not currently compatible).
2722 2722 if (
2723 2723 not kwargs.get('cg', True)
2724 2724 or not b2caps
2725 2725 or b'rev-branch-cache' not in b2caps
2726 2726 or kwargs.get('narrow', False)
2727 2727 or repo.ui.has_section(_NARROWACL_SECTION)
2728 2728 ):
2729 2729 return
2730 2730
2731 2731 outgoing = _computeoutgoing(repo, heads, common)
2732 2732 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2733 2733
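
The five fields in the docstring above repeat once per branch. A hedged decoding
sketch for orientation; the '>III' header struct and 20-byte node width are
assumptions on my part (the authoritative encoding lives in bundle2.py), and
decode_rbc is a hypothetical name:

    import struct

    _HDR = struct.Struct('>III')  # name length, open count, closed count (assumed)

    def decode_rbc(data):
        off = 0
        while off < len(data):
            namelen, nopen, nclosed = _HDR.unpack_from(data, off)
            off += _HDR.size
            name = data[off:off + namelen]
            off += namelen
            opens = [data[off + i * 20:off + (i + 1) * 20] for i in range(nopen)]
            off += nopen * 20
            closed = [data[off + i * 20:off + (i + 1) * 20] for i in range(nclosed)]
            off += nclosed * 20
            yield name, opens, closed
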
2734 2734
2735 2735 def check_heads(repo, their_heads, context):
2736 2736 """check if the heads of a repo have been modified
2737 2737
2738 2738 Used by peer for unbundling.
2739 2739 """
2740 2740 heads = repo.heads()
2741 2741 heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()
2742 2742 if not (
2743 2743 their_heads == [b'force']
2744 2744 or their_heads == heads
2745 2745 or their_heads == [b'hashed', heads_hash]
2746 2746 ):
2747 2747 # someone else committed/pushed/unbundled while we
2748 2748 # were transferring data
2749 2749 raise error.PushRaced(
2750 2750 b'repository changed while %s - please try again' % context
2751 2751 )
2752 2752
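
A client that wants the race check above can send the compact hashed form
instead of the full head list. A sketch of building that token, mirroring the
sha1-of-sorted-heads computation in check_heads (hashed_heads_token is a
hypothetical helper, not a wire protocol API):

    import hashlib

    def hashed_heads_token(remote_heads):
        # remote_heads: binary node ids observed on the remote before pushing
        digest = hashlib.sha1(b''.join(sorted(remote_heads))).digest()
        return [b'hashed', digest]
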
2753 2753
2754 2754 def unbundle(repo, cg, heads, source, url):
2755 2755 """Apply a bundle to a repo.
2756 2756
2757 2757 This function makes sure the repo is locked during the application and
2758 2758 has a mechanism to check that no push race occurred between the creation
2759 2759 of the bundle and its application.
2760 2760
2761 2761 If the push was raced, a PushRaced exception is raised."""
2762 2762 r = 0
2763 2763 # need a transaction when processing a bundle2 stream
2764 2764 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2765 2765 lockandtr = [None, None, None]
2766 2766 recordout = None
2767 2767 # quick fix for output mismatch with bundle2 in 3.4
2768 2768 captureoutput = repo.ui.configbool(
2769 2769 b'experimental', b'bundle2-output-capture'
2770 2770 )
2771 2771 if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
2772 2772 captureoutput = True
2773 2773 try:
2774 2774 # note: outside bundle1, 'heads' is expected to be empty and this
2775 2775 # 'check_heads' call will be a no-op
2776 2776 check_heads(repo, heads, b'uploading changes')
2777 2777 # push can proceed
2778 2778 if not isinstance(cg, bundle2.unbundle20):
2779 2779 # legacy case: bundle1 (changegroup 01)
2780 2780 txnname = b"\n".join([source, urlutil.hidepassword(url)])
2781 2781 with repo.lock(), repo.transaction(txnname) as tr:
2782 2782 op = bundle2.applybundle(repo, cg, tr, source, url)
2783 2783 r = bundle2.combinechangegroupresults(op)
2784 2784 else:
2785 2785 r = None
2786 2786 try:
2787 2787
2788 2788 def gettransaction():
2789 2789 if not lockandtr[2]:
2790 2790 if not bookmod.bookmarksinstore(repo):
2791 2791 lockandtr[0] = repo.wlock()
2792 2792 lockandtr[1] = repo.lock()
2793 2793 lockandtr[2] = repo.transaction(source)
2794 2794 lockandtr[2].hookargs[b'source'] = source
2795 2795 lockandtr[2].hookargs[b'url'] = url
2796 2796 lockandtr[2].hookargs[b'bundle2'] = b'1'
2797 2797 return lockandtr[2]
2798 2798
2799 2799 # Do greedy locking by default until we're satisfied with lazy
2800 2800 # locking.
2801 2801 if not repo.ui.configbool(
2802 2802 b'experimental', b'bundle2lazylocking'
2803 2803 ):
2804 2804 gettransaction()
2805 2805
2806 2806 op = bundle2.bundleoperation(
2807 2807 repo,
2808 2808 gettransaction,
2809 2809 captureoutput=captureoutput,
2810 2810 source=b'push',
2811 2811 )
2812 2812 try:
2813 2813 op = bundle2.processbundle(repo, cg, op=op)
2814 2814 finally:
2815 2815 r = op.reply
2816 2816 if captureoutput and r is not None:
2817 2817 repo.ui.pushbuffer(error=True, subproc=True)
2818 2818
2819 2819 def recordout(output):
2820 2820 r.newpart(b'output', data=output, mandatory=False)
2821 2821
2822 2822 if lockandtr[2] is not None:
2823 2823 lockandtr[2].close()
2824 2824 except BaseException as exc:
2825 2825 exc.duringunbundle2 = True
2826 2826 if captureoutput and r is not None:
2827 2827 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2828 2828
2829 2829 def recordout(output):
2830 2830 part = bundle2.bundlepart(
2831 2831 b'output', data=output, mandatory=False
2832 2832 )
2833 2833 parts.append(part)
2834 2834
2835 2835 raise
2836 2836 finally:
2837 2837 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2838 2838 if recordout is not None:
2839 2839 recordout(repo.ui.popbuffer())
2840 2840 return r
2841 2841
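
The lockandtr list and gettransaction closure above implement acquire-on-first-use:
the write lock and transaction are only taken when a bundle2 part actually asks
for them, and the list cell lets the nested function rebind shared state. The
same pattern in isolation (a generic sketch, not Mercurial API):

    def lazy(acquire):
        cell = [None]  # list cell so the closure can rebind it

        def get():
            if cell[0] is None:
                cell[0] = acquire()
            return cell[0]

        return get
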
2842 2842
2843 2843 def _maybeapplyclonebundle(pullop):
2844 2844 """Apply a clone bundle from a remote, if possible."""
2845 2845
2846 2846 repo = pullop.repo
2847 2847 remote = pullop.remote
2848 2848
2849 2849 if not repo.ui.configbool(b'ui', b'clonebundles'):
2850 2850 return
2851 2851
2852 2852 # Only run if local repo is empty.
2853 2853 if len(repo):
2854 2854 return
2855 2855
2856 2856 if pullop.heads:
2857 2857 return
2858 2858
2859 2859 if not remote.capable(b'clonebundles'):
2860 2860 return
2861 2861
2862 2862 with remote.commandexecutor() as e:
2863 2863 res = e.callcommand(b'clonebundles', {}).result()
2864 2864
2865 2865 # If we call the wire protocol command, that's good enough to record the
2866 2866 # attempt.
2867 2867 pullop.clonebundleattempted = True
2868 2868
2869 2869 entries = bundlecaches.parseclonebundlesmanifest(repo, res)
2870 2870 if not entries:
2871 2871 repo.ui.note(
2872 2872 _(
2873 2873 b'no clone bundles available on remote; '
2874 2874 b'falling back to regular clone\n'
2875 2875 )
2876 2876 )
2877 2877 return
2878 2878
2879 2879 entries = bundlecaches.filterclonebundleentries(
2880 2880 repo, entries, streamclonerequested=pullop.streamclonerequested
2881 2881 )
2882 2882
2883 2883 if not entries:
2884 2884 # There is a thundering herd concern here. However, if a server
2885 2885 # operator doesn't advertise bundles appropriate for its clients,
2886 2886 # they deserve what's coming. Furthermore, from a client's
2887 2887 # perspective, no automatic fallback would mean not being able to
2888 2888 # clone!
2889 2889 repo.ui.warn(
2890 2890 _(
2891 2891 b'no compatible clone bundles available on server; '
2892 2892 b'falling back to regular clone\n'
2893 2893 )
2894 2894 )
2895 2895 repo.ui.warn(
2896 2896 _(b'(you may want to report this to the server operator)\n')
2897 2897 )
2898 2898 return
2899 2899
2900 2900 entries = bundlecaches.sortclonebundleentries(repo.ui, entries)
2901 2901
2902 2902 url = entries[0][b'URL']
2903 digest = entries[0].get(b'DIGEST')
2904 if digest:
2905 algorithms = urlmod.digesthandler.digest_algorithms.keys()
2906 preference = dict(zip(algorithms, range(len(algorithms))))
2907 best_entry = None
2908 best_preference = len(preference)
2909 for digest_entry in digest.split(b','):
2910 cur_algo, cur_digest = digest_entry.split(b':')
2911 if cur_algo not in preference:
2912 continue
2913 if preference[cur_algo] < best_preference:
2914 best_entry = digest_entry
2915 best_preference = preference[cur_algo]
2916 digest = best_entry
2917
2903 2918 repo.ui.status(_(b'applying clone bundle from %s\n') % url)
2904 if trypullbundlefromurl(repo.ui, repo, url, remote):
2919 if trypullbundlefromurl(repo.ui, repo, url, remote, digest):
2905 2920 repo.ui.status(_(b'finished applying clone bundle\n'))
2906 2921 # Bundle failed.
2907 2922 #
2908 2923 # We abort by default to avoid the thundering herd of
2909 2924 # clients flooding a server that was expecting expensive
2910 2925 # clone load to be offloaded.
2911 2926 elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
2912 2927 repo.ui.warn(_(b'falling back to normal clone\n'))
2913 2928 else:
2914 2929 raise error.Abort(
2915 2930 _(b'error applying bundle'),
2916 2931 hint=_(
2917 2932 b'if this error persists, consider contacting '
2918 2933 b'the server operator or disable clone '
2919 2934 b'bundles via '
2920 2935 b'"--config ui.clonebundles=false"'
2921 2936 ),
2922 2937 )
2923 2938
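
The DIGEST manifest attribute handled above carries one or more algo:hexdigest
pairs separated by commas (for example DIGEST=sha256:<hex>,sha512:<hex>), and
the selection loop keeps the pair whose algorithm ranks earliest in
digesthandler.digest_algorithms. The same selection in isolation (a sketch;
pick_digest is a hypothetical name):

    def pick_digest(digest_attr, supported=(b'sha256', b'sha512')):
        rank = {algo: i for i, algo in enumerate(supported)}
        best, best_rank = None, len(supported)
        for entry in digest_attr.split(b','):
            algo = entry.split(b':')[0]
            if rank.get(algo, best_rank) < best_rank:
                best, best_rank = entry, rank[algo]
        return best  # None when no advertised algorithm is supported

    pick_digest(b'sha512:aa,sha256:bb')  # -> b'sha256:bb' (sha256 preferred)
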
2924 2939
2925 2940 def inline_clone_bundle_open(ui, url, peer):
2926 2941 if not peer:
2927 2942 raise error.Abort(_(b'no remote repository supplied for %s') % url)
2928 2943 clonebundleid = url[len(bundlecaches.CLONEBUNDLESCHEME) :]
2929 2944 peerclonebundle = peer.get_cached_bundle_inline(clonebundleid)
2930 2945 return util.chunkbuffer(peerclonebundle)
2931 2946
2932 2947
2933 def trypullbundlefromurl(ui, repo, url, peer):
2948 def trypullbundlefromurl(ui, repo, url, peer, digest):
2934 2949 """Attempt to apply a bundle from a URL."""
2935 2950 with repo.lock(), repo.transaction(b'bundleurl') as tr:
2936 2951 try:
2937 2952 if url.startswith(bundlecaches.CLONEBUNDLESCHEME):
2938 2953 fh = inline_clone_bundle_open(ui, url, peer)
2939 2954 else:
2940 fh = urlmod.open(ui, url)
2955 fh = urlmod.open(ui, url, digest=digest)
2941 2956 cg = readbundle(ui, fh, b'stream')
2942 2957
2943 2958 if isinstance(cg, streamclone.streamcloneapplier):
2944 2959 cg.apply(repo)
2945 2960 else:
2946 2961 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
2947 2962 return True
2948 2963 except urlerr.httperror as e:
2949 2964 ui.warn(
2950 2965 _(b'HTTP error fetching bundle: %s\n')
2951 2966 % stringutil.forcebytestr(e)
2952 2967 )
2953 2968 except urlerr.urlerror as e:
2954 2969 ui.warn(
2955 2970 _(b'error fetching bundle: %s\n')
2956 2971 % stringutil.forcebytestr(e.reason)
2957 2972 )
2958 2973
2959 2974 return False
@@ -1,661 +1,731
1 1 # url.py - HTTP handling for mercurial
2 2 #
3 3 # Copyright 2005, 2006, 2007, 2008 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 from __future__ import annotations
11 11
12 12 import base64
13 import hashlib
13 14 import socket
14 15
15 16 from .i18n import _
17 from .node import hex
16 18 from . import (
17 19 encoding,
18 20 error,
19 21 httpconnection as httpconnectionmod,
20 22 keepalive,
21 23 pycompat,
22 24 sslutil,
23 25 urllibcompat,
24 26 util,
25 27 )
26 28 from .utils import (
27 29 stringutil,
28 30 urlutil,
29 31 )
30 32
31 33 httplib = util.httplib
32 34 stringio = util.stringio
33 35 urlerr = util.urlerr
34 36 urlreq = util.urlreq
35 37
36 38
37 39 def escape(s, quote=None):
38 40 """Replace special characters "&", "<" and ">" to HTML-safe sequences.
39 41 If the optional flag quote is true, the quotation mark character (")
40 42 is also translated.
41 43
42 44 This is the same as cgi.escape in Python, but always operates on
43 45 bytes, whereas cgi.escape in Python 3 only works on unicodes.
44 46 """
45 47 s = s.replace(b"&", b"&amp;")
46 48 s = s.replace(b"<", b"&lt;")
47 49 s = s.replace(b">", b"&gt;")
48 50 if quote:
49 51 s = s.replace(b'"', b"&quot;")
50 52 return s
51 53
52 54
53 55 class passwordmgr:
54 56 def __init__(self, ui, passwddb):
55 57 self.ui = ui
56 58 self.passwddb = passwddb
57 59
58 60 def add_password(self, realm, uri, user, passwd):
59 61 return self.passwddb.add_password(realm, uri, user, passwd)
60 62
61 63 def find_user_password(self, realm, authuri):
62 64 assert isinstance(realm, (type(None), str))
63 65 assert isinstance(authuri, str)
64 66 authinfo = self.passwddb.find_user_password(realm, authuri)
65 67 user, passwd = authinfo
66 68 user, passwd = pycompat.bytesurl(user), pycompat.bytesurl(passwd)
67 69 if user and passwd:
68 70 self._writedebug(user, passwd)
69 71 return (pycompat.strurl(user), pycompat.strurl(passwd))
70 72
71 73 if not user or not passwd:
72 74 res = httpconnectionmod.readauthforuri(self.ui, authuri, user)
73 75 if res:
74 76 group, auth = res
75 77 user, passwd = auth.get(b'username'), auth.get(b'password')
76 78 self.ui.debug(b"using auth.%s.* for authentication\n" % group)
77 79 if not user or not passwd:
78 80 u = urlutil.url(pycompat.bytesurl(authuri))
79 81 u.query = None
80 82 if not self.ui.interactive():
81 83 raise error.Abort(
82 84 _(b'http authorization required for %s')
83 85 % urlutil.hidepassword(bytes(u))
84 86 )
85 87
86 88 self.ui.write(
87 89 _(b"http authorization required for %s\n")
88 90 % urlutil.hidepassword(bytes(u))
89 91 )
90 92 self.ui.write(_(b"realm: %s\n") % pycompat.bytesurl(realm))
91 93 if user:
92 94 self.ui.write(_(b"user: %s\n") % user)
93 95 else:
94 96 user = self.ui.prompt(_(b"user:"), default=None)
95 97
96 98 if not passwd:
97 99 passwd = self.ui.getpass()
98 100
99 101 # As of Python 3.8, the default implementation of
100 102 # AbstractBasicAuthHandler.retry_http_basic_auth() assumes the user
101 103 # is set if pw is not None. This means (None, str) is not a valid
102 104 # return type of find_user_password().
103 105 if user is None:
104 106 return None, None
105 107
106 108 self.passwddb.add_password(realm, authuri, user, passwd)
107 109 self._writedebug(user, passwd)
108 110 return (pycompat.strurl(user), pycompat.strurl(passwd))
109 111
110 112 def _writedebug(self, user, passwd):
111 113 msg = _(b'http auth: user %s, password %s\n')
112 114 self.ui.debug(msg % (user, passwd and b'*' * len(passwd) or b'not set'))
113 115
114 116 def find_stored_password(self, authuri):
115 117 return self.passwddb.find_user_password(None, authuri)
116 118
117 119
118 120 class proxyhandler(urlreq.proxyhandler):
119 121 def __init__(self, ui):
120 122 proxyurl = ui.config(b"http_proxy", b"host") or encoding.environ.get(
121 123 b'http_proxy'
122 124 )
123 125 # XXX proxyauthinfo = None
124 126
125 127 if proxyurl:
126 128 # proxy can be proper url or host[:port]
127 129 if not (
128 130 proxyurl.startswith(b'http:') or proxyurl.startswith(b'https:')
129 131 ):
130 132 proxyurl = b'http://' + proxyurl + b'/'
131 133 proxy = urlutil.url(proxyurl)
132 134 if not proxy.user:
133 135 proxy.user = ui.config(b"http_proxy", b"user")
134 136 proxy.passwd = ui.config(b"http_proxy", b"passwd")
135 137
136 138 # see if we should use a proxy for this url
137 139 no_list = [b"localhost", b"127.0.0.1"]
138 140 no_list.extend(
139 141 [p.lower() for p in ui.configlist(b"http_proxy", b"no")]
140 142 )
141 143 no_list.extend(
142 144 [
143 145 p.strip().lower()
144 146 for p in encoding.environ.get(b"no_proxy", b'').split(b',')
145 147 if p.strip()
146 148 ]
147 149 )
148 150 # "http_proxy.always" config is for running tests on localhost
149 151 if ui.configbool(b"http_proxy", b"always"):
150 152 self.no_list = []
151 153 else:
152 154 self.no_list = no_list
153 155
154 156 # Keys and values need to be str because the standard library
155 157 # expects them to be.
156 158 proxyurl = str(proxy)
157 159 proxies = {'http': proxyurl, 'https': proxyurl}
158 160 ui.debug(
159 161 b'proxying through %s\n' % urlutil.hidepassword(bytes(proxy))
160 162 )
161 163 else:
162 164 proxies = {}
163 165
164 166 urlreq.proxyhandler.__init__(self, proxies)
165 167 self.ui = ui
166 168
167 169 def proxy_open(self, req, proxy, type_):
168 170 host = pycompat.bytesurl(urllibcompat.gethost(req)).split(b':')[0]
169 171 for e in self.no_list:
170 172 if host == e:
171 173 return None
172 174 if e.startswith(b'*.') and host.endswith(e[2:]):
173 175 return None
174 176 if e.startswith(b'.') and host.endswith(e[1:]):
175 177 return None
176 178
177 179 return urlreq.proxyhandler.proxy_open(self, req, proxy, type_)
178 180
179 181
180 182 def _gen_sendfile(orgsend):
181 183 def _sendfile(self, data):
182 184 # send a file
183 185 if isinstance(data, httpconnectionmod.httpsendfile):
184 186 # if auth required, some data sent twice, so rewind here
185 187 data.seek(0)
186 188 for chunk in util.filechunkiter(data):
187 189 orgsend(self, chunk)
188 190 else:
189 191 orgsend(self, data)
190 192
191 193 return _sendfile
192 194
193 195
194 196 has_https = hasattr(urlreq, 'httpshandler')
195 197
196 198
197 199 class httpconnection(keepalive.HTTPConnection):
198 200 # must be able to send big bundle as stream.
199 201 send = _gen_sendfile(keepalive.HTTPConnection.send)
200 202
201 203
202 204 # Large parts of this function have their origin from before Python 2.6
203 205 # and could potentially be removed.
204 206 def _generic_start_transaction(handler, h, req):
205 207 tunnel_host = req._tunnel_host
206 208 if tunnel_host:
207 209 if tunnel_host[:7] not in ['http://', 'https:/']:
208 210 tunnel_host = 'https://' + tunnel_host
209 211 new_tunnel = True
210 212 else:
211 213 tunnel_host = urllibcompat.getselector(req)
212 214 new_tunnel = False
213 215
214 216 if new_tunnel or tunnel_host == urllibcompat.getfullurl(req): # has proxy
215 217 u = urlutil.url(pycompat.bytesurl(tunnel_host))
216 218 if new_tunnel or u.scheme == b'https': # only use CONNECT for HTTPS
217 219 h.realhostport = b':'.join([u.host, (u.port or b'443')])
218 220 h.headers = req.headers.copy()
219 221 h.headers.update(handler.parent.addheaders)
220 222 return
221 223
222 224 h.realhostport = None
223 225 h.headers = None
224 226
225 227
226 228 class httphandler(keepalive.HTTPHandler):
227 229 def http_open(self, req):
228 230 return self.do_open(httpconnection, req)
229 231
230 232 def _start_transaction(self, h, req):
231 233 _generic_start_transaction(self, h, req)
232 234 return keepalive.HTTPHandler._start_transaction(self, h, req)
233 235
234 236
235 237 class logginghttphandler(httphandler):
236 238 """HTTP(S) handler that logs socket I/O."""
237 239
238 240 def __init__(self, logfh, name, observeropts, *args, **kwargs):
239 241 super().__init__(*args, **kwargs)
240 242
241 243 self._logfh = logfh
242 244 self._logname = name
243 245 self._observeropts = observeropts
244 246
245 247 def do_open(self, http_class, *args, **kwargs):
246 248 _logfh = self._logfh
247 249 _logname = self._logname
248 250 _observeropts = self._observeropts
249 251
250 252 class logginghttpconnection(http_class):
251 253 def connect(self):
252 254 super().connect()
253 255 self.sock = util.makeloggingsocket(
254 256 _logfh, self.sock, _logname, **_observeropts
255 257 )
256 258
257 259 return super().do_open(logginghttpconnection, *args, **kwargs)
258 260
259 261
260 262 if has_https:
261 263
262 264 def _generic_proxytunnel(self: "httpsconnection"):
263 265 headers = self.headers # pytype: disable=attribute-error
264 266 proxyheaders = {
265 267 pycompat.bytestr(x): pycompat.bytestr(headers[x])
266 268 for x in headers
267 269 if x.lower().startswith('proxy-')
268 270 }
269 271 realhostport = self.realhostport # pytype: disable=attribute-error
270 272 self.send(b'CONNECT %s HTTP/1.0\r\n' % realhostport)
271 273
272 274 for header in proxyheaders.items():
273 275 self.send(b'%s: %s\r\n' % header)
274 276 self.send(b'\r\n')
275 277
276 278 # majority of the following code is duplicated from
277 279 # httplib.HTTPConnection as there are no adequate places to
278 280 # override functions to provide the needed functionality.
279 281
280 282 # pytype: disable=attribute-error
281 283 res = self.response_class(self.sock, method=self._method)
282 284 # pytype: enable=attribute-error
283 285
284 286 while True:
285 287 # pytype: disable=attribute-error
286 288 version, status, reason = res._read_status()
287 289 # pytype: enable=attribute-error
288 290 if status != httplib.CONTINUE:
289 291 break
290 292 # skip lines that are all whitespace
291 293 list(iter(lambda: res.fp.readline().strip(), b''))
292 294
293 295 if status == 200:
294 296 # skip lines until we find a blank line
295 297 list(iter(res.fp.readline, b'\r\n'))
296 298 else:
297 299 self.close()
298 300 raise socket.error(
299 301 "Tunnel connection failed: %d %s" % (status, reason.strip())
300 302 )
301 303
302 304 class httpsconnection(keepalive.HTTPConnection):
303 305 response_class = keepalive.HTTPResponse
304 306 default_port = httplib.HTTPS_PORT
305 307 # must be able to send big bundle as stream.
306 308 send = _gen_sendfile(keepalive.safesend)
307 309 getresponse = keepalive.wrapgetresponse(httplib.HTTPConnection)
308 310
309 311 def __init__(
310 312 self,
311 313 host,
312 314 port=None,
313 315 key_file=None,
314 316 cert_file=None,
315 317 *args,
316 318 **kwargs,
317 319 ):
318 320 keepalive.HTTPConnection.__init__(self, host, port, *args, **kwargs)
319 321 self.key_file = key_file
320 322 self.cert_file = cert_file
321 323
322 324 def connect(self):
323 325 self.sock = socket.create_connection(
324 326 (self.host, self.port), self.timeout
325 327 )
326 328
327 329 host = self.host
328 330 realhostport = self.realhostport # pytype: disable=attribute-error
329 331 if realhostport: # use CONNECT proxy
330 332 _generic_proxytunnel(self)
331 333 host = realhostport.rsplit(b':', 1)[0]
332 334 self.sock = sslutil.wrapsocket(
333 335 self.sock,
334 336 self.key_file,
335 337 self.cert_file,
336 338 ui=self.ui, # pytype: disable=attribute-error
337 339 serverhostname=host,
338 340 )
339 341 sslutil.validatesocket(self.sock)
340 342
341 343 class httpshandler(keepalive.KeepAliveHandler, urlreq.httpshandler):
342 344 def __init__(self, ui, timeout=None):
343 345 keepalive.KeepAliveHandler.__init__(self, timeout=timeout)
344 346 urlreq.httpshandler.__init__(self)
345 347 self.ui = ui
346 348 self.pwmgr = passwordmgr(self.ui, self.ui.httppasswordmgrdb)
347 349
348 350 def _start_transaction(self, h, req):
349 351 _generic_start_transaction(self, h, req)
350 352 return keepalive.KeepAliveHandler._start_transaction(self, h, req)
351 353
352 354 def https_open(self, req):
353 355 # urllibcompat.getfullurl() does not contain credentials
354 356 # and we may need them to match the certificates.
355 357 url = urllibcompat.getfullurl(req)
356 358 user, password = self.pwmgr.find_stored_password(url)
357 359 res = httpconnectionmod.readauthforuri(self.ui, url, user)
358 360 if res:
359 361 group, auth = res
360 362 self.auth = auth
361 363 self.ui.debug(b"using auth.%s.* for authentication\n" % group)
362 364 else:
363 365 self.auth = None
364 366 return self.do_open(self._makeconnection, req)
365 367
366 368 def _makeconnection(self, host, port=None, *args, **kwargs):
367 369 keyfile = None
368 370 certfile = None
369 371
370 372 if len(args) >= 1: # key_file
371 373 keyfile = args[0]
372 374 if len(args) >= 2: # cert_file
373 375 certfile = args[1]
374 376 args = args[2:]
375 377
376 378 # if the user has specified different key/cert files in
377 379 # hgrc, we prefer these
378 380 if self.auth and b'key' in self.auth and b'cert' in self.auth:
379 381 keyfile = self.auth[b'key']
380 382 certfile = self.auth[b'cert']
381 383
382 384 conn = httpsconnection(
383 385 host, port, keyfile, certfile, *args, **kwargs
384 386 )
385 387 conn.ui = self.ui
386 388 return conn
387 389
388 390
389 391 class httpdigestauthhandler(urlreq.httpdigestauthhandler):
390 392 def __init__(self, *args, **kwargs):
391 393 urlreq.httpdigestauthhandler.__init__(self, *args, **kwargs)
392 394 self.retried_req = None
393 395
394 396 def reset_retry_count(self):
395 397 # Python 2.6.5 will call this on 401 or 407 errors and thus loop
396 398 # forever. We disable reset_retry_count completely and reset in
397 399 # http_error_auth_reqed instead.
398 400 pass
399 401
400 402 def http_error_auth_reqed(self, auth_header, host, req, headers):
401 403 # Reset the retry counter once for each request.
402 404 if req is not self.retried_req:
403 405 self.retried_req = req
404 406 self.retried = 0
405 407 return urlreq.httpdigestauthhandler.http_error_auth_reqed(
406 408 self, auth_header, host, req, headers
407 409 )
408 410
409 411
410 412 class httpbasicauthhandler(urlreq.httpbasicauthhandler):
411 413 def __init__(self, *args, **kwargs):
412 414 self.auth = None
413 415 urlreq.httpbasicauthhandler.__init__(self, *args, **kwargs)
414 416 self.retried_req = None
415 417
416 418 def http_request(self, request):
417 419 if self.auth:
418 420 request.add_unredirected_header(self.auth_header, self.auth)
419 421
420 422 return request
421 423
422 424 def https_request(self, request):
423 425 if self.auth:
424 426 request.add_unredirected_header(self.auth_header, self.auth)
425 427
426 428 return request
427 429
428 430 def reset_retry_count(self):
429 431 # Python 2.6.5 will call this on 401 or 407 errors and thus loop
430 432 # forever. We disable reset_retry_count completely and reset in
431 433 # http_error_auth_reqed instead.
432 434 pass
433 435
434 436 def http_error_auth_reqed(self, auth_header, host, req, headers):
435 437 # Reset the retry counter once for each request.
436 438 if req is not self.retried_req:
437 439 self.retried_req = req
438 440 self.retried = 0
439 441 return urlreq.httpbasicauthhandler.http_error_auth_reqed(
440 442 self, auth_header, host, req, headers
441 443 )
442 444
443 445 def retry_http_basic_auth(self, host, req, realm):
444 446 user, pw = self.passwd.find_user_password(
445 447 realm, urllibcompat.getfullurl(req)
446 448 )
447 449 if pw is not None:
448 450 raw = b"%s:%s" % (pycompat.bytesurl(user), pycompat.bytesurl(pw))
449 451 auth = 'Basic %s' % pycompat.strurl(base64.b64encode(raw).strip())
450 452 if req.get_header(self.auth_header, None) == auth:
451 453 return None
452 454 self.auth = auth
453 455 req.add_unredirected_header(self.auth_header, auth)
454 456 return self.parent.open(req)
455 457 else:
456 458 return None
457 459
458 460
459 461 def load_cookiejar(ui):
460 462 cookiefile = ui.config(b'auth', b'cookiefile')
461 463 if not cookiefile:
462 464 return
463 465 cookiefile = util.expandpath(cookiefile)
464 466 try:
465 467 cookiejar = util.cookielib.MozillaCookieJar(
466 468 pycompat.fsdecode(cookiefile)
467 469 )
468 470 cookiejar.load()
469 471 return cookiejar
470 472 except util.cookielib.LoadError as e:
471 473 ui.warn(
472 474 _(
473 475 b'(error loading cookie file %s: %s; continuing without '
474 476 b'cookies)\n'
475 477 )
476 478 % (cookiefile, stringutil.forcebytestr(e))
477 479 )
478 480
479 481
480 482 class readlinehandler(urlreq.basehandler):
481 483 def http_response(self, request, response):
482 484 class readlineresponse(response.__class__):
483 485 def readlines(self, sizehint=0):
484 486 total = 0
485 487 list = []
486 488 while True:
487 489 line = self.readline()
488 490 if not line:
489 491 break
490 492 list.append(line)
491 493 total += len(line)
492 494 if sizehint and total >= sizehint:
493 495 break
494 496 return list
495 497
496 498 response.__class__ = readlineresponse
497 499 return response
498 500
499 501 https_response = http_response
500 502
501 503
504 class digesthandler(urlreq.basehandler):
505 # exchange.py assumes the algorithms are listed in order of preference;
506 # earlier entries are preferred.
507 digest_algorithms = {
508 b'sha256': hashlib.sha256,
509 b'sha512': hashlib.sha512,
510 }
511
512 def __init__(self, digest):
513 if b':' not in digest:
514 raise error.Abort(_(b'invalid digest specification'))
515 algo, checksum = digest.split(b':')
516 if algo not in self.digest_algorithms:
517 raise error.Abort(_(b'unsupported digest algorithm: %s') % algo)
518 self._digest = checksum
519 self._hasher = self.digest_algorithms[algo]()
520
521 def http_response(self, request, response):
522 class digestresponse(response.__class__):
523 def _digest_input(self, data):
524 self._hasher.update(data)
525 self._digest_consumed += len(data)
526 if self._digest_finished:
527 digest = hex(self._hasher.digest())
528 if digest != self._digest:
529 raise error.SecurityError(
530 _(
531 b'file with digest %s expected, but %s found for %d bytes'
532 )
533 % (
534 pycompat.bytestr(self._digest),
535 pycompat.bytestr(digest),
536 self._digest_consumed,
537 )
538 )
539
540 def read(self, amt=None):
541 data = super().read(amt)
542 self._digest_input(data)
543 return data
544
545 def readline(self):
546 data = super().readline()
547 self._digest_input(data)
548 return data
549
550 def readinto(self, dest):
551 got = super().readinto(dest)
552 self._digest_input(dest[:got])
553 return got
554
555 def _close_conn(self):
556 self._digest_finished = True
557 return super().close()
558
559 response.__class__ = digestresponse
560 response._digest = self._digest
561 response._digest_consumed = 0
562 response._hasher = self._hasher.copy()
563 response._digest_finished = False
564 return response
565
566 https_response = http_response
567
568
502 569 handlerfuncs = []
503 570
504 571
505 572 def opener(
506 573 ui,
507 574 authinfo=None,
508 575 useragent=None,
509 576 loggingfh=None,
510 577 loggingname=b's',
511 578 loggingopts=None,
512 579 sendaccept=True,
580 digest=None,
513 581 ):
514 582 """
515 583 construct an opener suitable for urllib2
516 584 authinfo will be added to the password manager
517 585
518 586 The opener can be configured to log socket events if the various
519 587 ``logging*`` arguments are specified.
520 588
521 589 ``loggingfh`` denotes a file object to log events to.
522 590 ``loggingname`` denotes the name to print when logging.
523 591 ``loggingopts`` is a dict of keyword arguments to pass to the constructed
524 592 ``util.socketobserver`` instance.
525 593
526 594 ``sendaccept`` allows controlling whether the ``Accept`` request header
527 595 is sent. The header is sent by default.
528 596 """
529 597 timeout = ui.configwith(float, b'http', b'timeout')
530 598 handlers = []
531 599
532 600 if loggingfh:
533 601 handlers.append(
534 602 logginghttphandler(
535 603 loggingfh, loggingname, loggingopts or {}, timeout=timeout
536 604 )
537 605 )
538 606 else:
539 607 handlers.append(httphandler(timeout=timeout))
540 608 if has_https:
541 609 # pytype gets confused about the conditional existence of httpshandler here.
542 610 handlers.append(
543 611 httpshandler(ui, timeout=timeout) # pytype: disable=name-error
544 612 )
545 613
546 614 handlers.append(proxyhandler(ui))
547 615
548 616 passmgr = passwordmgr(ui, ui.httppasswordmgrdb)
549 617 if authinfo is not None:
550 618 realm, uris, user, passwd = authinfo
551 619 saveduser, savedpass = passmgr.find_stored_password(uris[0])
552 620 if user != saveduser or passwd:
553 621 passmgr.add_password(realm, uris, user, passwd)
554 622 ui.debug(
555 623 b'http auth: user %s, password %s\n'
556 624 % (user, passwd and b'*' * len(passwd) or b'not set')
557 625 )
558 626
559 627 handlers.extend(
560 628 (httpbasicauthhandler(passmgr), httpdigestauthhandler(passmgr))
561 629 )
562 630 handlers.extend([h(ui, passmgr) for h in handlerfuncs])
563 631 handlers.append(urlreq.httpcookieprocessor(cookiejar=load_cookiejar(ui)))
564 632 handlers.append(readlinehandler())
633 if digest:
634 handlers.append(digesthandler(digest))
565 635 opener = urlreq.buildopener(*handlers)
566 636
567 637 # keepalive.py's handlers will populate these attributes if they exist.
568 638 opener.requestscount = 0
569 639 opener.sentbytescount = 0
570 640 opener.receivedbytescount = 0
571 641
572 642 # The user agent should *NOT* be used by servers for e.g.
573 643 # protocol detection or feature negotiation: there are other
574 644 # facilities for that.
575 645 #
576 646 # "mercurial/proto-1.0" was the original user agent string and
577 647 # exists for backwards compatibility reasons.
578 648 #
579 649 # The "(Mercurial %s)" string contains the distribution
580 650 # name and version. Other client implementations should choose their
581 651 # own distribution name. Since servers should not be using the user
582 652 # agent string for anything, clients should be able to define whatever
583 653 # user agent they deem appropriate.
584 654 #
585 655 # The custom user agent is for lfs, because unfortunately some servers
586 656 # do look at this value.
587 657 if not useragent:
588 658 agent = b'mercurial/proto-1.0 (Mercurial %s)' % util.version()
589 659 opener.addheaders = [('User-agent', pycompat.sysstr(agent))]
590 660 else:
591 661 opener.addheaders = [('User-agent', pycompat.sysstr(useragent))]
592 662
593 663 # This header should only be needed by wire protocol requests. But it has
594 664 # been sent on all requests since forever. We keep sending it for backwards
595 665 # compatibility reasons. Modern versions of the wire protocol use
596 666 # X-HgProto-<N> for advertising client support.
597 667 if sendaccept:
598 668 opener.addheaders.append(('Accept', 'application/mercurial-0.1'))
599 669
600 670 return opener
601 671
602 672
603 def open(ui, url_, data=None, sendaccept=True):
673 def open(ui, url_, data=None, sendaccept=True, digest=None):
604 674 u = urlutil.url(url_)
605 675 if u.scheme:
606 676 u.scheme = u.scheme.lower()
607 677 url_, authinfo = u.authinfo()
608 678 else:
609 679 path = util.normpath(util.abspath(url_))
610 680 url_ = b'file://' + pycompat.bytesurl(
611 681 urlreq.pathname2url(pycompat.fsdecode(path))
612 682 )
613 683 authinfo = None
614 return opener(ui, authinfo, sendaccept=sendaccept).open(
684 return opener(ui, authinfo, sendaccept=sendaccept, digest=digest).open(
615 685 pycompat.strurl(url_), data
616 686 )
617 687
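
With the plumbing above, callers such as trypullbundlefromurl() just forward the
manifest digest. A hedged usage sketch (the URL and digest are placeholders, and
the ui argument must be a real mercurial.ui instance):

    from mercurial import ui as uimod
    from mercurial import url as urlmod

    fh = urlmod.open(
        uimod.ui.load(),
        b'https://example.com/bundle.hg',
        digest=b'sha256:' + b'0' * 64,
    )
    payload = fh.read()  # verified against the digest as it is consumed
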
618 688
619 689 def wrapresponse(resp):
620 690 """Wrap a response object with common error handlers.
621 691
622 692 This ensures that any I/O from any consumer raises the appropriate
623 693 error and messaging.
624 694 """
625 695 origread = resp.read
626 696
627 697 class readerproxy(resp.__class__):
628 698 def read(self, size=None):
629 699 try:
630 700 return origread(size)
631 701 except httplib.IncompleteRead as e:
632 702 # e.expected is an integer if length known or None otherwise.
633 703 if e.expected:
634 704 got = len(e.partial)
635 705 total = e.expected + got
636 706 msg = _(
637 707 b'HTTP request error (incomplete response; '
638 708 b'expected %d bytes got %d)'
639 709 ) % (total, got)
640 710 else:
641 711 msg = _(b'HTTP request error (incomplete response)')
642 712
643 713 raise error.PeerTransportError(
644 714 msg,
645 715 hint=_(
646 716 b'this may be an intermittent network failure; '
647 717 b'if the error persists, consider contacting the '
648 718 b'network or server operator'
649 719 ),
650 720 )
651 721 except httplib.HTTPException as e:
652 722 raise error.PeerTransportError(
653 723 _(b'HTTP request error (%s)') % e,
654 724 hint=_(
655 725 b'this may be an intermittent network failure; '
656 726 b'if the error persists, consider contacting the '
657 727 b'network or server operator'
658 728 ),
659 729 )
660 730
661 731 resp.__class__ = readerproxy
@@ -1,855 +1,915
1 1 #require no-reposimplestore no-chg
2 2
3 3 Set up a server
4 4
5 5 $ hg init server
6 6 $ cd server
7 7 $ cat >> .hg/hgrc << EOF
8 8 > [extensions]
9 9 > clonebundles =
10 10 > EOF
11 11
12 12 $ touch foo
13 13 $ hg -q commit -A -m 'add foo'
14 14 $ touch bar
15 15 $ hg -q commit -A -m 'add bar'
16 16
17 17 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
18 18 $ cat hg.pid >> $DAEMON_PIDS
19 19 $ cd ..
20 20
21 21 Missing manifest should not result in server lookup
22 22
23 23 $ hg --verbose clone -U http://localhost:$HGPORT no-manifest
24 24 requesting all changes
25 25 adding changesets
26 26 adding manifests
27 27 adding file changes
28 28 added 2 changesets with 2 changes to 2 files
29 29 new changesets 53245c60e682:aaff8d2ffbbf
30 30 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
31 31
32 32 $ cat server/access.log
33 33 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
34 34 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
35 35 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
36 36
37 37 Empty manifest file results in retrieval
38 38 (the extension only checks if the manifest file exists)
39 39
40 40 $ touch server/.hg/clonebundles.manifest
41 41 $ hg --verbose clone -U http://localhost:$HGPORT empty-manifest
42 42 no clone bundles available on remote; falling back to regular clone
43 43 requesting all changes
44 44 adding changesets
45 45 adding manifests
46 46 adding file changes
47 47 added 2 changesets with 2 changes to 2 files
48 48 new changesets 53245c60e682:aaff8d2ffbbf
49 49 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
50 50
51 51 Manifest file with invalid URL aborts
52 52
53 53 $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
54 54 $ hg clone http://localhost:$HGPORT 404-url
55 55 applying clone bundle from http://does.not.exist/bundle.hg
56 56 error fetching bundle: (.* not known|(\[Errno -?\d+] )?([Nn]o address associated with (host)?name|Temporary failure in name resolution|Name does not resolve)) (re) (no-windows !)
57 57 error fetching bundle: [Errno 1100*] getaddrinfo failed (glob) (windows !)
58 58 abort: error applying bundle
59 59 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
60 60 [255]
61 61
62 62 Manifest file with URL with unknown scheme skips the URL
63 63 $ echo 'weirdscheme://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
64 64 $ hg clone http://localhost:$HGPORT unknown-scheme
65 65 no compatible clone bundles available on server; falling back to regular clone
66 66 (you may want to report this to the server operator)
67 67 requesting all changes
68 68 adding changesets
69 69 adding manifests
70 70 adding file changes
71 71 added 2 changesets with 2 changes to 2 files
72 72 new changesets 53245c60e682:aaff8d2ffbbf
73 73 updating to branch default
74 74 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
75 75
76 76 Server is not running aborts
77 77
78 78 $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
79 79 $ hg clone http://localhost:$HGPORT server-not-runner
80 80 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
81 81 error fetching bundle: (.*\$ECONNREFUSED\$|Protocol not supported|(.* )?\$EADDRNOTAVAIL\$|.* No route to host) (re)
82 82 abort: error applying bundle
83 83 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
84 84 [255]
85 85
86 86 Server returns 404
87 87
88 88 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
89 89 $ cat http.pid >> $DAEMON_PIDS
90 90 $ hg clone http://localhost:$HGPORT running-404
91 91 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
92 92 HTTP error fetching bundle: HTTP Error 404: File not found
93 93 abort: error applying bundle
94 94 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
95 95 [255]
96 96
97 97 We can override failure to fall back to regular clone
98 98
99 99 $ hg --config ui.clonebundlefallback=true clone -U http://localhost:$HGPORT 404-fallback
100 100 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
101 101 HTTP error fetching bundle: HTTP Error 404: File not found
102 102 falling back to normal clone
103 103 requesting all changes
104 104 adding changesets
105 105 adding manifests
106 106 adding file changes
107 107 added 2 changesets with 2 changes to 2 files
108 108 new changesets 53245c60e682:aaff8d2ffbbf
109 109
110 110 Bundle with partial content works
111 111
112 112 $ hg -R server bundle --type gzip-v1 --base null -r 53245c60e682 partial.hg
113 113 1 changesets found
114 114
115 115 We verify exact bundle content as an extra check against accidental future
116 116 changes. If this output changes, we could break old clients.
117 117
118 118 $ f --size --hexdump partial.hg
119 119 partial.hg: size=207
120 120 0000: 48 47 31 30 47 5a 78 9c 63 60 60 98 17 ac 12 93 |HG10GZx.c``.....|
121 121 0010: f0 ac a9 23 45 70 cb bf 0d 5f 59 4e 4a 7f 79 21 |...#Ep..._YNJ.y!|
122 122 0020: 9b cc 40 24 20 a0 d7 ce 2c d1 38 25 cd 24 25 d5 |..@$ ...,.8%.$%.|
123 123 0030: d8 c2 22 cd 38 d9 24 cd 22 d5 c8 22 cd 24 cd 32 |..".8.$."..".$.2|
124 124 0040: d1 c2 d0 c4 c8 d2 32 d1 38 39 29 c9 34 cd d4 80 |......2.89).4...|
125 125 0050: ab 24 b5 b8 84 cb 40 c1 80 2b 2d 3f 9f 8b 2b 31 |.$....@..+-?..+1|
126 126 0060: 25 45 01 c8 80 9a d2 9b 65 fb e5 9e 45 bf 8d 7f |%E......e...E...|
127 127 0070: 9f c6 97 9f 2b 44 34 67 d9 ec 8e 0f a0 92 0b 75 |....+D4g.......u|
128 128 0080: 41 d6 24 59 18 a4 a4 9a a6 18 1a 5b 98 9b 5a 98 |A.$Y.......[..Z.|
129 129 0090: 9a 18 26 9b a6 19 98 1a 99 99 26 a6 18 9a 98 24 |..&.......&....$|
130 130 00a0: 26 59 a6 25 5a 98 a5 18 a6 24 71 41 35 b1 43 dc |&Y.%Z....$qA5.C.|
131 131 00b0: 16 b2 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 d7 8a |.....E..V....R..|
132 132 00c0: 78 ed fc d5 76 f1 36 35 dc 05 00 36 ed 5e c7 |x...v.65...6.^.|
133 133
134 134 $ echo "http://localhost:$HGPORT1/partial.hg" > server/.hg/clonebundles.manifest
135 135 $ hg clone -U http://localhost:$HGPORT partial-bundle
136 136 applying clone bundle from http://localhost:$HGPORT1/partial.hg
137 137 adding changesets
138 138 adding manifests
139 139 adding file changes
140 140 added 1 changesets with 1 changes to 1 files
141 141 finished applying clone bundle
142 142 searching for changes
143 143 adding changesets
144 144 adding manifests
145 145 adding file changes
146 146 added 1 changesets with 1 changes to 1 files
147 147 new changesets aaff8d2ffbbf
148 148 1 local changesets published
149 149
150 150 Incremental pull doesn't fetch bundle
151 151
152 152 $ hg clone -r 53245c60e682 -U http://localhost:$HGPORT partial-clone
153 153 adding changesets
154 154 adding manifests
155 155 adding file changes
156 156 added 1 changesets with 1 changes to 1 files
157 157 new changesets 53245c60e682
158 158
159 159 $ cd partial-clone
160 160 $ hg pull
161 161 pulling from http://localhost:$HGPORT/
162 162 searching for changes
163 163 adding changesets
164 164 adding manifests
165 165 adding file changes
166 166 added 1 changesets with 1 changes to 1 files
167 167 new changesets aaff8d2ffbbf
168 168 (run 'hg update' to get a working copy)
169 169 $ cd ..
170 170
171 171 Bundle with full content works
172 172
173 173 $ hg -R server bundle --type gzip-v2 --base null -r tip full.hg
174 174 2 changesets found
175 175
176 176 Again, we perform an extra check against bundle content changes. If this content
177 177 changes, clone bundles produced by new Mercurial versions may not be readable
178 178 by old clients.
179 179
180 180 $ f --size --hexdump full.hg
181 181 full.hg: size=442
182 182 0000: 48 47 32 30 00 00 00 0e 43 6f 6d 70 72 65 73 73 |HG20....Compress|
183 183 0010: 69 6f 6e 3d 47 5a 78 9c 63 60 60 d0 e4 76 f6 70 |ion=GZx.c``..v.p|
184 184 0020: f4 73 77 75 0f f2 0f 0d 60 00 02 46 46 76 26 4e |.swu....`..FFv&N|
185 185 0030: c6 b2 d4 a2 e2 cc fc 3c 03 a3 bc a4 e4 8c c4 bc |.......<........|
186 186 0040: f4 d4 62 23 06 06 e6 19 40 f9 4d c1 2a 31 09 cf |..b#....@.M.*1..|
187 187 0050: 9a 3a 52 04 b7 fc db f0 95 e5 a4 f4 97 17 b2 c9 |.:R.............|
188 188 0060: 0c 14 00 02 e6 d9 99 25 1a a7 a4 99 a4 a4 1a 5b |.......%.......[|
189 189 0070: 58 a4 19 27 9b a4 59 a4 1a 59 a4 99 a4 59 26 5a |X..'..Y..Y...Y&Z|
190 190 0080: 18 9a 18 59 5a 26 1a 27 27 25 99 a6 99 1a 70 95 |...YZ&.''%....p.|
191 191 0090: a4 16 97 70 19 28 18 70 a5 e5 e7 73 71 25 a6 a4 |...p.(.p...sq%..|
192 192 00a0: 28 00 19 20 17 af fa df ab ff 7b 3f fb 92 dc 8b |(.. ......{?....|
193 193 00b0: 1f 62 bb 9e b7 d7 d9 87 3d 5a 44 89 2f b0 99 87 |.b......=ZD./...|
194 194 00c0: ec e2 54 63 43 e3 b4 64 43 73 23 33 43 53 0b 63 |..TcC..dCs#3CS.c|
195 195 00d0: d3 14 23 03 a0 fb 2c 2c 0c d3 80 1e 30 49 49 b1 |..#...,,....0II.|
196 196 00e0: 4c 4a 32 48 33 30 b0 34 42 b8 38 29 b1 08 e2 62 |LJ2H30.4B.8)...b|
197 197 00f0: 20 03 6a ca c2 2c db 2f f7 2c fa 6d fc fb 34 be | .j..,./.,.m..4.|
198 198 0100: fc 5c 21 a2 39 cb 66 77 7c 00 0d c3 59 17 14 58 |.\!.9.fw|...Y..X|
199 199 0110: 49 16 06 29 a9 a6 29 86 c6 16 e6 a6 16 a6 26 86 |I..)..).......&.|
200 200 0120: c9 a6 69 06 a6 46 66 a6 89 29 86 26 26 89 49 96 |..i..Ff..).&&.I.|
201 201 0130: 69 89 16 66 29 86 29 49 5c 20 07 3e 16 fe 23 ae |i..f).)I\ .>..#.|
202 202 0140: 26 da 1c ab 10 1f d1 f8 e3 b3 ef cd dd fc 0c 93 |&...............|
203 203 0150: 88 75 34 36 75 04 82 55 17 14 36 a4 38 10 04 d8 |.u46u..U..6.8...|
204 204 0160: 21 01 9a b1 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 |!......E..V....R|
205 205 0170: d7 8a 78 ed fc d5 76 f1 36 25 81 89 c7 ad ec 90 |..x...v.6%......|
206 206 0180: 54 47 75 2b 89 48 b1 b2 62 c9 89 c9 19 a9 56 45 |TGu+.H..b.....VE|
207 207 0190: a9 65 ba 49 45 89 79 c9 19 ba 60 01 a0 14 23 58 |.e.IE.y...`...#X|
208 208 01a0: 81 35 c8 7d 40 cc 04 e2 a4 a4 a6 25 96 e6 94 60 |.5.}@......%...`|
209 209 01b0: 33 17 5f 54 00 00 d3 1b 0d 4c |3._T.....L|
210 210
211 211 $ echo "http://localhost:$HGPORT1/full.hg" > server/.hg/clonebundles.manifest
212 212 $ hg clone -U http://localhost:$HGPORT full-bundle
213 213 applying clone bundle from http://localhost:$HGPORT1/full.hg
214 214 adding changesets
215 215 adding manifests
216 216 adding file changes
217 217 added 2 changesets with 2 changes to 2 files
218 218 finished applying clone bundle
219 219 searching for changes
220 220 no changes found
221 221 2 local changesets published
222 222
223 223 Feature works over SSH
224 224
225 225 $ hg clone -U ssh://user@dummy/server ssh-full-clone
226 226 applying clone bundle from http://localhost:$HGPORT1/full.hg
227 227 adding changesets
228 228 adding manifests
229 229 adding file changes
230 230 added 2 changesets with 2 changes to 2 files
231 231 finished applying clone bundle
232 232 searching for changes
233 233 no changes found
234 234 2 local changesets published
235 235
236 236 Inline bundle
237 237 =============
238 238
239 239 Checking bundle retrieved over the wireprotocol
240 240
241 241 Feature works over SSH with inline bundle
242 242 -----------------------------------------
243 243
244 244 $ mkdir server/.hg/bundle-cache/
245 245 $ cp full.hg server/.hg/bundle-cache/
246 246 $ echo "peer-bundle-cache://full.hg" > server/.hg/clonebundles.manifest
247 247 $ hg clone -U ssh://user@dummy/server ssh-inline-clone
248 248 applying clone bundle from peer-bundle-cache://full.hg
249 249 adding changesets
250 250 adding manifests
251 251 adding file changes
252 252 added 2 changesets with 2 changes to 2 files
253 253 finished applying clone bundle
254 254 searching for changes
255 255 no changes found
256 256 2 local changesets published
257 257
258 258 HTTP support
259 259 ------------
260 260
261 261 $ hg clone -U http://localhost:$HGPORT http-inline-clone
262 262 applying clone bundle from peer-bundle-cache://full.hg
263 263 adding changesets
264 264 adding manifests
265 265 adding file changes
266 266 added 2 changesets with 2 changes to 2 files
267 267 finished applying clone bundle
268 268 searching for changes
269 269 no changes found
270 270 2 local changesets published
271 271
272 272
273 273 Check local behavior
274 274 --------------------
275 275
276 276 We don't use the clone bundle, but we do not crash either.
277 277
278 278 $ hg clone -U ./server local-inline-clone-default
279 279 $ hg clone -U ./server local-inline-clone-pull --pull
280 280 requesting all changes
281 281 adding changesets
282 282 adding manifests
283 283 adding file changes
284 284 added 2 changesets with 2 changes to 2 files
285 285 new changesets 53245c60e682:aaff8d2ffbbf
286 286
287 287 Pre-transmit Hook
288 288 -----------------
289 289
290 290 Hooks work with inline bundle
291 291
292 292 $ cp server/.hg/hgrc server/.hg/hgrc-beforeinlinehooks
293 293 $ echo "[hooks]" >> server/.hg/hgrc
294 294 $ echo "pretransmit-inline-clone-bundle=echo foo" >> server/.hg/hgrc
295 295 $ hg clone -U ssh://user@dummy/server ssh-inline-clone-hook
296 296 applying clone bundle from peer-bundle-cache://full.hg
297 297 remote: foo
298 298 adding changesets
299 299 adding manifests
300 300 adding file changes
301 301 added 2 changesets with 2 changes to 2 files
302 302 finished applying clone bundle
303 303 searching for changes
304 304 no changes found
305 305 2 local changesets published
306 306
307 307 Hooks can make an inline bundle fail
308 308
309 309 $ cp server/.hg/hgrc-beforeinlinehooks server/.hg/hgrc
310 310 $ echo "[hooks]" >> server/.hg/hgrc
311 311 $ echo "pretransmit-inline-clone-bundle=echo bar && false" >> server/.hg/hgrc
312 312 $ hg clone -U ssh://user@dummy/server ssh-inline-clone-hook-fail
313 313 applying clone bundle from peer-bundle-cache://full.hg
314 314 remote: bar
315 315 remote: abort: pretransmit-inline-clone-bundle hook exited with status 1
316 316 abort: stream ended unexpectedly (got 0 bytes, expected 1)
317 317 [255]
318 318 $ cp server/.hg/hgrc-beforeinlinehooks server/.hg/hgrc
319 319
320 320 Other tests
321 321 ===========
322 322
323 323 Entry with unknown BUNDLESPEC is filtered and not used
324 324
325 325 $ cat > server/.hg/clonebundles.manifest << EOF
326 326 > http://bad.entry1 BUNDLESPEC=UNKNOWN
327 327 > http://bad.entry2 BUNDLESPEC=xz-v1
328 328 > http://bad.entry3 BUNDLESPEC=none-v100
329 329 > http://localhost:$HGPORT1/full.hg BUNDLESPEC=gzip-v2
330 330 > EOF
331 331
332 332 $ hg clone -U http://localhost:$HGPORT filter-unknown-type
333 333 applying clone bundle from http://localhost:$HGPORT1/full.hg
334 334 adding changesets
335 335 adding manifests
336 336 adding file changes
337 337 added 2 changesets with 2 changes to 2 files
338 338 finished applying clone bundle
339 339 searching for changes
340 340 no changes found
341 341 2 local changesets published
342 342
343 343 Automatic fallback when all entries are filtered
344 344
345 345 $ cat > server/.hg/clonebundles.manifest << EOF
346 346 > http://bad.entry BUNDLESPEC=UNKNOWN
347 347 > EOF
348 348
349 349 $ hg clone -U http://localhost:$HGPORT filter-all
350 350 no compatible clone bundles available on server; falling back to regular clone
351 351 (you may want to report this to the server operator)
352 352 requesting all changes
353 353 adding changesets
354 354 adding manifests
355 355 adding file changes
356 356 added 2 changesets with 2 changes to 2 files
357 357 new changesets 53245c60e682:aaff8d2ffbbf
358 358
359 359 We require a Python version that supports SNI. Therefore, URLs requiring SNI
360 360 are not filtered.
361 361
362 362 $ cp full.hg sni.hg
363 363 $ cat > server/.hg/clonebundles.manifest << EOF
364 364 > http://localhost:$HGPORT1/sni.hg REQUIRESNI=true
365 365 > http://localhost:$HGPORT1/full.hg
366 366 > EOF
367 367
368 368 $ hg clone -U http://localhost:$HGPORT sni-supported
369 369 applying clone bundle from http://localhost:$HGPORT1/sni.hg
370 370 adding changesets
371 371 adding manifests
372 372 adding file changes
373 373 added 2 changesets with 2 changes to 2 files
374 374 finished applying clone bundle
375 375 searching for changes
376 376 no changes found
377 377 2 local changesets published
378 378
Stream clone bundles are supported

  $ hg -R server debugcreatestreamclonebundle packed.hg
  writing 613 bytes for 5 files (no-rust !)
  writing 739 bytes for 7 files (rust !)
  bundle requirements: generaldelta, revlogv1, sparserevlog (no-rust no-zstd !)
  bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog (no-rust zstd !)
  bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog (rust !)

An entry without a BUNDLESPEC should work

  $ cat > server/.hg/clonebundles.manifest << EOF
  > http://localhost:$HGPORT1/packed.hg
  > EOF

  $ hg clone -U http://localhost:$HGPORT stream-clone-no-spec
  applying clone bundle from http://localhost:$HGPORT1/packed.hg
  5 files to transfer, 613 bytes of data (no-rust !)
  transferred 613 bytes in * seconds (* */sec) (glob) (no-rust !)
  7 files to transfer, 739 bytes of data (rust !)
  transferred 739 bytes in * seconds (* */sec) (glob) (rust !)
  finished applying clone bundle
  searching for changes
  no changes found

Bundle spec without parameters should work

  $ cat > server/.hg/clonebundles.manifest << EOF
  > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
  > EOF

  $ hg clone -U http://localhost:$HGPORT stream-clone-vanilla-spec
  applying clone bundle from http://localhost:$HGPORT1/packed.hg
  * files to transfer, * bytes of data (glob)
  transferred * bytes in * seconds (* */sec) (glob)
  finished applying clone bundle
  searching for changes
  no changes found

Bundle spec with format requirements should work

  $ cat > server/.hg/clonebundles.manifest << EOF
  > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
  > EOF

  $ hg clone -U http://localhost:$HGPORT stream-clone-supported-requirements
  applying clone bundle from http://localhost:$HGPORT1/packed.hg
  * files to transfer, * bytes of data (glob)
  transferred * bytes in * seconds (* */sec) (glob)
  finished applying clone bundle
  searching for changes
  no changes found

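Note the %3D in the BUNDLESPEC value above: attribute values are
percent-encoded, so the literal '=' inside "requirements=revlogv1" cannot be
confused with the KEY=VALUE separators of the manifest line itself. Decoding
is a plain URL-unquote, roughly (a sketch, not the in-tree parser):

    from urllib.parse import unquote

    def parse_manifest_line(line):
        # "URL KEY1=VAL1 KEY2=VAL2 ..." with percent-encoded values (sketch)
        url, *fields = line.split()
        attrs = {}
        for field in fields:
            key, _, value = field.partition("=")
            attrs[key] = unquote(value)
        return url, attrs

    url, attrs = parse_manifest_line(
        "http://example.com/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1"
    )
    assert attrs["BUNDLESPEC"] == "none-packed1;requirements=revlogv1"
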
Stream bundle spec with unknown requirements should be filtered out

  $ cat > server/.hg/clonebundles.manifest << EOF
  > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
  > EOF

  $ hg clone -U http://localhost:$HGPORT stream-clone-unsupported-requirements
  no compatible clone bundles available on server; falling back to regular clone
  (you may want to report this to the server operator)
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 2 changes to 2 files
  new changesets 53245c60e682:aaff8d2ffbbf

Set up manifest for testing preferences
(Remember, the TYPE does not have to match reality - the URL is
important)

  $ cp full.hg gz-a.hg
  $ cp full.hg gz-b.hg
  $ cp full.hg bz2-a.hg
  $ cp full.hg bz2-b.hg
  $ cat > server/.hg/clonebundles.manifest << EOF
  > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 extra=a
  > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2 extra=a
  > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
  > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
  > EOF

Preferring an undefined attribute will take the first entry

  $ hg --config ui.clonebundleprefers=foo=bar clone -U http://localhost:$HGPORT prefer-foo
  applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 2 changes to 2 files
  finished applying clone bundle
  searching for changes
  no changes found
  2 local changesets published

Preferring the bz2 type will download the first entry of that type

  $ hg --config ui.clonebundleprefers=COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-bz
  applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 2 changes to 2 files
  finished applying clone bundle
  searching for changes
  no changes found
  2 local changesets published

Preferring multiple values of an option works

  $ hg --config ui.clonebundleprefers=COMPRESSION=unknown,COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-multiple-bz
  applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 2 changes to 2 files
  finished applying clone bundle
  searching for changes
  no changes found
  2 local changesets published

Sorting multiple values should get us back to the original first entry

  $ hg --config ui.clonebundleprefers=BUNDLESPEC=unknown,BUNDLESPEC=gzip-v2,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-multiple-gz
  applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 2 changes to 2 files
  finished applying clone bundle
  searching for changes
  no changes found
  2 local changesets published

Preferring multiple attributes has correct order

  $ hg --config ui.clonebundleprefers=extra=b,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-separate-attributes
  applying clone bundle from http://localhost:$HGPORT1/bz2-b.hg
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 2 changes to 2 files
  finished applying clone bundle
  searching for changes
  no changes found
  2 local changesets published

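All of the orderings above fall out of one rule: for every entry, build a
vector recording, for each ui.clonebundleprefers item in turn, whether the
entry matches it, then stable-sort on that vector so ties keep manifest
order. A sketch (hypothetical helper, not the in-tree implementation):

    def sort_by_prefers(entries, prefers):
        # entries: (url, attrs) tuples in manifest order
        # prefers: ordered (key, value) pairs from ui.clonebundleprefers
        def score(entry):
            _url, attrs = entry
            # 0 = matches this preference, 1 = does not; missing attributes
            # never match, which also covers the partially-defined case below
            return [0 if attrs.get(key) == value else 1 for key, value in prefers]
        return sorted(entries, key=score)  # sorted() is stable

With prefers [("extra", "b"), ("BUNDLESPEC", "bzip2-v2")], the bz2-b entry
scores [0, 0] and sorts first, matching the prefer-separate-attributes run;
with an unknown key every entry scores the same and the stable sort keeps the
first manifest entry in front.
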
Test where an attribute is missing from some entries

  $ cat > server/.hg/clonebundles.manifest << EOF
  > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
  > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2
  > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
  > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
  > EOF

  $ hg --config ui.clonebundleprefers=extra=b clone -U http://localhost:$HGPORT prefer-partially-defined-attribute
  applying clone bundle from http://localhost:$HGPORT1/gz-b.hg
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 2 changes to 2 files
  finished applying clone bundle
  searching for changes
  no changes found
  2 local changesets published

Test a bad attribute list

  $ hg --config ui.clonebundleprefers=bad clone -U http://localhost:$HGPORT bad-input
  abort: invalid ui.clonebundleprefers item: bad
  (each comma separated item should be key=value pairs)
  [255]
  $ hg --config ui.clonebundleprefers=key=val,bad,key2=val2 clone \
  > -U http://localhost:$HGPORT bad-input
  abort: invalid ui.clonebundleprefers item: bad
  (each comma separated item should be key=value pairs)
  [255]


Test interaction between clone bundles and --stream

A manifest with just a gzip bundle

  $ cat > server/.hg/clonebundles.manifest << EOF
  > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
  > EOF

  $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip
  no compatible clone bundles available on server; falling back to regular clone
  (you may want to report this to the server operator)
  streaming all changes
  * files to transfer, * bytes of data (glob)
  transferred * bytes in * seconds (* */sec) (glob)

A manifest with a stream clone but no BUNDLESPEC

  $ cat > server/.hg/clonebundles.manifest << EOF
  > http://localhost:$HGPORT1/packed.hg
  > EOF

  $ hg clone -U --stream http://localhost:$HGPORT uncompressed-no-bundlespec
  no compatible clone bundles available on server; falling back to regular clone
  (you may want to report this to the server operator)
  streaming all changes
  * files to transfer, * bytes of data (glob)
  transferred * bytes in * seconds (* */sec) (glob)

A manifest with a gzip bundle and a stream clone

  $ cat > server/.hg/clonebundles.manifest << EOF
  > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
  > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
  > EOF

  $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed
  applying clone bundle from http://localhost:$HGPORT1/packed.hg
  * files to transfer, * bytes of data (glob)
  transferred * bytes in * seconds (* */sec) (glob)
  finished applying clone bundle
  searching for changes
  no changes found

A manifest with a gzip bundle and a stream clone with supported requirements

  $ cat > server/.hg/clonebundles.manifest << EOF
  > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
  > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
  > EOF

  $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-requirements
  applying clone bundle from http://localhost:$HGPORT1/packed.hg
  * files to transfer, * bytes of data (glob)
  transferred * bytes in * seconds (* */sec) (glob)
  finished applying clone bundle
  searching for changes
  no changes found

A manifest with a gzip bundle and a stream clone with unsupported requirements

  $ cat > server/.hg/clonebundles.manifest << EOF
  > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
  > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
  > EOF

  $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-unsupported-requirements
  no compatible clone bundles available on server; falling back to regular clone
  (you may want to report this to the server operator)
  streaming all changes
  * files to transfer, * bytes of data (glob)
  transferred * bytes in * seconds (* */sec) (glob)

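The --stream runs show a second filter layer on top of the spec and
requirements checks: when the user asked for a streaming clone, only entries
that are themselves stream bundles qualify, and an entry without a BUNDLESPEC
cannot be proven to be one, so it is dropped as well. Roughly (treating
packed1 as the stream format, which is an assumption of this sketch):

    def usable_with_stream(attrs, stream_versions=("packed1",)):
        # with --stream, require an explicit BUNDLESPEC naming a stream format;
        # a missing spec or a non-stream one (e.g. gzip-v2) is filtered out
        spec = attrs.get("BUNDLESPEC")
        if spec is None:
            return False
        _comp, _, version = spec.partition("-")
        return version.partition(";")[0] in stream_versions
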
Test clone bundle retrieved through bundle2

  $ cat << EOF >> $HGRCPATH
  > [extensions]
  > largefiles=
  > EOF
  $ killdaemons.py
  $ hg -R server serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
  $ cat hg.pid >> $DAEMON_PIDS

  $ hg -R server debuglfput gz-a.hg
  1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae

  $ cat > server/.hg/clonebundles.manifest << EOF
  > largefile://1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae BUNDLESPEC=gzip-v2
  > EOF

  $ hg clone -U http://localhost:$HGPORT largefile-provided --traceback
  applying clone bundle from largefile://1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 2 changes to 2 files
  finished applying clone bundle
  searching for changes
  no changes found
  2 local changesets published
  $ killdaemons.py

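The largefile:// URL embeds a content address printed by `hg debuglfput`; its
40 hex digits look like a SHA-1 of the raw file, which is what the sketch
below assumes (illustrative only):

    import hashlib

    def largefile_id(path):
        # hex SHA-1 of the raw file bytes, assumed to match what
        # `hg debuglfput` prints for the same file
        h = hashlib.sha1()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest()
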
A manifest with a gzip bundle requiring too much memory for a 16MB system and working
on a 32MB system.

  $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
  $ cat http.pid >> $DAEMON_PIDS
  $ hg -R server serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
  $ cat hg.pid >> $DAEMON_PIDS

  $ cat > server/.hg/clonebundles.manifest << EOF
  > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 REQUIREDRAM=12MB
  > EOF

  $ hg clone -U --debug --config ui.available-memory=16MB http://localhost:$HGPORT gzip-too-large
  using http://localhost:$HGPORT/
  sending capabilities command
  sending clonebundles_manifest command
  filtering http://localhost:$HGPORT1/gz-a.hg as it needs more than 2/3 of system memory
  no compatible clone bundles available on server; falling back to regular clone
  (you may want to report this to the server operator)
  query 1; heads
  sending batch command
  requesting all changes
  sending getbundle command
  bundle2-input-bundle: with-transaction
  bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
  adding changesets
  add changeset 53245c60e682
  add changeset aaff8d2ffbbf
  adding manifests
  adding file changes
  adding bar revisions
  adding foo revisions
  bundle2-input-part: total payload size 936
  bundle2-input-part: "listkeys" (params: 1 mandatory) supported
  bundle2-input-part: "phase-heads" supported
  bundle2-input-part: total payload size 24
  bundle2-input-bundle: 3 parts total
  checking for updated bookmarks
  updating the branch cache
  added 2 changesets with 2 changes to 2 files
  new changesets 53245c60e682:aaff8d2ffbbf
  calling hook changegroup.lfiles: hgext.largefiles.reposetup.checkrequireslfiles
  updating the branch cache
  (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)

  $ hg clone -U --debug --config ui.available-memory=32MB http://localhost:$HGPORT gzip-too-large2
  using http://localhost:$HGPORT/
  sending capabilities command
  sending clonebundles_manifest command
  applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
  bundle2-input-bundle: 1 params with-transaction
  bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
  adding changesets
  add changeset 53245c60e682
  add changeset aaff8d2ffbbf
  adding manifests
  adding file changes
  adding bar revisions
  adding foo revisions
  bundle2-input-part: total payload size 920
  bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
  bundle2-input-part: total payload size 59
  bundle2-input-bundle: 2 parts total
  updating the branch cache
  added 2 changesets with 2 changes to 2 files
  finished applying clone bundle
  query 1; heads
  sending batch command
  searching for changes
  all remote heads known locally
  no changes found
  sending getbundle command
  bundle2-input-bundle: with-transaction
  bundle2-input-part: "listkeys" (params: 1 mandatory) supported
  bundle2-input-part: "phase-heads" supported
  bundle2-input-part: total payload size 24
  bundle2-input-bundle: 2 parts total
  checking for updated bookmarks
  2 local changesets published
  calling hook changegroup.lfiles: hgext.largefiles.reposetup.checkrequireslfiles
  updating the branch cache
  (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
  $ killdaemons.py

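The debug output spells out the memory rule: an entry is filtered when its
REQUIREDRAM exceeds two thirds of ui.available-memory. With 16MB available the
budget is about 10.7MB, so the 12MB bundle is skipped; with 32MB the budget is
about 21.3MB and the same entry passes. As a sketch:

    def fits_in_memory(required_ram_bytes, available_memory_bytes):
        # usable only when the bundle needs no more than 2/3 of system memory
        return 3 * required_ram_bytes <= 2 * available_memory_bytes

    MB = 1024 * 1024
    assert not fits_in_memory(12 * MB, 16 * MB)  # filtered on the 16MB system
    assert fits_in_memory(12 * MB, 32 * MB)      # accepted on the 32MB system
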
Testing a clone bundle with digest
==================================

  $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
  $ cat http.pid >> $DAEMON_PIDS
  $ hg -R server serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
  $ cat hg.pid >> $DAEMON_PIDS

  $ digest=$("$PYTHON" -c "import hashlib; print (hashlib.sha256(open('gz-a.hg', 'rb').read()).hexdigest())")
  $ cat > server/.hg/clonebundles.manifest << EOF
  > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 DIGEST=sha256:${digest}
  > EOF
  $ hg clone -U http://localhost:$HGPORT digest-valid
  applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 2 changes to 2 files
  finished applying clone bundle
  searching for changes
  no changes found
  2 local changesets published
  $ digest_bad=$("$PYTHON" -c "import hashlib; print (hashlib.sha256(open('gz-a.hg', 'rb').read()+b'.').hexdigest())")
  $ cat > server/.hg/clonebundles.manifest << EOF
  > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 DIGEST=sha256:${digest_bad}
  > EOF
  $ hg clone -U http://localhost:$HGPORT digest-invalid
  applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
  abort: file with digest [0-9a-f]* expected, but [0-9a-f]* found for [0-9]* bytes (re)
  [150]
  $ cat > server/.hg/clonebundles.manifest << EOF
  > http://localhost:$HGPORT1/bad-a.hg BUNDLESPEC=gzip-v2 DIGEST=sha256:xx
  > http://localhost:$HGPORT1/bad-b.hg BUNDLESPEC=gzip-v2 DIGEST=xxx:0000
  > http://localhost:$HGPORT1/bad-c.hg BUNDLESPEC=gzip-v2 DIGEST=sha256:0000
  > http://localhost:$HGPORT1/bad-d.hg BUNDLESPEC=gzip-v2 DIGEST=xxx:00,xxx:01
  > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 DIGEST=sha256:${digest_bad}
  > EOF
  $ hg clone --debug -U http://localhost:$HGPORT digest-malformed
  using http://localhost:$HGPORT/
  sending capabilities command
  sending clonebundles_manifest command
  filtering http://localhost:$HGPORT1/bad-a.hg due to a bad DIGEST attribute
  filtering http://localhost:$HGPORT1/bad-b.hg due to lack of supported digest
  filtering http://localhost:$HGPORT1/bad-c.hg due to a bad sha256 digest
  filtering http://localhost:$HGPORT1/bad-d.hg due to conflicting xxx digests
  applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
  bundle2-input-bundle: 1 params with-transaction
  bundle2-input-bundle: 0 parts total
  \(sent [0-9]* HTTP requests and [0-9]* bytes; received [0-9]* bytes in responses\) (re)
  abort: file with digest [0-9a-f]* expected, but [0-9a-f]* found for [0-9]* bytes (re)
  [150]
  $ cat > server/.hg/clonebundles.manifest << EOF
  > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 DIGEST=sha512:00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000,sha256:0000000000000000000000000000000000000000000000000000000000000000
  > EOF
  $ hg clone -U http://localhost:$HGPORT digest-preference
  applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
  abort: file with digest 0{64} expected, but [0-9a-f]+ found for [0-9]+ bytes (re)
  [150]
  $ killdaemons.py

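Summing up what these runs exercise: DIGEST carries comma-separated ALGO:HEX
pairs; entries with a malformed pair, no supported algorithm, a digest of the
wrong length, or two digests for the same algorithm are filtered before
download, and with several supported algorithms the client verifies its
preferred one (the abort message of the last run shows the 64-zero sha256
value rather than the sha512 one, so the preference order below is inferred
from that). A condensed stand-in for the validation, not the in-tree code:

    import hashlib

    # preference order inferred from the digest-preference run above
    PREFERRED = ("sha256", "sha512")

    def parse_digests(value):
        digests = {}
        for pair in value.split(","):
            algo, sep, hexdigest = pair.partition(":")
            if not sep or not all(c in "0123456789abcdef" for c in hexdigest):
                raise ValueError("bad DIGEST attribute")           # e.g. sha256:xx
            if algo in digests:
                raise ValueError("conflicting %s digests" % algo)  # xxx:00,xxx:01
            digests[algo] = hexdigest
        for algo in PREFERRED:
            if algo in digests:
                if len(digests[algo]) != 2 * hashlib.new(algo).digest_size:
                    raise ValueError("bad %s digest" % algo)       # e.g. sha256:0000
                return algo, digests[algo]
        raise ValueError("no supported digest")                    # e.g. xxx:0000 alone

    def verify(data, algo, hexdigest):
        actual = hashlib.new(algo, data).hexdigest()
        if actual != hexdigest:
            raise ValueError("file with digest %s expected, but %s found"
                             % (hexdigest, actual))
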
Testing a clone bundle that involves revlog splitting (issue6811)
=================================================================

  $ cat >> $HGRCPATH << EOF
  > [format]
  > revlog-compression=none
  > use-persistent-nodemap=no
  > EOF

  $ hg init server-revlog-split/
  $ cd server-revlog-split
  $ cat >> .hg/hgrc << EOF
  > [extensions]
  > clonebundles =
  > EOF
  $ echo foo > A
  $ hg add A
  $ hg commit -m 'initial commit'
IMPORTANT: the revlogs must not be split
  $ ls -1 .hg/store/00manifest.*
  .hg/store/00manifest.i
  $ ls -1 .hg/store/data/_a.*
  .hg/store/data/_a.i

Do a big enough update to split the revlogs

  $ $TESTDIR/seq.py 100000 > A
  $ mkdir foo
  $ cd foo
  $ touch `$TESTDIR/seq.py 10000`
  $ cd ..
  $ hg add -q foo
  $ hg commit -m 'split the manifest and one filelog'

IMPORTANT: now the revlogs must be split
  $ ls -1 .hg/store/00manifest.*
  .hg/store/00manifest.d
  .hg/store/00manifest.i
  $ ls -1 .hg/store/data/_a.*
  .hg/store/data/_a.d
  .hg/store/data/_a.i

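An inline revlog keeps its data interleaved with the index in the .i file;
once it grows past an internal size threshold the data moves out into a
companion .d file, which is exactly the split this test provokes. The two ls
checks above reduce to a tiny predicate (sketch):

    import os

    def revlog_layout(prefix):
        # inline: only "<prefix>.i" (index and data together)
        # split:  "<prefix>.i" index plus a separate "<prefix>.d" data file
        return "split" if os.path.exists(prefix + ".d") else "inline"

    # e.g. revlog_layout(".hg/store/00manifest") flips from "inline" to
    # "split" after the big commit above
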
Add an extra commit on top of that

  $ echo foo >> A
  $ hg commit -m 'one extra commit'

  $ cd ..

Do a bundle that contains the split, but not the update

  $ hg bundle --exact --rev '::(default~1)' -R server-revlog-split/ --type gzip-v2 split-test.hg
  2 changesets found

  $ cat > server-revlog-split/.hg/clonebundles.manifest << EOF
  > http://localhost:$HGPORT1/split-test.hg BUNDLESPEC=gzip-v2
  > EOF

Start the necessary servers

  $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
  $ cat http.pid >> $DAEMON_PIDS
  $ hg -R server-revlog-split serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
  $ cat hg.pid >> $DAEMON_PIDS

Check that clone works fine
===========================

Here, the initial clone will trigger a revlog split (which is a bit clownish
in itself, but whatever). The split revlogs will see additional data added to
them in the subsequent pull. This should not be a problem.

  $ hg clone http://localhost:$HGPORT revlog-split-in-the-bundle
  applying clone bundle from http://localhost:$HGPORT1/split-test.hg
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 10002 changes to 10001 files
  finished applying clone bundle
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  new changesets e3879eaa1db7
  2 local changesets published
  updating to branch default
  10001 files updated, 0 files merged, 0 files removed, 0 files unresolved

check the results

  $ cd revlog-split-in-the-bundle
  $ f --size .hg/store/00manifest.*
  .hg/store/00manifest.d: size=499037
  .hg/store/00manifest.i: size=192
  $ f --size .hg/store/data/_a.*
  .hg/store/data/_a.d: size=588917
  .hg/store/data/_a.i: size=192

manifest should work

  $ hg files -r tip | wc -l
  \s*10001 (re)

file content should work

  $ hg cat -r tip A | wc -l
  \s*100001 (re)
