exchange: guard against method invocation on `b2caps=None` args...
Matt Harbison
r44158:0dbea180 default draft
@@ -1,3084 +1,3085 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullid,
17 17 nullrev,
18 18 )
19 19 from .thirdparty import attr
20 20 from . import (
21 21 bookmarks as bookmod,
22 22 bundle2,
23 23 changegroup,
24 24 discovery,
25 25 error,
26 26 exchangev2,
27 27 lock as lockmod,
28 28 logexchange,
29 29 narrowspec,
30 30 obsolete,
31 31 obsutil,
32 32 phases,
33 33 pushkey,
34 34 pycompat,
35 35 scmutil,
36 36 sslutil,
37 37 streamclone,
38 38 url as urlmod,
39 39 util,
40 40 wireprototypes,
41 41 )
42 42 from .interfaces import repository
43 43 from .utils import stringutil
44 44
45 45 urlerr = util.urlerr
46 46 urlreq = util.urlreq
47 47
48 48 _NARROWACL_SECTION = b'narrowacl'
49 49
50 50 # Maps bundle version human names to changegroup versions.
51 51 _bundlespeccgversions = {
52 52 b'v1': b'01',
53 53 b'v2': b'02',
54 54 b'packed1': b's1',
55 55 b'bundle2': b'02', # legacy
56 56 }
57 57
58 58 # Maps bundle version with content opts to choose which part to bundle
59 59 _bundlespeccontentopts = {
60 60 b'v1': {
61 61 b'changegroup': True,
62 62 b'cg.version': b'01',
63 63 b'obsolescence': False,
64 64 b'phases': False,
65 65 b'tagsfnodescache': False,
66 66 b'revbranchcache': False,
67 67 },
68 68 b'v2': {
69 69 b'changegroup': True,
70 70 b'cg.version': b'02',
71 71 b'obsolescence': False,
72 72 b'phases': False,
73 73 b'tagsfnodescache': True,
74 74 b'revbranchcache': True,
75 75 },
76 76 b'packed1': {b'cg.version': b's1'},
77 77 }
78 78 _bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']
79 79
80 80 _bundlespecvariants = {
81 81 b"streamv2": {
82 82 b"changegroup": False,
83 83 b"streamv2": True,
84 84 b"tagsfnodescache": False,
85 85 b"revbranchcache": False,
86 86 }
87 87 }
88 88
89 89 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
90 90 _bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
91 91
92 92
93 93 @attr.s
94 94 class bundlespec(object):
95 95 compression = attr.ib()
96 96 wirecompression = attr.ib()
97 97 version = attr.ib()
98 98 wireversion = attr.ib()
99 99 params = attr.ib()
100 100 contentopts = attr.ib()
101 101
102 102
103 103 def parsebundlespec(repo, spec, strict=True):
104 104 """Parse a bundle string specification into parts.
105 105
106 106 Bundle specifications denote a well-defined bundle/exchange format.
107 107 The content of a given specification should not change over time in
108 108 order to ensure that bundles produced by a newer version of Mercurial are
109 109 readable from an older version.
110 110
111 111 The string currently has the form:
112 112
113 113 <compression>-<type>[;<parameter0>[;<parameter1>]]
114 114
115 115 Where <compression> is one of the supported compression formats
116 116 and <type> is (currently) a version string. A ";" can follow the type and
117 117 all text afterwards is interpreted as URI encoded, ";" delimited key=value
118 118 pairs.
119 119
120 120 If ``strict`` is True (the default) <compression> is required. Otherwise,
121 121 it is optional.
122 122
123 123 Returns a bundlespec object of (compression, version, parameters).
124 124 Compression will be ``None`` if not in strict mode and a compression isn't
125 125 defined.
126 126
127 127 An ``InvalidBundleSpecification`` is raised when the specification is
128 128 not syntactically well formed.
129 129
130 130 An ``UnsupportedBundleSpecification`` is raised when the compression or
131 131 bundle type/version is not recognized.
132 132
133 133 Note: this function will likely eventually return a more complex data
134 134 structure, including bundle2 part information.
135 135 """
136 136
137 137 def parseparams(s):
138 138 if b';' not in s:
139 139 return s, {}
140 140
141 141 params = {}
142 142 version, paramstr = s.split(b';', 1)
143 143
144 144 for p in paramstr.split(b';'):
145 145 if b'=' not in p:
146 146 raise error.InvalidBundleSpecification(
147 147 _(
148 148 b'invalid bundle specification: '
149 149 b'missing "=" in parameter: %s'
150 150 )
151 151 % p
152 152 )
153 153
154 154 key, value = p.split(b'=', 1)
155 155 key = urlreq.unquote(key)
156 156 value = urlreq.unquote(value)
157 157 params[key] = value
158 158
159 159 return version, params
160 160
161 161 if strict and b'-' not in spec:
162 162 raise error.InvalidBundleSpecification(
163 163 _(
164 164 b'invalid bundle specification; '
165 165 b'must be prefixed with compression: %s'
166 166 )
167 167 % spec
168 168 )
169 169
170 170 if b'-' in spec:
171 171 compression, version = spec.split(b'-', 1)
172 172
173 173 if compression not in util.compengines.supportedbundlenames:
174 174 raise error.UnsupportedBundleSpecification(
175 175 _(b'%s compression is not supported') % compression
176 176 )
177 177
178 178 version, params = parseparams(version)
179 179
180 180 if version not in _bundlespeccgversions:
181 181 raise error.UnsupportedBundleSpecification(
182 182 _(b'%s is not a recognized bundle version') % version
183 183 )
184 184 else:
185 185 # Value could be just the compression or just the version, in which
186 186 # case some defaults are assumed (but only when not in strict mode).
187 187 assert not strict
188 188
189 189 spec, params = parseparams(spec)
190 190
191 191 if spec in util.compengines.supportedbundlenames:
192 192 compression = spec
193 193 version = b'v1'
194 194 # Generaldelta repos require v2.
195 195 if b'generaldelta' in repo.requirements:
196 196 version = b'v2'
197 197 # Modern compression engines require v2.
198 198 if compression not in _bundlespecv1compengines:
199 199 version = b'v2'
200 200 elif spec in _bundlespeccgversions:
201 201 if spec == b'packed1':
202 202 compression = b'none'
203 203 else:
204 204 compression = b'bzip2'
205 205 version = spec
206 206 else:
207 207 raise error.UnsupportedBundleSpecification(
208 208 _(b'%s is not a recognized bundle specification') % spec
209 209 )
210 210
211 211 # Bundle version 1 only supports a known set of compression engines.
212 212 if version == b'v1' and compression not in _bundlespecv1compengines:
213 213 raise error.UnsupportedBundleSpecification(
214 214 _(b'compression engine %s is not supported on v1 bundles')
215 215 % compression
216 216 )
217 217
218 218 # The specification for packed1 can optionally declare the data formats
219 219 # required to apply it. If we see this metadata, compare against what the
220 220 # repo supports and error if the bundle isn't compatible.
221 221 if version == b'packed1' and b'requirements' in params:
222 222 requirements = set(params[b'requirements'].split(b','))
223 223 missingreqs = requirements - repo.supportedformats
224 224 if missingreqs:
225 225 raise error.UnsupportedBundleSpecification(
226 226 _(b'missing support for repository features: %s')
227 227 % b', '.join(sorted(missingreqs))
228 228 )
229 229
230 230 # Compute contentopts based on the version
231 231 contentopts = _bundlespeccontentopts.get(version, {}).copy()
232 232
233 233 # Process the variants
234 234 if b"stream" in params and params[b"stream"] == b"v2":
235 235 variant = _bundlespecvariants[b"streamv2"]
236 236 contentopts.update(variant)
237 237
238 238 engine = util.compengines.forbundlename(compression)
239 239 compression, wirecompression = engine.bundletype()
240 240 wireversion = _bundlespeccgversions[version]
241 241
242 242 return bundlespec(
243 243 compression, wirecompression, version, wireversion, params, contentopts
244 244 )
245 245
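# A minimal usage sketch for the parser above (`repo` is assumed to be an
# open local repository; the values follow the tables at the top of this
# file):
#
#   spec = parsebundlespec(repo, b'gzip-v2;obsolescence=true')
#   # spec.compression == b'gzip'      spec.wirecompression == b'GZ'
#   # spec.version     == b'v2'        spec.wireversion     == b'02'
#   # spec.params      == {b'obsolescence': b'true'}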
246 246
247 247 def readbundle(ui, fh, fname, vfs=None):
248 248 header = changegroup.readexactly(fh, 4)
249 249
250 250 alg = None
251 251 if not fname:
252 252 fname = b"stream"
253 253 if not header.startswith(b'HG') and header.startswith(b'\0'):
254 254 fh = changegroup.headerlessfixup(fh, header)
255 255 header = b"HG10"
256 256 alg = b'UN'
257 257 elif vfs:
258 258 fname = vfs.join(fname)
259 259
260 260 magic, version = header[0:2], header[2:4]
261 261
262 262 if magic != b'HG':
263 263 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
264 264 if version == b'10':
265 265 if alg is None:
266 266 alg = changegroup.readexactly(fh, 2)
267 267 return changegroup.cg1unpacker(fh, alg)
268 268 elif version.startswith(b'2'):
269 269 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
270 270 elif version == b'S1':
271 271 return streamclone.streamcloneapplier(fh)
272 272 else:
273 273 raise error.Abort(
274 274 _(b'%s: unknown bundle version %s') % (fname, version)
275 275 )
276 276
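# Header sniffing above in short -- a sketch of the on-disk magics that
# readbundle() dispatches on:
#
#   b'HG10' + alg   -> changegroup.cg1unpacker (bundle1; alg is the next
#                      two bytes, e.g. b'BZ', b'GZ', b'UN')
#   b'HG2' + rest   -> bundle2.getunbundler (in practice b'HG20')
#   b'HGS1'         -> streamclone.streamcloneapplier (packed1 stream)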
277 277
278 278 def getbundlespec(ui, fh):
279 279 """Infer the bundlespec from a bundle file handle.
280 280
281 281 The input file handle is seeked and the original seek position is not
282 282 restored.
283 283 """
284 284
285 285 def speccompression(alg):
286 286 try:
287 287 return util.compengines.forbundletype(alg).bundletype()[0]
288 288 except KeyError:
289 289 return None
290 290
291 291 b = readbundle(ui, fh, None)
292 292 if isinstance(b, changegroup.cg1unpacker):
293 293 alg = b._type
294 294 if alg == b'_truncatedBZ':
295 295 alg = b'BZ'
296 296 comp = speccompression(alg)
297 297 if not comp:
298 298 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
299 299 return b'%s-v1' % comp
300 300 elif isinstance(b, bundle2.unbundle20):
301 301 if b'Compression' in b.params:
302 302 comp = speccompression(b.params[b'Compression'])
303 303 if not comp:
304 304 raise error.Abort(
305 305 _(b'unknown compression algorithm: %s') % comp
306 306 )
307 307 else:
308 308 comp = b'none'
309 309
310 310 version = None
311 311 for part in b.iterparts():
312 312 if part.type == b'changegroup':
313 313 version = part.params[b'version']
314 314 if version in (b'01', b'02'):
315 315 version = b'v2'
316 316 else:
317 317 raise error.Abort(
318 318 _(
319 319 b'changegroup version %s does not have '
320 320 b'a known bundlespec'
321 321 )
322 322 % version,
323 323 hint=_(b'try upgrading your Mercurial client'),
324 324 )
325 325 elif part.type == b'stream2' and version is None:
326 326 # A stream2 part is required to be part of a v2 bundle

327 327 requirements = urlreq.unquote(part.params[b'requirements'])
328 328 splitted = requirements.split()
329 329 params = bundle2._formatrequirementsparams(splitted)
330 330 return b'none-v2;stream=v2;%s' % params
331 331
332 332 if not version:
333 333 raise error.Abort(
334 334 _(b'could not identify changegroup version in bundle')
335 335 )
336 336
337 337 return b'%s-%s' % (comp, version)
338 338 elif isinstance(b, streamclone.streamcloneapplier):
339 339 requirements = streamclone.readbundle1header(fh)[2]
340 340 formatted = bundle2._formatrequirementsparams(requirements)
341 341 return b'none-packed1;%s' % formatted
342 342 else:
343 343 raise error.Abort(_(b'unknown bundle type: %s') % b)
344 344
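# A round-trip sketch pairing this with parsebundlespec above (`bundlepath`
# is a hypothetical path to an existing bundle file):
#
#   with open(bundlepath, 'rb') as fh:
#       spec = getbundlespec(ui, fh)  # e.g. b'bzip2-v1', b'none-v2;stream=v2;...'
#   parsebundlespec(repo, spec)       # accepts the strings emitted here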
345 345
346 346 def _computeoutgoing(repo, heads, common):
347 347 """Computes which revs are outgoing given a set of common
348 348 and a set of heads.
349 349
350 350 This is a separate function so extensions can have access to
351 351 the logic.
352 352
353 353 Returns a discovery.outgoing object.
354 354 """
355 355 cl = repo.changelog
356 356 if common:
357 357 hasnode = cl.hasnode
358 358 common = [n for n in common if hasnode(n)]
359 359 else:
360 360 common = [nullid]
361 361 if not heads:
362 362 heads = cl.heads()
363 363 return discovery.outgoing(repo, common, heads)
364 364
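# A sketch of the defaulting above: with nothing known in common, the
# computation falls back to the null revision and all changelog heads,
# i.e. every local changeset is treated as outgoing. Roughly:
#
#   out = _computeoutgoing(repo, heads=[], common=[])
#   # out.commonheads == [nullid]
#   # out.missingheads == repo.changelog.heads()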
365 365
366 366 def _checkpublish(pushop):
367 367 repo = pushop.repo
368 368 ui = repo.ui
369 369 behavior = ui.config(b'experimental', b'auto-publish')
370 370 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
371 371 return
372 372 remotephases = listkeys(pushop.remote, b'phases')
373 373 if not remotephases.get(b'publishing', False):
374 374 return
375 375
376 376 if pushop.revs is None:
377 377 published = repo.filtered(b'served').revs(b'not public()')
378 378 else:
379 379 published = repo.revs(b'::%ln - public()', pushop.revs)
380 380 if published:
381 381 if behavior == b'warn':
382 382 ui.warn(
383 383 _(b'%i changesets about to be published\n') % len(published)
384 384 )
385 385 elif behavior == b'confirm':
386 386 if ui.promptchoice(
387 387 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
388 388 % len(published)
389 389 ):
390 390 raise error.Abort(_(b'user quit'))
391 391 elif behavior == b'abort':
392 392 msg = _(b'push would publish %i changesets') % len(published)
393 393 hint = _(
394 394 b"use --publish or adjust 'experimental.auto-publish'"
395 395 b" config"
396 396 )
397 397 raise error.Abort(msg, hint=hint)
398 398
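# The knob driving the check above, in hgrc form (the three behaviors are
# exactly the strings tested at the top of _checkpublish):
#
#   [experimental]
#   auto-publish = warn    # or: confirm, abort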
399 399
400 400 def _forcebundle1(op):
401 401 """return true if a pull/push must use bundle1
402 402
403 403 This function is used to allow testing of the older bundle version"""
404 404 ui = op.repo.ui
405 405 # The goal of this config is to allow developers to choose the bundle
406 406 # version used during exchange. This is especially handy during tests.
407 407 # Value is a list of bundle versions to pick from; the highest version
408 408 # should be used.
409 409 #
410 410 # developer config: devel.legacy.exchange
411 411 exchange = ui.configlist(b'devel', b'legacy.exchange')
412 412 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
413 413 return forcebundle1 or not op.remote.capable(b'bundle2')
414 414
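# The developer configuration read above, in hgrc form (the test suite
# uses this to exercise the legacy code path):
#
#   [devel]
#   legacy.exchange = bundle1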
415 415
416 416 class pushoperation(object):
417 417 """A object that represent a single push operation
418 418
419 419 Its purpose is to carry push related state and very common operations.
420 420
421 421 A new pushoperation should be created at the beginning of each push and
422 422 discarded afterward.
423 423 """
424 424
425 425 def __init__(
426 426 self,
427 427 repo,
428 428 remote,
429 429 force=False,
430 430 revs=None,
431 431 newbranch=False,
432 432 bookmarks=(),
433 433 publish=False,
434 434 pushvars=None,
435 435 ):
436 436 # repo we push from
437 437 self.repo = repo
438 438 self.ui = repo.ui
439 439 # repo we push to
440 440 self.remote = remote
441 441 # force option provided
442 442 self.force = force
443 443 # revs to be pushed (None is "all")
444 444 self.revs = revs
445 445 # bookmark explicitly pushed
446 446 self.bookmarks = bookmarks
447 447 # allow push of new branch
448 448 self.newbranch = newbranch
449 449 # step already performed
450 450 # (used to check what steps have been already performed through bundle2)
451 451 self.stepsdone = set()
452 452 # Integer version of the changegroup push result
453 453 # - None means nothing to push
454 454 # - 0 means HTTP error
455 455 # - 1 means we pushed and remote head count is unchanged *or*
456 456 # we have outgoing changesets but refused to push
457 457 # - other values as described by addchangegroup()
458 458 self.cgresult = None
459 459 # Boolean value for the bookmark push
460 460 self.bkresult = None
461 461 # discover.outgoing object (contains common and outgoing data)
462 462 self.outgoing = None
463 463 # all remote topological heads before the push
464 464 self.remoteheads = None
465 465 # Details of the remote branch pre and post push
466 466 #
467 467 # mapping: {'branch': ([remoteheads],
468 468 # [newheads],
469 469 # [unsyncedheads],
470 470 # [discardedheads])}
471 471 # - branch: the branch name
472 472 # - remoteheads: the list of remote heads known locally
473 473 # None if the branch is new
474 474 # - newheads: the new remote heads (known locally) with outgoing pushed
475 475 # - unsyncedheads: the list of remote heads unknown locally.
476 476 # - discardedheads: the list of remote heads made obsolete by the push
477 477 self.pushbranchmap = None
478 478 # testable as a boolean indicating if any nodes are missing locally.
479 479 self.incoming = None
480 480 # summary of the remote phase situation
481 481 self.remotephases = None
482 482 # phase changes that must be pushed alongside the changesets
483 483 self.outdatedphases = None
484 484 # phase changes that must be pushed if the changeset push fails
485 485 self.fallbackoutdatedphases = None
486 486 # outgoing obsmarkers
487 487 self.outobsmarkers = set()
488 488 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
489 489 self.outbookmarks = []
490 490 # transaction manager
491 491 self.trmanager = None
492 492 # map { pushkey partid -> callback handling failure}
493 493 # used to handle exception from mandatory pushkey part failure
494 494 self.pkfailcb = {}
495 495 # an iterable of pushvars or None
496 496 self.pushvars = pushvars
497 497 # publish pushed changesets
498 498 self.publish = publish
499 499
500 500 @util.propertycache
501 501 def futureheads(self):
502 502 """future remote heads if the changeset push succeeds"""
503 503 return self.outgoing.missingheads
504 504
505 505 @util.propertycache
506 506 def fallbackheads(self):
507 507 """future remote heads if the changeset push fails"""
508 508 if self.revs is None:
509 509 # no target to push, all common heads are relevant
510 510 return self.outgoing.commonheads
511 511 unfi = self.repo.unfiltered()
512 512 # I want cheads = heads(::missingheads and ::commonheads)
513 513 # (missingheads is revs with secret changeset filtered out)
514 514 #
515 515 # This can be expressed as:
516 516 # cheads = ( (missingheads and ::commonheads)
517 517 # + (commonheads and ::missingheads)
518 518 # )
519 519 #
520 520 # while trying to push we already computed the following:
521 521 # common = (::commonheads)
522 522 # missing = ((commonheads::missingheads) - commonheads)
523 523 #
524 524 # We can pick:
525 525 # * missingheads part of common (::commonheads)
526 526 common = self.outgoing.common
527 527 rev = self.repo.changelog.index.rev
528 528 cheads = [node for node in self.revs if rev(node) in common]
529 529 # and
530 530 # * commonheads parents on missing
531 531 revset = unfi.set(
532 532 b'%ln and parents(roots(%ln))',
533 533 self.outgoing.commonheads,
534 534 self.outgoing.missing,
535 535 )
536 536 cheads.extend(c.node() for c in revset)
537 537 return cheads
538 538
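# A worked sketch of the distinction above, on a linear history where C is
# the newest changeset also present on the remote and D, E are outgoing:
#
#   A - B - C - D - E
#       futureheads   == {E}   (remote heads if the changeset push succeeds)
#       fallbackheads == {C}   (remote heads if the changeset push fails)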
539 539 @property
540 540 def commonheads(self):
541 541 """set of all common heads after changeset bundle push"""
542 542 if self.cgresult:
543 543 return self.futureheads
544 544 else:
545 545 return self.fallbackheads
546 546
547 547
548 548 # mapping of messages used when pushing bookmarks
549 549 bookmsgmap = {
550 550 b'update': (
551 551 _(b"updating bookmark %s\n"),
552 552 _(b'updating bookmark %s failed!\n'),
553 553 ),
554 554 b'export': (
555 555 _(b"exporting bookmark %s\n"),
556 556 _(b'exporting bookmark %s failed!\n'),
557 557 ),
558 558 b'delete': (
559 559 _(b"deleting remote bookmark %s\n"),
560 560 _(b'deleting remote bookmark %s failed!\n'),
561 561 ),
562 562 }
563 563
564 564
565 565 def push(
566 566 repo,
567 567 remote,
568 568 force=False,
569 569 revs=None,
570 570 newbranch=False,
571 571 bookmarks=(),
572 572 publish=False,
573 573 opargs=None,
574 574 ):
575 575 '''Push outgoing changesets (limited by revs) from a local
576 576 repository to remote. Returns a pushoperation object; its cgresult is:
577 577 - None means nothing to push
578 578 - 0 means HTTP error
579 579 - 1 means we pushed and remote head count is unchanged *or*
580 580 we have outgoing changesets but refused to push
581 581 - other values as described by addchangegroup()
582 582 '''
583 583 if opargs is None:
584 584 opargs = {}
585 585 pushop = pushoperation(
586 586 repo,
587 587 remote,
588 588 force,
589 589 revs,
590 590 newbranch,
591 591 bookmarks,
592 592 publish,
593 593 **pycompat.strkwargs(opargs)
594 594 )
595 595 if pushop.remote.local():
596 596 missing = (
597 597 set(pushop.repo.requirements) - pushop.remote.local().supported
598 598 )
599 599 if missing:
600 600 msg = _(
601 601 b"required features are not"
602 602 b" supported in the destination:"
603 603 b" %s"
604 604 ) % (b', '.join(sorted(missing)))
605 605 raise error.Abort(msg)
606 606
607 607 if not pushop.remote.canpush():
608 608 raise error.Abort(_(b"destination does not support push"))
609 609
610 610 if not pushop.remote.capable(b'unbundle'):
611 611 raise error.Abort(
612 612 _(
613 613 b'cannot push: destination does not support the '
614 614 b'unbundle wire protocol command'
615 615 )
616 616 )
617 617
618 618 # get lock as we might write phase data
619 619 wlock = lock = None
620 620 try:
621 621 # bundle2 push may receive a reply bundle touching bookmarks
622 622 # requiring the wlock. Take it now to ensure proper ordering.
623 623 maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
624 624 if (
625 625 (not _forcebundle1(pushop))
626 626 and maypushback
627 627 and not bookmod.bookmarksinstore(repo)
628 628 ):
629 629 wlock = pushop.repo.wlock()
630 630 lock = pushop.repo.lock()
631 631 pushop.trmanager = transactionmanager(
632 632 pushop.repo, b'push-response', pushop.remote.url()
633 633 )
634 634 except error.LockUnavailable as err:
635 635 # source repo cannot be locked.
636 636 # We do not abort the push, but just disable the local phase
637 637 # synchronisation.
638 638 msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
639 639 err
640 640 )
641 641 pushop.ui.debug(msg)
642 642
643 643 with wlock or util.nullcontextmanager():
644 644 with lock or util.nullcontextmanager():
645 645 with pushop.trmanager or util.nullcontextmanager():
646 646 pushop.repo.checkpush(pushop)
647 647 _checkpublish(pushop)
648 648 _pushdiscovery(pushop)
649 649 if not _forcebundle1(pushop):
650 650 _pushbundle2(pushop)
651 651 _pushchangeset(pushop)
652 652 _pushsyncphase(pushop)
653 653 _pushobsolete(pushop)
654 654 _pushbookmark(pushop)
655 655
656 656 if repo.ui.configbool(b'experimental', b'remotenames'):
657 657 logexchange.pullremotenames(repo, remote)
658 658
659 659 return pushop
660 660
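# A minimal calling sketch (`repo` and `remote` are assumed to be an open
# local repository and peer; note the function returns the pushoperation
# itself, with the integer outcome in its cgresult attribute):
#
#   pushop = push(repo, remote, revs=[repo[b'tip'].node()])
#   if pushop.cgresult == 0:
#       ...  # HTTP error, per the code table in the docstring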
661 661
662 662 # list of steps to perform discovery before push
663 663 pushdiscoveryorder = []
664 664
665 665 # Mapping between step name and function
666 666 #
667 667 # This exists to help extensions wrap steps if necessary
668 668 pushdiscoverymapping = {}
669 669
670 670
671 671 def pushdiscovery(stepname):
672 672 """decorator for function performing discovery before push
673 673
674 674 The function is added to the step -> function mapping and appended to the
675 675 list of steps. Beware that decorated functions will be added in order (this
676 676 may matter).
677 677
678 678 You can only use this decorator for a new step; if you want to wrap a step
679 679 from an extension, change the pushdiscoverymapping dictionary directly."""
680 680
681 681 def dec(func):
682 682 assert stepname not in pushdiscoverymapping
683 683 pushdiscoverymapping[stepname] = func
684 684 pushdiscoveryorder.append(stepname)
685 685 return func
686 686
687 687 return dec
688 688
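# Registration sketch for a brand-new discovery step (the step name
# b'mystep' is hypothetical); wrapping an existing step should instead
# replace its entry in pushdiscoverymapping, per the docstring above:
#
#   @pushdiscovery(b'mystep')
#   def _pushdiscoverymystep(pushop):
#       pushop.ui.debug(b'my discovery step ran\n')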
689 689
690 690 def _pushdiscovery(pushop):
691 691 """Run all discovery steps"""
692 692 for stepname in pushdiscoveryorder:
693 693 step = pushdiscoverymapping[stepname]
694 694 step(pushop)
695 695
696 696
697 697 @pushdiscovery(b'changeset')
698 698 def _pushdiscoverychangeset(pushop):
699 699 """discover the changeset that need to be pushed"""
700 700 fci = discovery.findcommonincoming
701 701 if pushop.revs:
702 702 commoninc = fci(
703 703 pushop.repo,
704 704 pushop.remote,
705 705 force=pushop.force,
706 706 ancestorsof=pushop.revs,
707 707 )
708 708 else:
709 709 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
710 710 common, inc, remoteheads = commoninc
711 711 fco = discovery.findcommonoutgoing
712 712 outgoing = fco(
713 713 pushop.repo,
714 714 pushop.remote,
715 715 onlyheads=pushop.revs,
716 716 commoninc=commoninc,
717 717 force=pushop.force,
718 718 )
719 719 pushop.outgoing = outgoing
720 720 pushop.remoteheads = remoteheads
721 721 pushop.incoming = inc
722 722
723 723
724 724 @pushdiscovery(b'phase')
725 725 def _pushdiscoveryphase(pushop):
726 726 """discover the phase that needs to be pushed
727 727
728 728 (computed for both success and failure case for changesets push)"""
729 729 outgoing = pushop.outgoing
730 730 unfi = pushop.repo.unfiltered()
731 731 remotephases = listkeys(pushop.remote, b'phases')
732 732
733 733 if (
734 734 pushop.ui.configbool(b'ui', b'_usedassubrepo')
735 735 and remotephases # server supports phases
736 736 and not pushop.outgoing.missing # no changesets to be pushed
737 737 and remotephases.get(b'publishing', False)
738 738 ):
739 739 # When:
740 740 # - this is a subrepo push
741 741 # - and the remote supports phases
742 742 # - and no changesets are to be pushed
743 743 # - and the remote is publishing
744 744 # we may be in the issue 3781 case!
745 745 # We drop the phase synchronisation that is normally done as a
746 746 # courtesy to publish changesets that are draft locally but
747 747 # already present on the remote.
748 748 pushop.outdatedphases = []
749 749 pushop.fallbackoutdatedphases = []
750 750 return
751 751
752 752 pushop.remotephases = phases.remotephasessummary(
753 753 pushop.repo, pushop.fallbackheads, remotephases
754 754 )
755 755 droots = pushop.remotephases.draftroots
756 756
757 757 extracond = b''
758 758 if not pushop.remotephases.publishing:
759 759 extracond = b' and public()'
760 760 revset = b'heads((%%ln::%%ln) %s)' % extracond
761 761 # Get the list of all revs draft on the remote but public here.
762 762 # XXX Beware that the revset breaks if droots is not strictly
763 763 # XXX roots; we may want to ensure it is, but that is costly.
764 764 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
765 765 if not pushop.remotephases.publishing and pushop.publish:
766 766 future = list(
767 767 unfi.set(
768 768 b'%ln and (not public() or %ln::)', pushop.futureheads, droots
769 769 )
770 770 )
771 771 elif not outgoing.missing:
772 772 future = fallback
773 773 else:
774 774 # adds changeset we are going to push as draft
775 775 #
776 776 # should not be necessary for a publishing server, but because of an
777 777 # issue fixed in xxxxx we have to do it anyway.
778 778 fdroots = list(
779 779 unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
780 780 )
781 781 fdroots = [f.node() for f in fdroots]
782 782 future = list(unfi.set(revset, fdroots, pushop.futureheads))
783 783 pushop.outdatedphases = future
784 784 pushop.fallbackoutdatedphases = fallback
785 785
786 786
787 787 @pushdiscovery(b'obsmarker')
788 788 def _pushdiscoveryobsmarkers(pushop):
789 789 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
790 790 return
791 791
792 792 if not pushop.repo.obsstore:
793 793 return
794 794
795 795 if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
796 796 return
797 797
798 798 repo = pushop.repo
799 799 # very naive computation, which can be quite expensive on big repos.
800 800 # However: evolution is currently slow on them anyway.
801 801 nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
802 802 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
803 803
804 804
805 805 @pushdiscovery(b'bookmarks')
806 806 def _pushdiscoverybookmarks(pushop):
807 807 ui = pushop.ui
808 808 repo = pushop.repo.unfiltered()
809 809 remote = pushop.remote
810 810 ui.debug(b"checking for updated bookmarks\n")
811 811 ancestors = ()
812 812 if pushop.revs:
813 813 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
814 814 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
815 815
816 816 remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
817 817
818 818 explicit = {
819 819 repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
820 820 }
821 821
822 822 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
823 823 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
824 824
825 825
826 826 def _processcompared(pushop, pushed, explicit, remotebms, comp):
827 827 """take decision on bookmarks to push to the remote repo
828 828
829 829 Exists to help extensions alter this behavior.
830 830 """
831 831 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
832 832
833 833 repo = pushop.repo
834 834
835 835 for b, scid, dcid in advsrc:
836 836 if b in explicit:
837 837 explicit.remove(b)
838 838 if not pushed or repo[scid].rev() in pushed:
839 839 pushop.outbookmarks.append((b, dcid, scid))
840 840 # search added bookmark
841 841 for b, scid, dcid in addsrc:
842 842 if b in explicit:
843 843 explicit.remove(b)
844 844 pushop.outbookmarks.append((b, b'', scid))
845 845 # search for overwritten bookmark
846 846 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
847 847 if b in explicit:
848 848 explicit.remove(b)
849 849 pushop.outbookmarks.append((b, dcid, scid))
850 850 # search for bookmark to delete
851 851 for b, scid, dcid in adddst:
852 852 if b in explicit:
853 853 explicit.remove(b)
854 854 # treat as "deleted locally"
855 855 pushop.outbookmarks.append((b, dcid, b''))
856 856 # identical bookmarks shouldn't get reported
857 857 for b, scid, dcid in same:
858 858 if b in explicit:
859 859 explicit.remove(b)
860 860
861 861 if explicit:
862 862 explicit = sorted(explicit)
863 863 # we should probably list all of them
864 864 pushop.ui.warn(
865 865 _(
866 866 b'bookmark %s does not exist on the local '
867 867 b'or remote repository!\n'
868 868 )
869 869 % explicit[0]
870 870 )
871 871 pushop.bkresult = 2
872 872
873 873 pushop.outbookmarks.sort()
874 874
875 875
876 876 def _pushcheckoutgoing(pushop):
877 877 outgoing = pushop.outgoing
878 878 unfi = pushop.repo.unfiltered()
879 879 if not outgoing.missing:
880 880 # nothing to push
881 881 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
882 882 return False
883 883 # something to push
884 884 if not pushop.force:
885 885 # if repo.obsstore is empty --> no obsolete markers,
886 886 # so we can skip the iteration entirely
887 887 if unfi.obsstore:
888 888 # these messages are defined here for 80-char line limit reasons
889 889 mso = _(b"push includes obsolete changeset: %s!")
890 890 mspd = _(b"push includes phase-divergent changeset: %s!")
891 891 mscd = _(b"push includes content-divergent changeset: %s!")
892 892 mst = {
893 893 b"orphan": _(b"push includes orphan changeset: %s!"),
894 894 b"phase-divergent": mspd,
895 895 b"content-divergent": mscd,
896 896 }
897 897 # If we are to push and there is at least one
898 898 # obsolete or unstable changeset in missing, at
899 899 # least one of the missing heads will be obsolete or
900 900 # unstable. So checking heads only is OK.
901 901 for node in outgoing.missingheads:
902 902 ctx = unfi[node]
903 903 if ctx.obsolete():
904 904 raise error.Abort(mso % ctx)
905 905 elif ctx.isunstable():
906 906 # TODO print more than one instability in the abort
907 907 # message
908 908 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
909 909
910 910 discovery.checkheads(pushop)
911 911 return True
912 912
913 913
914 914 # List of names of steps to perform for an outgoing bundle2, order matters.
915 915 b2partsgenorder = []
916 916
917 917 # Mapping between step name and function
918 918 #
919 919 # This exists to help extensions wrap steps if necessary
920 920 b2partsgenmapping = {}
921 921
922 922
923 923 def b2partsgenerator(stepname, idx=None):
924 924 """decorator for function generating bundle2 part
925 925
926 926 The function is added to the step -> function mapping and appended to the
927 927 list of steps. Beware that decorated functions will be added in order
928 928 (this may matter).
929 929
930 930 You can only use this decorator for new steps; if you want to wrap a step
931 931 from an extension, alter the b2partsgenmapping dictionary directly."""
932 932
933 933 def dec(func):
934 934 assert stepname not in b2partsgenmapping
935 935 b2partsgenmapping[stepname] = func
936 936 if idx is None:
937 937 b2partsgenorder.append(stepname)
938 938 else:
939 939 b2partsgenorder.insert(idx, stepname)
940 940 return func
941 941
942 942 return dec
943 943
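# Wrapping sketch for an existing part generator from an extension,
# mutating b2partsgenmapping directly as the docstring above suggests
# (names here are hypothetical):
#
#   origgen = b2partsgenmapping[b'changeset']
#
#   def wrappedgen(pushop, bundler):
#       pushop.ui.debug(b'about to generate the changeset part\n')
#       return origgen(pushop, bundler)
#
#   b2partsgenmapping[b'changeset'] = wrappedgen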
944 944
945 945 def _pushb2ctxcheckheads(pushop, bundler):
946 946 """Generate race condition checking parts
947 947
948 948 Exists as an independent function to aid extensions
949 949 """
950 950 # * 'force' does not check for push races,
951 951 # * if we don't push anything, there is nothing to check.
952 952 if not pushop.force and pushop.outgoing.missingheads:
953 953 allowunrelated = b'related' in bundler.capabilities.get(
954 954 b'checkheads', ()
955 955 )
956 956 emptyremote = pushop.pushbranchmap is None
957 957 if not allowunrelated or emptyremote:
958 958 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
959 959 else:
960 960 affected = set()
961 961 for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
962 962 remoteheads, newheads, unsyncedheads, discardedheads = heads
963 963 if remoteheads is not None:
964 964 remote = set(remoteheads)
965 965 affected |= set(discardedheads) & remote
966 966 affected |= remote - set(newheads)
967 967 if affected:
968 968 data = iter(sorted(affected))
969 969 bundler.newpart(b'check:updated-heads', data=data)
970 970
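# Decision summary for the race-check part emitted above (a sketch of the
# branches in _pushb2ctxcheckheads):
#
#   force, or nothing outgoing            -> no check part at all
#   empty remote, or no b'related' cap    -> b'check:heads' carrying all
#                                            remote heads
#   otherwise                             -> b'check:updated-heads' carrying
#                                            only the heads this push affects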
971 971
972 972 def _pushing(pushop):
973 973 """return True if we are pushing anything"""
974 974 return bool(
975 975 pushop.outgoing.missing
976 976 or pushop.outdatedphases
977 977 or pushop.outobsmarkers
978 978 or pushop.outbookmarks
979 979 )
980 980
981 981
982 982 @b2partsgenerator(b'check-bookmarks')
983 983 def _pushb2checkbookmarks(pushop, bundler):
984 984 """insert bookmark move checking"""
985 985 if not _pushing(pushop) or pushop.force:
986 986 return
987 987 b2caps = bundle2.bundle2caps(pushop.remote)
988 988 hasbookmarkcheck = b'bookmarks' in b2caps
989 989 if not (pushop.outbookmarks and hasbookmarkcheck):
990 990 return
991 991 data = []
992 992 for book, old, new in pushop.outbookmarks:
993 993 data.append((book, old))
994 994 checkdata = bookmod.binaryencode(data)
995 995 bundler.newpart(b'check:bookmarks', data=checkdata)
996 996
997 997
998 998 @b2partsgenerator(b'check-phases')
999 999 def _pushb2checkphases(pushop, bundler):
1000 1000 """insert phase move checking"""
1001 1001 if not _pushing(pushop) or pushop.force:
1002 1002 return
1003 1003 b2caps = bundle2.bundle2caps(pushop.remote)
1004 1004 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1005 1005 if pushop.remotephases is not None and hasphaseheads:
1006 1006 # check that the remote phase has not changed
1007 1007 checks = [[] for p in phases.allphases]
1008 1008 checks[phases.public].extend(pushop.remotephases.publicheads)
1009 1009 checks[phases.draft].extend(pushop.remotephases.draftroots)
1010 1010 if any(checks):
1011 1011 for nodes in checks:
1012 1012 nodes.sort()
1013 1013 checkdata = phases.binaryencode(checks)
1014 1014 bundler.newpart(b'check:phases', data=checkdata)
1015 1015
1016 1016
1017 1017 @b2partsgenerator(b'changeset')
1018 1018 def _pushb2ctx(pushop, bundler):
1019 1019 """handle changegroup push through bundle2
1020 1020
1021 1021 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
1022 1022 """
1023 1023 if b'changesets' in pushop.stepsdone:
1024 1024 return
1025 1025 pushop.stepsdone.add(b'changesets')
1026 1026 # Send known heads to the server for race detection.
1027 1027 if not _pushcheckoutgoing(pushop):
1028 1028 return
1029 1029 pushop.repo.prepushoutgoinghooks(pushop)
1030 1030
1031 1031 _pushb2ctxcheckheads(pushop, bundler)
1032 1032
1033 1033 b2caps = bundle2.bundle2caps(pushop.remote)
1034 1034 version = b'01'
1035 1035 cgversions = b2caps.get(b'changegroup')
1036 1036 if cgversions: # 3.1 and 3.2 ship with an empty value
1037 1037 cgversions = [
1038 1038 v
1039 1039 for v in cgversions
1040 1040 if v in changegroup.supportedoutgoingversions(pushop.repo)
1041 1041 ]
1042 1042 if not cgversions:
1043 1043 raise error.Abort(_(b'no common changegroup version'))
1044 1044 version = max(cgversions)
1045 1045 cgstream = changegroup.makestream(
1046 1046 pushop.repo, pushop.outgoing, version, b'push'
1047 1047 )
1048 1048 cgpart = bundler.newpart(b'changegroup', data=cgstream)
1049 1049 if cgversions:
1050 1050 cgpart.addparam(b'version', version)
1051 1051 if b'treemanifest' in pushop.repo.requirements:
1052 1052 cgpart.addparam(b'treemanifest', b'1')
1053 1053 if b'exp-sidedata-flag' in pushop.repo.requirements:
1054 1054 cgpart.addparam(b'exp-sidedata', b'1')
1055 1055
1056 1056 def handlereply(op):
1057 1057 """extract addchangegroup returns from server reply"""
1058 1058 cgreplies = op.records.getreplies(cgpart.id)
1059 1059 assert len(cgreplies[b'changegroup']) == 1
1060 1060 pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
1061 1061
1062 1062 return handlereply
1063 1063
1064 1064
1065 1065 @b2partsgenerator(b'phase')
1066 1066 def _pushb2phases(pushop, bundler):
1067 1067 """handle phase push through bundle2"""
1068 1068 if b'phases' in pushop.stepsdone:
1069 1069 return
1070 1070 b2caps = bundle2.bundle2caps(pushop.remote)
1071 1071 ui = pushop.repo.ui
1072 1072
1073 1073 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1074 1074 haspushkey = b'pushkey' in b2caps
1075 1075 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1076 1076
1077 1077 if hasphaseheads and not legacyphase:
1078 1078 return _pushb2phaseheads(pushop, bundler)
1079 1079 elif haspushkey:
1080 1080 return _pushb2phasespushkey(pushop, bundler)
1081 1081
1082 1082
1083 1083 def _pushb2phaseheads(pushop, bundler):
1084 1084 """push phase information through a bundle2 - binary part"""
1085 1085 pushop.stepsdone.add(b'phases')
1086 1086 if pushop.outdatedphases:
1087 1087 updates = [[] for p in phases.allphases]
1088 1088 updates[0].extend(h.node() for h in pushop.outdatedphases)
1089 1089 phasedata = phases.binaryencode(updates)
1090 1090 bundler.newpart(b'phase-heads', data=phasedata)
1091 1091
1092 1092
1093 1093 def _pushb2phasespushkey(pushop, bundler):
1094 1094 """push phase information through a bundle2 - pushkey part"""
1095 1095 pushop.stepsdone.add(b'phases')
1096 1096 part2node = []
1097 1097
1098 1098 def handlefailure(pushop, exc):
1099 1099 targetid = int(exc.partid)
1100 1100 for partid, node in part2node:
1101 1101 if partid == targetid:
1102 1102 raise error.Abort(_(b'updating %s to public failed') % node)
1103 1103
1104 1104 enc = pushkey.encode
1105 1105 for newremotehead in pushop.outdatedphases:
1106 1106 part = bundler.newpart(b'pushkey')
1107 1107 part.addparam(b'namespace', enc(b'phases'))
1108 1108 part.addparam(b'key', enc(newremotehead.hex()))
1109 1109 part.addparam(b'old', enc(b'%d' % phases.draft))
1110 1110 part.addparam(b'new', enc(b'%d' % phases.public))
1111 1111 part2node.append((part.id, newremotehead))
1112 1112 pushop.pkfailcb[part.id] = handlefailure
1113 1113
1114 1114 def handlereply(op):
1115 1115 for partid, node in part2node:
1116 1116 partrep = op.records.getreplies(partid)
1117 1117 results = partrep[b'pushkey']
1118 1118 assert len(results) <= 1
1119 1119 msg = None
1120 1120 if not results:
1121 1121 msg = _(b'server ignored update of %s to public!\n') % node
1122 1122 elif not int(results[0][b'return']):
1123 1123 msg = _(b'updating %s to public failed!\n') % node
1124 1124 if msg is not None:
1125 1125 pushop.ui.warn(msg)
1126 1126
1127 1127 return handlereply
1128 1128
1129 1129
1130 1130 @b2partsgenerator(b'obsmarkers')
1131 1131 def _pushb2obsmarkers(pushop, bundler):
1132 1132 if b'obsmarkers' in pushop.stepsdone:
1133 1133 return
1134 1134 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
1135 1135 if obsolete.commonversion(remoteversions) is None:
1136 1136 return
1137 1137 pushop.stepsdone.add(b'obsmarkers')
1138 1138 if pushop.outobsmarkers:
1139 1139 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1140 1140 bundle2.buildobsmarkerspart(bundler, markers)
1141 1141
1142 1142
1143 1143 @b2partsgenerator(b'bookmarks')
1144 1144 def _pushb2bookmarks(pushop, bundler):
1145 1145 """handle bookmark push through bundle2"""
1146 1146 if b'bookmarks' in pushop.stepsdone:
1147 1147 return
1148 1148 b2caps = bundle2.bundle2caps(pushop.remote)
1149 1149
1150 1150 legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
1151 1151 legacybooks = b'bookmarks' in legacy
1152 1152
1153 1153 if not legacybooks and b'bookmarks' in b2caps:
1154 1154 return _pushb2bookmarkspart(pushop, bundler)
1155 1155 elif b'pushkey' in b2caps:
1156 1156 return _pushb2bookmarkspushkey(pushop, bundler)
1157 1157
1158 1158
1159 1159 def _bmaction(old, new):
1160 1160 """small utility for bookmark pushing"""
1161 1161 if not old:
1162 1162 return b'export'
1163 1163 elif not new:
1164 1164 return b'delete'
1165 1165 return b'update'
1166 1166
1167 1167
1168 1168 def _abortonsecretctx(pushop, node, b):
1169 1169 """abort if a given bookmark points to a secret changeset"""
1170 1170 if node and pushop.repo[node].phase() == phases.secret:
1171 1171 raise error.Abort(
1172 1172 _(b'cannot push bookmark %s as it points to a secret changeset') % b
1173 1173 )
1174 1174
1175 1175
1176 1176 def _pushb2bookmarkspart(pushop, bundler):
1177 1177 pushop.stepsdone.add(b'bookmarks')
1178 1178 if not pushop.outbookmarks:
1179 1179 return
1180 1180
1181 1181 allactions = []
1182 1182 data = []
1183 1183 for book, old, new in pushop.outbookmarks:
1184 1184 _abortonsecretctx(pushop, new, book)
1185 1185 data.append((book, new))
1186 1186 allactions.append((book, _bmaction(old, new)))
1187 1187 checkdata = bookmod.binaryencode(data)
1188 1188 bundler.newpart(b'bookmarks', data=checkdata)
1189 1189
1190 1190 def handlereply(op):
1191 1191 ui = pushop.ui
1192 1192 # if success
1193 1193 for book, action in allactions:
1194 1194 ui.status(bookmsgmap[action][0] % book)
1195 1195
1196 1196 return handlereply
1197 1197
1198 1198
1199 1199 def _pushb2bookmarkspushkey(pushop, bundler):
1200 1200 pushop.stepsdone.add(b'bookmarks')
1201 1201 part2book = []
1202 1202 enc = pushkey.encode
1203 1203
1204 1204 def handlefailure(pushop, exc):
1205 1205 targetid = int(exc.partid)
1206 1206 for partid, book, action in part2book:
1207 1207 if partid == targetid:
1208 1208 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1209 1209 # we should not be called for parts we did not generate
1210 1210 assert False
1211 1211
1212 1212 for book, old, new in pushop.outbookmarks:
1213 1213 _abortonsecretctx(pushop, new, book)
1214 1214 part = bundler.newpart(b'pushkey')
1215 1215 part.addparam(b'namespace', enc(b'bookmarks'))
1216 1216 part.addparam(b'key', enc(book))
1217 1217 part.addparam(b'old', enc(hex(old)))
1218 1218 part.addparam(b'new', enc(hex(new)))
1219 1219 action = b'update'
1220 1220 if not old:
1221 1221 action = b'export'
1222 1222 elif not new:
1223 1223 action = b'delete'
1224 1224 part2book.append((part.id, book, action))
1225 1225 pushop.pkfailcb[part.id] = handlefailure
1226 1226
1227 1227 def handlereply(op):
1228 1228 ui = pushop.ui
1229 1229 for partid, book, action in part2book:
1230 1230 partrep = op.records.getreplies(partid)
1231 1231 results = partrep[b'pushkey']
1232 1232 assert len(results) <= 1
1233 1233 if not results:
1234 1234 pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
1235 1235 else:
1236 1236 ret = int(results[0][b'return'])
1237 1237 if ret:
1238 1238 ui.status(bookmsgmap[action][0] % book)
1239 1239 else:
1240 1240 ui.warn(bookmsgmap[action][1] % book)
1241 1241 if pushop.bkresult is not None:
1242 1242 pushop.bkresult = 1
1243 1243
1244 1244 return handlereply
1245 1245
1246 1246
1247 1247 @b2partsgenerator(b'pushvars', idx=0)
1248 1248 def _getbundlesendvars(pushop, bundler):
1249 1249 '''send shellvars via bundle2'''
1250 1250 pushvars = pushop.pushvars
1251 1251 if pushvars:
1252 1252 shellvars = {}
1253 1253 for raw in pushvars:
1254 1254 if b'=' not in raw:
1255 1255 msg = (
1256 1256 b"unable to parse variable '%s', should follow "
1257 1257 b"'KEY=VALUE' or 'KEY=' format"
1258 1258 )
1259 1259 raise error.Abort(msg % raw)
1260 1260 k, v = raw.split(b'=', 1)
1261 1261 shellvars[k] = v
1262 1262
1263 1263 part = bundler.newpart(b'pushvars')
1264 1264
1265 1265 for key, value in pycompat.iteritems(shellvars):
1266 1266 part.addparam(key, value, mandatory=False)
1267 1267
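# Command-line usage feeding the part above; each KEY=VALUE pair becomes a
# mandatory=False bundle2 parameter and surfaces to server-side hooks as an
# HG_USERVAR_* environment variable:
#
#   $ hg push --pushvars "DEBUG=1" --pushvars "REASON=hotfix"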
1268 1268
1269 1269 def _pushbundle2(pushop):
1270 1270 """push data to the remote using bundle2
1271 1271
1272 1272 The only currently supported type of data is changegroup but this will
1273 1273 evolve in the future."""
1274 1274 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1275 1275 pushback = pushop.trmanager and pushop.ui.configbool(
1276 1276 b'experimental', b'bundle2.pushback'
1277 1277 )
1278 1278
1279 1279 # create reply capability
1280 1280 capsblob = bundle2.encodecaps(
1281 1281 bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
1282 1282 )
1283 1283 bundler.newpart(b'replycaps', data=capsblob)
1284 1284 replyhandlers = []
1285 1285 for partgenname in b2partsgenorder:
1286 1286 partgen = b2partsgenmapping[partgenname]
1287 1287 ret = partgen(pushop, bundler)
1288 1288 if callable(ret):
1289 1289 replyhandlers.append(ret)
1290 1290 # do not push if nothing to push
1291 1291 if bundler.nbparts <= 1:
1292 1292 return
1293 1293 stream = util.chunkbuffer(bundler.getchunks())
1294 1294 try:
1295 1295 try:
1296 1296 with pushop.remote.commandexecutor() as e:
1297 1297 reply = e.callcommand(
1298 1298 b'unbundle',
1299 1299 {
1300 1300 b'bundle': stream,
1301 1301 b'heads': [b'force'],
1302 1302 b'url': pushop.remote.url(),
1303 1303 },
1304 1304 ).result()
1305 1305 except error.BundleValueError as exc:
1306 1306 raise error.Abort(_(b'missing support for %s') % exc)
1307 1307 try:
1308 1308 trgetter = None
1309 1309 if pushback:
1310 1310 trgetter = pushop.trmanager.transaction
1311 1311 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1312 1312 except error.BundleValueError as exc:
1313 1313 raise error.Abort(_(b'missing support for %s') % exc)
1314 1314 except bundle2.AbortFromPart as exc:
1315 1315 pushop.ui.status(_(b'remote: %s\n') % exc)
1316 1316 if exc.hint is not None:
1317 1317 pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1318 1318 raise error.Abort(_(b'push failed on remote'))
1319 1319 except error.PushkeyFailed as exc:
1320 1320 partid = int(exc.partid)
1321 1321 if partid not in pushop.pkfailcb:
1322 1322 raise
1323 1323 pushop.pkfailcb[partid](pushop, exc)
1324 1324 for rephand in replyhandlers:
1325 1325 rephand(op)
1326 1326
1327 1327
1328 1328 def _pushchangeset(pushop):
1329 1329 """Make the actual push of changeset bundle to remote repo"""
1330 1330 if b'changesets' in pushop.stepsdone:
1331 1331 return
1332 1332 pushop.stepsdone.add(b'changesets')
1333 1333 if not _pushcheckoutgoing(pushop):
1334 1334 return
1335 1335
1336 1336 # Should have verified this in push().
1337 1337 assert pushop.remote.capable(b'unbundle')
1338 1338
1339 1339 pushop.repo.prepushoutgoinghooks(pushop)
1340 1340 outgoing = pushop.outgoing
1341 1341 # TODO: get bundlecaps from remote
1342 1342 bundlecaps = None
1343 1343 # create a changegroup from local
1344 1344 if pushop.revs is None and not (
1345 1345 outgoing.excluded or pushop.repo.changelog.filteredrevs
1346 1346 ):
1347 1347 # push everything,
1348 1348 # use the fast path, no race possible on push
1349 1349 cg = changegroup.makechangegroup(
1350 1350 pushop.repo,
1351 1351 outgoing,
1352 1352 b'01',
1353 1353 b'push',
1354 1354 fastpath=True,
1355 1355 bundlecaps=bundlecaps,
1356 1356 )
1357 1357 else:
1358 1358 cg = changegroup.makechangegroup(
1359 1359 pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
1360 1360 )
1361 1361
1362 1362 # apply changegroup to remote
1363 1363 # local repo finds heads on server, finds out what
1364 1364 # revs it must push. once revs transferred, if server
1365 1365 # finds it has different heads (someone else won
1366 1366 # commit/push race), server aborts.
1367 1367 if pushop.force:
1368 1368 remoteheads = [b'force']
1369 1369 else:
1370 1370 remoteheads = pushop.remoteheads
1371 1371 # ssh: return remote's addchangegroup()
1372 1372 # http: return remote's addchangegroup() or 0 for error
1373 1373 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1374 1374
1375 1375
1376 1376 def _pushsyncphase(pushop):
1377 1377 """synchronise phase information locally and remotely"""
1378 1378 cheads = pushop.commonheads
1379 1379 # even when we don't push, exchanging phase data is useful
1380 1380 remotephases = listkeys(pushop.remote, b'phases')
1381 1381 if (
1382 1382 pushop.ui.configbool(b'ui', b'_usedassubrepo')
1383 1383 and remotephases # server supports phases
1384 1384 and pushop.cgresult is None # nothing was pushed
1385 1385 and remotephases.get(b'publishing', False)
1386 1386 ):
1387 1387 # When:
1388 1388 # - this is a subrepo push
1389 1389 # - and the remote supports phases
1390 1390 # - and no changeset was pushed
1391 1391 # - and the remote is publishing
1392 1392 # we may be in the issue 3871 case!
1393 1393 # We drop the phase synchronisation that is normally done as a
1394 1394 # courtesy to publish changesets that are draft locally but
1395 1395 # already present on the remote.
1396 1396 remotephases = {b'publishing': b'True'}
1397 1397 if not remotephases: # old server or public only reply from non-publishing
1398 1398 _localphasemove(pushop, cheads)
1399 1399 # don't push any phase data as there is nothing to push
1400 1400 else:
1401 1401 ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
1402 1402 pheads, droots = ana
1403 1403 ### Apply remote phase on local
1404 1404 if remotephases.get(b'publishing', False):
1405 1405 _localphasemove(pushop, cheads)
1406 1406 else: # publish = False
1407 1407 _localphasemove(pushop, pheads)
1408 1408 _localphasemove(pushop, cheads, phases.draft)
1409 1409 ### Apply local phase on remote
1410 1410
1411 1411 if pushop.cgresult:
1412 1412 if b'phases' in pushop.stepsdone:
1413 1413 # phases already pushed through bundle2
1414 1414 return
1415 1415 outdated = pushop.outdatedphases
1416 1416 else:
1417 1417 outdated = pushop.fallbackoutdatedphases
1418 1418
1419 1419 pushop.stepsdone.add(b'phases')
1420 1420
1421 1421 # filter heads already turned public by the push
1422 1422 outdated = [c for c in outdated if c.node() not in pheads]
1423 1423 # fallback to independent pushkey command
1424 1424 for newremotehead in outdated:
1425 1425 with pushop.remote.commandexecutor() as e:
1426 1426 r = e.callcommand(
1427 1427 b'pushkey',
1428 1428 {
1429 1429 b'namespace': b'phases',
1430 1430 b'key': newremotehead.hex(),
1431 1431 b'old': b'%d' % phases.draft,
1432 1432 b'new': b'%d' % phases.public,
1433 1433 },
1434 1434 ).result()
1435 1435
1436 1436 if not r:
1437 1437 pushop.ui.warn(
1438 1438 _(b'updating %s to public failed!\n') % newremotehead
1439 1439 )
1440 1440
1441 1441
1442 1442 def _localphasemove(pushop, nodes, phase=phases.public):
1443 1443 """move <nodes> to <phase> in the local source repo"""
1444 1444 if pushop.trmanager:
1445 1445 phases.advanceboundary(
1446 1446 pushop.repo, pushop.trmanager.transaction(), phase, nodes
1447 1447 )
1448 1448 else:
1449 1449 # repo is not locked, do not change any phases!
1450 1450 # Inform the user that phases should have been moved when
1451 1451 # applicable.
1452 1452 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1453 1453 phasestr = phases.phasenames[phase]
1454 1454 if actualmoves:
1455 1455 pushop.ui.status(
1456 1456 _(
1457 1457 b'cannot lock source repo, skipping '
1458 1458 b'local %s phase update\n'
1459 1459 )
1460 1460 % phasestr
1461 1461 )
1462 1462
1463 1463
1464 1464 def _pushobsolete(pushop):
1465 1465 """utility function to push obsolete markers to a remote"""
1466 1466 if b'obsmarkers' in pushop.stepsdone:
1467 1467 return
1468 1468 repo = pushop.repo
1469 1469 remote = pushop.remote
1470 1470 pushop.stepsdone.add(b'obsmarkers')
1471 1471 if pushop.outobsmarkers:
1472 1472 pushop.ui.debug(b'try to push obsolete markers to remote\n')
1473 1473 rslts = []
1474 1474 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1475 1475 remotedata = obsolete._pushkeyescape(markers)
1476 1476 for key in sorted(remotedata, reverse=True):
1477 1477 # reverse sort to ensure we end with dump0
1478 1478 data = remotedata[key]
1479 1479 rslts.append(remote.pushkey(b'obsolete', key, b'', data))
1480 1480 if [r for r in rslts if not r]:
1481 1481 msg = _(b'failed to push some obsolete markers!\n')
1482 1482 repo.ui.warn(msg)
1483 1483
1484 1484
1485 1485 def _pushbookmark(pushop):
1486 1486 """Update bookmark position on remote"""
1487 1487 if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
1488 1488 return
1489 1489 pushop.stepsdone.add(b'bookmarks')
1490 1490 ui = pushop.ui
1491 1491 remote = pushop.remote
1492 1492
1493 1493 for b, old, new in pushop.outbookmarks:
1494 1494 action = b'update'
1495 1495 if not old:
1496 1496 action = b'export'
1497 1497 elif not new:
1498 1498 action = b'delete'
1499 1499
1500 1500 with remote.commandexecutor() as e:
1501 1501 r = e.callcommand(
1502 1502 b'pushkey',
1503 1503 {
1504 1504 b'namespace': b'bookmarks',
1505 1505 b'key': b,
1506 1506 b'old': hex(old),
1507 1507 b'new': hex(new),
1508 1508 },
1509 1509 ).result()
1510 1510
1511 1511 if r:
1512 1512 ui.status(bookmsgmap[action][0] % b)
1513 1513 else:
1514 1514 ui.warn(bookmsgmap[action][1] % b)
1515 1515 # discovery may have set the value from an invalid entry
1516 1516 if pushop.bkresult is not None:
1517 1517 pushop.bkresult = 1
1518 1518
1519 1519
1520 1520 class pulloperation(object):
1521 1521 """A object that represent a single pull operation
1522 1522
1523 1523 It purpose is to carry pull related state and very common operation.
1524 1524
1525 1525 A new should be created at the beginning of each pull and discarded
1526 1526 afterward.
1527 1527 """
1528 1528
1529 1529 def __init__(
1530 1530 self,
1531 1531 repo,
1532 1532 remote,
1533 1533 heads=None,
1534 1534 force=False,
1535 1535 bookmarks=(),
1536 1536 remotebookmarks=None,
1537 1537 streamclonerequested=None,
1538 1538 includepats=None,
1539 1539 excludepats=None,
1540 1540 depth=None,
1541 1541 ):
1542 1542 # repo we pull into
1543 1543 self.repo = repo
1544 1544 # repo we pull from
1545 1545 self.remote = remote
1546 1546 # revision we try to pull (None is "all")
1547 1547 self.heads = heads
1548 1548 # bookmark pulled explicitly
1549 1549 self.explicitbookmarks = [
1550 1550 repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
1551 1551 ]
1552 1552 # do we force pull?
1553 1553 self.force = force
1554 1554 # whether a streaming clone was requested
1555 1555 self.streamclonerequested = streamclonerequested
1556 1556 # transaction manager
1557 1557 self.trmanager = None
1558 1558 # set of common changeset between local and remote before pull
1559 1559 self.common = None
1560 1560 # set of pulled head
1561 1561 self.rheads = None
1562 1562 # list of missing changeset to fetch remotely
1563 1563 self.fetch = None
1564 1564 # remote bookmarks data
1565 1565 self.remotebookmarks = remotebookmarks
1566 1566 # result of changegroup pulling (used as return code by pull)
1567 1567 self.cgresult = None
1568 1568 # list of step already done
1569 1569 self.stepsdone = set()
1570 1570 # Whether we attempted a clone from pre-generated bundles.
1571 1571 self.clonebundleattempted = False
1572 1572 # Set of file patterns to include.
1573 1573 self.includepats = includepats
1574 1574 # Set of file patterns to exclude.
1575 1575 self.excludepats = excludepats
1576 1576 # Number of ancestor changesets to pull from each pulled head.
1577 1577 self.depth = depth
1578 1578
1579 1579 @util.propertycache
1580 1580 def pulledsubset(self):
1581 1581 """heads of the set of changeset target by the pull"""
1582 1582 # compute target subset
1583 1583 if self.heads is None:
1584 1584 # We pulled everything possible
1585 1585 # sync on everything common
1586 1586 c = set(self.common)
1587 1587 ret = list(self.common)
1588 1588 for n in self.rheads:
1589 1589 if n not in c:
1590 1590 ret.append(n)
1591 1591 return ret
1592 1592 else:
1593 1593 # We pulled a specific subset
1594 1594 # sync on this subset
1595 1595 return self.heads
1596 1596
1597 1597 @util.propertycache
1598 1598 def canusebundle2(self):
1599 1599 return not _forcebundle1(self)
1600 1600
1601 1601 @util.propertycache
1602 1602 def remotebundle2caps(self):
1603 1603 return bundle2.bundle2caps(self.remote)
1604 1604
1605 1605 def gettransaction(self):
1606 1606 # deprecated; talk to trmanager directly
1607 1607 return self.trmanager.transaction()
1608 1608
1609 1609
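# Illustrative sketch, not part of upstream exchange.py: the life cycle of a
# ``pulloperation`` as orchestrated by ``pull()`` further below. The helper
# name ``_sketch_minimal_pull`` is invented, locking is simplified (``pull()``
# takes the wlock conditionally), and error handling is elided.
def _sketch_minimal_pull(repo, remote):
    pullop = pulloperation(repo, remote, heads=None, force=False)
    pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
    with repo.wlock(), repo.lock(), pullop.trmanager:
        _pulldiscovery(pullop)  # fills pullop.common / fetch / rheads
        if pullop.canusebundle2:
            _fullpullbundle2(repo, pullop)
        _pullchangeset(pullop)  # no-op if the bundle2 path already ran
    return pullop

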
1610 1610 class transactionmanager(util.transactional):
1611 1611 """An object to manage the life cycle of a transaction
1612 1612
1613 1613 It creates the transaction on demand and calls the appropriate hooks when
1614 1614 closing the transaction."""
1615 1615
1616 1616 def __init__(self, repo, source, url):
1617 1617 self.repo = repo
1618 1618 self.source = source
1619 1619 self.url = url
1620 1620 self._tr = None
1621 1621
1622 1622 def transaction(self):
1623 1623 """Return an open transaction object, constructing if necessary"""
1624 1624 if not self._tr:
1625 1625 trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
1626 1626 self._tr = self.repo.transaction(trname)
1627 1627 self._tr.hookargs[b'source'] = self.source
1628 1628 self._tr.hookargs[b'url'] = self.url
1629 1629 return self._tr
1630 1630
1631 1631 def close(self):
1632 1632 """close transaction if created"""
1633 1633 if self._tr is not None:
1634 1634 self._tr.close()
1635 1635
1636 1636 def release(self):
1637 1637 """release transaction if created"""
1638 1638 if self._tr is not None:
1639 1639 self._tr.release()
1640 1640
1641 1641
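# Hedged example (hypothetical helper name): ``transactionmanager`` is lazy --
# no transaction exists until ``transaction()`` is first called, and
# ``close()`` / ``release()`` are safe no-ops when nothing was ever opened. A
# repo lock must be held before a transaction can actually be created.
def _sketch_trmanager_usage(repo, url):
    with repo.lock():
        trmanager = transactionmanager(repo, b'pull', url)
        with trmanager:  # util.transactional: close() on success, else release()
            tr = trmanager.transaction()  # created on first use
            tr.hookargs[b'example'] = b'1'  # hypothetical extra hook argument

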
1642 1642 def listkeys(remote, namespace):
1643 1643 with remote.commandexecutor() as e:
1644 1644 return e.callcommand(b'listkeys', {b'namespace': namespace}).result()
1645 1645
1646 1646
1647 1647 def _fullpullbundle2(repo, pullop):
1648 1648 # The server may send a partial reply, i.e. when inlining
1649 1649 # pre-computed bundles. In that case, update the common
1650 1650 # set based on the results and pull another bundle.
1651 1651 #
1652 1652 # There are two indicators that the process is finished:
1653 1653 # - no changeset has been added, or
1654 1654 # - all remote heads are known locally.
1655 1655 # The head check must use the unfiltered view as obsoletion
1656 1656 # markers can hide heads.
1657 1657 unfi = repo.unfiltered()
1658 1658 unficl = unfi.changelog
1659 1659
1660 1660 def headsofdiff(h1, h2):
1661 1661 """Returns heads(h1 % h2)"""
1662 1662 res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
1663 1663 return set(ctx.node() for ctx in res)
1664 1664
1665 1665 def headsofunion(h1, h2):
1666 1666 """Returns heads((h1 + h2) - null)"""
1667 1667 res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
1668 1668 return set(ctx.node() for ctx in res)
1669 1669
1670 1670 while True:
1671 1671 old_heads = unficl.heads()
1672 1672 clstart = len(unficl)
1673 1673 _pullbundle2(pullop)
1674 1674 if repository.NARROW_REQUIREMENT in repo.requirements:
1675 1675 # XXX narrow clones filter the heads on the server side during
1676 1676 # XXX getbundle and result in partial replies as well.
1677 1677 # XXX Disable pull bundles in this case as band aid to avoid
1678 1678 # XXX extra round trips.
1679 1679 break
1680 1680 if clstart == len(unficl):
1681 1681 break
1682 1682 if all(unficl.hasnode(n) for n in pullop.rheads):
1683 1683 break
1684 1684 new_heads = headsofdiff(unficl.heads(), old_heads)
1685 1685 pullop.common = headsofunion(new_heads, pullop.common)
1686 1686 pullop.rheads = set(pullop.rheads) - pullop.common
1687 1687
1688 1688
1689 1689 def pull(
1690 1690 repo,
1691 1691 remote,
1692 1692 heads=None,
1693 1693 force=False,
1694 1694 bookmarks=(),
1695 1695 opargs=None,
1696 1696 streamclonerequested=None,
1697 1697 includepats=None,
1698 1698 excludepats=None,
1699 1699 depth=None,
1700 1700 ):
1701 1701 """Fetch repository data from a remote.
1702 1702
1703 1703 This is the main function used to retrieve data from a remote repository.
1704 1704
1705 1705 ``repo`` is the local repository to clone into.
1706 1706 ``remote`` is a peer instance.
1707 1707 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1708 1708 default) means to pull everything from the remote.
1709 1709 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1710 1710 default, all remote bookmarks are pulled.
1711 1711 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1712 1712 initialization.
1713 1713 ``streamclonerequested`` is a boolean indicating whether a "streaming
1714 1714 clone" is requested. A "streaming clone" is essentially a raw file copy
1715 1715 of revlogs from the server. This only works when the local repository is
1716 1716 empty. The default value of ``None`` means to respect the server
1717 1717 configuration for preferring stream clones.
1718 1718 ``includepats`` and ``excludepats`` define explicit file patterns to
1719 1719 include and exclude in storage, respectively. If not defined, narrow
1720 1720 patterns from the repo instance are used, if available.
1721 1721 ``depth`` is an integer indicating the DAG depth of history we're
1722 1722 interested in. If defined, for each revision specified in ``heads``, we
1723 1723 will fetch up to this many of its ancestors and data associated with them.
1724 1724
1725 1725 Returns the ``pulloperation`` created for this pull.
1726 1726 """
1727 1727 if opargs is None:
1728 1728 opargs = {}
1729 1729
1730 1730 # We allow the narrow patterns to be passed in explicitly to provide more
1731 1731 # flexibility for API consumers.
1732 1732 if includepats or excludepats:
1733 1733 includepats = includepats or set()
1734 1734 excludepats = excludepats or set()
1735 1735 else:
1736 1736 includepats, excludepats = repo.narrowpats
1737 1737
1738 1738 narrowspec.validatepatterns(includepats)
1739 1739 narrowspec.validatepatterns(excludepats)
1740 1740
1741 1741 pullop = pulloperation(
1742 1742 repo,
1743 1743 remote,
1744 1744 heads,
1745 1745 force,
1746 1746 bookmarks=bookmarks,
1747 1747 streamclonerequested=streamclonerequested,
1748 1748 includepats=includepats,
1749 1749 excludepats=excludepats,
1750 1750 depth=depth,
1751 1751 **pycompat.strkwargs(opargs)
1752 1752 )
1753 1753
1754 1754 peerlocal = pullop.remote.local()
1755 1755 if peerlocal:
1756 1756 missing = set(peerlocal.requirements) - pullop.repo.supported
1757 1757 if missing:
1758 1758 msg = _(
1759 1759 b"required features are not"
1760 1760 b" supported in the destination:"
1761 1761 b" %s"
1762 1762 ) % (b', '.join(sorted(missing)))
1763 1763 raise error.Abort(msg)
1764 1764
1765 1765 pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
1766 1766 wlock = util.nullcontextmanager()
1767 1767 if not bookmod.bookmarksinstore(repo):
1768 1768 wlock = repo.wlock()
1769 1769 with wlock, repo.lock(), pullop.trmanager:
1770 1770 # Use the modern wire protocol, if available.
1771 1771 if remote.capable(b'command-changesetdata'):
1772 1772 exchangev2.pull(pullop)
1773 1773 else:
1774 1774 # This should ideally be in _pullbundle2(). However, it needs to run
1775 1775 # before discovery to avoid extra work.
1776 1776 _maybeapplyclonebundle(pullop)
1777 1777 streamclone.maybeperformlegacystreamclone(pullop)
1778 1778 _pulldiscovery(pullop)
1779 1779 if pullop.canusebundle2:
1780 1780 _fullpullbundle2(repo, pullop)
1781 1781 _pullchangeset(pullop)
1782 1782 _pullphase(pullop)
1783 1783 _pullbookmarks(pullop)
1784 1784 _pullobsolete(pullop)
1785 1785
1786 1786 # storing remotenames
1787 1787 if repo.ui.configbool(b'experimental', b'remotenames'):
1788 1788 logexchange.pullremotenames(repo, remote)
1789 1789
1790 1790 return pullop
1791 1791
1792 1792
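# Hedged usage sketch, not upstream code: pulling two specific heads from a
# peer. ``hg.peer`` is the usual way to obtain a peer; the local import and
# the helper name are assumptions for illustration.
def _sketch_pull_two_heads(repo, path, node1, node2):
    from . import hg  # delayed import: hg imports exchange

    remote = hg.peer(repo, {}, path)
    try:
        pullop = pull(repo, remote, heads=[node1, node2])
        return pullop.cgresult  # changegroup result, used as a return code
    finally:
        remote.close()

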
1793 1793 # list of steps to perform discovery before pull
1794 1794 pulldiscoveryorder = []
1795 1795
1796 1796 # Mapping between step name and function
1797 1797 #
1798 1798 # This exists to help extensions wrap steps if necessary
1799 1799 pulldiscoverymapping = {}
1800 1800
1801 1801
1802 1802 def pulldiscovery(stepname):
1803 1803 """decorator for function performing discovery before pull
1804 1804
1805 1805 The function is added to the step -> function mapping and appended to the
1806 1806 list of steps. Beware that decorated functions will be added in order (this
1807 1807 may matter).
1808 1808
1809 1809 You can only use this decorator for a new step; if you want to wrap a step
1810 1810 from an extension, change the pulldiscoverymapping dictionary directly."""
1811 1811
1812 1812 def dec(func):
1813 1813 assert stepname not in pulldiscoverymapping
1814 1814 pulldiscoverymapping[stepname] = func
1815 1815 pulldiscoveryorder.append(stepname)
1816 1816 return func
1817 1817
1818 1818 return dec
1819 1819
1820 1820
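# Hedged example of registering a *new* discovery step with the decorator
# above (wrapping an existing step goes through pulldiscoverymapping
# instead). The step name and body are invented for illustration.
@pulldiscovery(b'example:logremote')
def _sketch_logremotediscovery(pullop):
    """log the remote url before the stock discovery steps run"""
    pullop.repo.ui.debug(b'discovering against %s\n' % pullop.remote.url())

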
1821 1821 def _pulldiscovery(pullop):
1822 1822 """Run all discovery steps"""
1823 1823 for stepname in pulldiscoveryorder:
1824 1824 step = pulldiscoverymapping[stepname]
1825 1825 step(pullop)
1826 1826
1827 1827
1828 1828 @pulldiscovery(b'b1:bookmarks')
1829 1829 def _pullbookmarkbundle1(pullop):
1830 1830 """fetch bookmark data in bundle1 case
1831 1831
1832 1832 If not using bundle2, we have to fetch bookmarks before changeset
1833 1833 discovery to reduce the chance and impact of race conditions."""
1834 1834 if pullop.remotebookmarks is not None:
1835 1835 return
1836 1836 if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
1837 1837 # all known bundle2 servers now support listkeys, but let's be nice
1838 1838 # to new implementations.
1839 1839 return
1840 1840 books = listkeys(pullop.remote, b'bookmarks')
1841 1841 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1842 1842
1843 1843
1844 1844 @pulldiscovery(b'changegroup')
1845 1845 def _pulldiscoverychangegroup(pullop):
1846 1846 """discovery phase for the pull
1847 1847
1848 1848 Currently handles changeset discovery only; will change to handle all
1849 1849 discovery at some point."""
1850 1850 tmp = discovery.findcommonincoming(
1851 1851 pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
1852 1852 )
1853 1853 common, fetch, rheads = tmp
1854 1854 has_node = pullop.repo.unfiltered().changelog.index.has_node
1855 1855 if fetch and rheads:
1856 1856 # If a remote head is filtered locally, put it back in common.
1857 1857 #
1858 1858 # This is a hackish solution to catch most of the "common but locally
1859 1859 # hidden" situations. We do not perform discovery on the unfiltered
1860 1860 # repository because it ends up doing a pathological number of round
1861 1861 # trips for a huge amount of changesets we do not care about.
1862 1862 #
1863 1863 # If a set of such "common but filtered" changesets exists on the server
1864 1864 # but does not include a remote head, we'll not be able to detect it.
1865 1865 scommon = set(common)
1866 1866 for n in rheads:
1867 1867 if has_node(n):
1868 1868 if n not in scommon:
1869 1869 common.append(n)
1870 1870 if set(rheads).issubset(set(common)):
1871 1871 fetch = []
1872 1872 pullop.common = common
1873 1873 pullop.fetch = fetch
1874 1874 pullop.rheads = rheads
1875 1875
1876 1876
1877 1877 def _pullbundle2(pullop):
1878 1878 """pull data using bundle2
1879 1879
1880 1880 For now, the only supported data are changegroup."""
1881 1881 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1882 1882
1883 1883 # make ui easier to access
1884 1884 ui = pullop.repo.ui
1885 1885
1886 1886 # At the moment we don't do stream clones over bundle2. If that is
1887 1887 # implemented then here's where the check for that will go.
1888 1888 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1889 1889
1890 1890 # declare pull perimeters
1891 1891 kwargs[b'common'] = pullop.common
1892 1892 kwargs[b'heads'] = pullop.heads or pullop.rheads
1893 1893
1894 1894 # check whether the server supports narrow, then add includepats and excludepats
1895 1895 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1896 1896 if servernarrow and pullop.includepats:
1897 1897 kwargs[b'includepats'] = pullop.includepats
1898 1898 if servernarrow and pullop.excludepats:
1899 1899 kwargs[b'excludepats'] = pullop.excludepats
1900 1900
1901 1901 if streaming:
1902 1902 kwargs[b'cg'] = False
1903 1903 kwargs[b'stream'] = True
1904 1904 pullop.stepsdone.add(b'changegroup')
1905 1905 pullop.stepsdone.add(b'phases')
1906 1906
1907 1907 else:
1908 1908 # pulling changegroup
1909 1909 pullop.stepsdone.add(b'changegroup')
1910 1910
1911 1911 kwargs[b'cg'] = pullop.fetch
1912 1912
1913 1913 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1914 1914 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1915 1915 if not legacyphase and hasbinaryphase:
1916 1916 kwargs[b'phases'] = True
1917 1917 pullop.stepsdone.add(b'phases')
1918 1918
1919 1919 if b'listkeys' in pullop.remotebundle2caps:
1920 1920 if b'phases' not in pullop.stepsdone:
1921 1921 kwargs[b'listkeys'] = [b'phases']
1922 1922
1923 1923 bookmarksrequested = False
1924 1924 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1925 1925 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1926 1926
1927 1927 if pullop.remotebookmarks is not None:
1928 1928 pullop.stepsdone.add(b'request-bookmarks')
1929 1929
1930 1930 if (
1931 1931 b'request-bookmarks' not in pullop.stepsdone
1932 1932 and pullop.remotebookmarks is None
1933 1933 and not legacybookmark
1934 1934 and hasbinarybook
1935 1935 ):
1936 1936 kwargs[b'bookmarks'] = True
1937 1937 bookmarksrequested = True
1938 1938
1939 1939 if b'listkeys' in pullop.remotebundle2caps:
1940 1940 if b'request-bookmarks' not in pullop.stepsdone:
1941 1941 # make sure to always include bookmark data when migrating
1942 1942 # `hg incoming --bundle` to using this function.
1943 1943 pullop.stepsdone.add(b'request-bookmarks')
1944 1944 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1945 1945
1946 1946 # If this is a full pull / clone and the server supports the clone bundles
1947 1947 # feature, tell the server whether we attempted a clone bundle. The
1948 1948 # presence of this flag indicates the client supports clone bundles. This
1949 1949 # will enable the server to treat clients that support clone bundles
1950 1950 # differently from those that don't.
1951 1951 if (
1952 1952 pullop.remote.capable(b'clonebundles')
1953 1953 and pullop.heads is None
1954 1954 and list(pullop.common) == [nullid]
1955 1955 ):
1956 1956 kwargs[b'cbattempted'] = pullop.clonebundleattempted
1957 1957
1958 1958 if streaming:
1959 1959 pullop.repo.ui.status(_(b'streaming all changes\n'))
1960 1960 elif not pullop.fetch:
1961 1961 pullop.repo.ui.status(_(b"no changes found\n"))
1962 1962 pullop.cgresult = 0
1963 1963 else:
1964 1964 if pullop.heads is None and list(pullop.common) == [nullid]:
1965 1965 pullop.repo.ui.status(_(b"requesting all changes\n"))
1966 1966 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1967 1967 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1968 1968 if obsolete.commonversion(remoteversions) is not None:
1969 1969 kwargs[b'obsmarkers'] = True
1970 1970 pullop.stepsdone.add(b'obsmarkers')
1971 1971 _pullbundle2extraprepare(pullop, kwargs)
1972 1972
1973 1973 with pullop.remote.commandexecutor() as e:
1974 1974 args = dict(kwargs)
1975 1975 args[b'source'] = b'pull'
1976 1976 bundle = e.callcommand(b'getbundle', args).result()
1977 1977
1978 1978 try:
1979 1979 op = bundle2.bundleoperation(
1980 1980 pullop.repo, pullop.gettransaction, source=b'pull'
1981 1981 )
1982 1982 op.modes[b'bookmarks'] = b'records'
1983 1983 bundle2.processbundle(pullop.repo, bundle, op=op)
1984 1984 except bundle2.AbortFromPart as exc:
1985 1985 pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
1986 1986 raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
1987 1987 except error.BundleValueError as exc:
1988 1988 raise error.Abort(_(b'missing support for %s') % exc)
1989 1989
1990 1990 if pullop.fetch:
1991 1991 pullop.cgresult = bundle2.combinechangegroupresults(op)
1992 1992
1993 1993 # processing phases change
1994 1994 for namespace, value in op.records[b'listkeys']:
1995 1995 if namespace == b'phases':
1996 1996 _pullapplyphases(pullop, value)
1997 1997
1998 1998 # processing bookmark update
1999 1999 if bookmarksrequested:
2000 2000 books = {}
2001 2001 for record in op.records[b'bookmarks']:
2002 2002 books[record[b'bookmark']] = record[b"node"]
2003 2003 pullop.remotebookmarks = books
2004 2004 else:
2005 2005 for namespace, value in op.records[b'listkeys']:
2006 2006 if namespace == b'bookmarks':
2007 2007 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
2008 2008
2009 2009 # bookmark data were either already there or pulled in the bundle
2010 2010 if pullop.remotebookmarks is not None:
2011 2011 _pullbookmarks(pullop)
2012 2012
2013 2013
2014 2014 def _pullbundle2extraprepare(pullop, kwargs):
2015 2015 """hook function so that extensions can extend the getbundle call"""
2016 2016
2017 2017
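# Hedged sketch of how an extension would use the hook above, via the
# standard ``extensions.wrapfunction`` mechanism. The extra argument name is
# invented; a real extension must only add arguments its server understands.
#
#     from mercurial import exchange, extensions
#
#     def _extraprepare(orig, pullop, kwargs):
#         kwargs[b'example_arg'] = b'1'  # forwarded to the getbundle call
#         return orig(pullop, kwargs)
#
#     def extsetup(ui):
#         extensions.wrapfunction(
#             exchange, '_pullbundle2extraprepare', _extraprepare
#         )

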
2018 2018 def _pullchangeset(pullop):
2019 2019 """pull changeset from unbundle into the local repo"""
2020 2020 # We delay opening the transaction as late as possible so we
2021 2021 # don't open a transaction for nothing and don't break future useful
2022 2022 # rollback calls
2023 2023 if b'changegroup' in pullop.stepsdone:
2024 2024 return
2025 2025 pullop.stepsdone.add(b'changegroup')
2026 2026 if not pullop.fetch:
2027 2027 pullop.repo.ui.status(_(b"no changes found\n"))
2028 2028 pullop.cgresult = 0
2029 2029 return
2030 2030 tr = pullop.gettransaction()
2031 2031 if pullop.heads is None and list(pullop.common) == [nullid]:
2032 2032 pullop.repo.ui.status(_(b"requesting all changes\n"))
2033 2033 elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
2034 2034 # issue1320, avoid a race if remote changed after discovery
2035 2035 pullop.heads = pullop.rheads
2036 2036
2037 2037 if pullop.remote.capable(b'getbundle'):
2038 2038 # TODO: get bundlecaps from remote
2039 2039 cg = pullop.remote.getbundle(
2040 2040 b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
2041 2041 )
2042 2042 elif pullop.heads is None:
2043 2043 with pullop.remote.commandexecutor() as e:
2044 2044 cg = e.callcommand(
2045 2045 b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',}
2046 2046 ).result()
2047 2047
2048 2048 elif not pullop.remote.capable(b'changegroupsubset'):
2049 2049 raise error.Abort(
2050 2050 _(
2051 2051 b"partial pull cannot be done because "
2052 2052 b"other repository doesn't support "
2053 2053 b"changegroupsubset."
2054 2054 )
2055 2055 )
2056 2056 else:
2057 2057 with pullop.remote.commandexecutor() as e:
2058 2058 cg = e.callcommand(
2059 2059 b'changegroupsubset',
2060 2060 {
2061 2061 b'bases': pullop.fetch,
2062 2062 b'heads': pullop.heads,
2063 2063 b'source': b'pull',
2064 2064 },
2065 2065 ).result()
2066 2066
2067 2067 bundleop = bundle2.applybundle(
2068 2068 pullop.repo, cg, tr, b'pull', pullop.remote.url()
2069 2069 )
2070 2070 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2071 2071
2072 2072
2073 2073 def _pullphase(pullop):
2074 2074 # Get remote phases data from remote
2075 2075 if b'phases' in pullop.stepsdone:
2076 2076 return
2077 2077 remotephases = listkeys(pullop.remote, b'phases')
2078 2078 _pullapplyphases(pullop, remotephases)
2079 2079
2080 2080
2081 2081 def _pullapplyphases(pullop, remotephases):
2082 2082 """apply phase movement from observed remote state"""
2083 2083 if b'phases' in pullop.stepsdone:
2084 2084 return
2085 2085 pullop.stepsdone.add(b'phases')
2086 2086 publishing = bool(remotephases.get(b'publishing', False))
2087 2087 if remotephases and not publishing:
2088 2088 # remote is new and non-publishing
2089 2089 pheads, _dr = phases.analyzeremotephases(
2090 2090 pullop.repo, pullop.pulledsubset, remotephases
2091 2091 )
2092 2092 dheads = pullop.pulledsubset
2093 2093 else:
2094 2094 # Remote is old or publishing all common changesets
2095 2095 # should be seen as public
2096 2096 pheads = pullop.pulledsubset
2097 2097 dheads = []
2098 2098 unfi = pullop.repo.unfiltered()
2099 2099 phase = unfi._phasecache.phase
2100 2100 rev = unfi.changelog.index.get_rev
2101 2101 public = phases.public
2102 2102 draft = phases.draft
2103 2103
2104 2104 # exclude changesets already public locally and update the others
2105 2105 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
2106 2106 if pheads:
2107 2107 tr = pullop.gettransaction()
2108 2108 phases.advanceboundary(pullop.repo, tr, public, pheads)
2109 2109
2110 2110 # exclude changesets already draft locally and update the others
2111 2111 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
2112 2112 if dheads:
2113 2113 tr = pullop.gettransaction()
2114 2114 phases.advanceboundary(pullop.repo, tr, draft, dheads)
2115 2115
2116 2116
2117 2117 def _pullbookmarks(pullop):
2118 2118 """process the remote bookmark information to update the local one"""
2119 2119 if b'bookmarks' in pullop.stepsdone:
2120 2120 return
2121 2121 pullop.stepsdone.add(b'bookmarks')
2122 2122 repo = pullop.repo
2123 2123 remotebookmarks = pullop.remotebookmarks
2124 2124 bookmod.updatefromremote(
2125 2125 repo.ui,
2126 2126 repo,
2127 2127 remotebookmarks,
2128 2128 pullop.remote.url(),
2129 2129 pullop.gettransaction,
2130 2130 explicit=pullop.explicitbookmarks,
2131 2131 )
2132 2132
2133 2133
2134 2134 def _pullobsolete(pullop):
2135 2135 """utility function to pull obsolete markers from a remote
2136 2136
2137 2137 The `gettransaction` is a function that returns the pull transaction,
2138 2138 creating one if necessary. We return the transaction to inform the calling
2139 2139 code that a new transaction has been created (when applicable).
2140 2140
2141 2141 Exists mostly to allow overriding for experimentation purposes"""
2142 2142 if b'obsmarkers' in pullop.stepsdone:
2143 2143 return
2144 2144 pullop.stepsdone.add(b'obsmarkers')
2145 2145 tr = None
2146 2146 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2147 2147 pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
2148 2148 remoteobs = listkeys(pullop.remote, b'obsolete')
2149 2149 if b'dump0' in remoteobs:
2150 2150 tr = pullop.gettransaction()
2151 2151 markers = []
2152 2152 for key in sorted(remoteobs, reverse=True):
2153 2153 if key.startswith(b'dump'):
2154 2154 data = util.b85decode(remoteobs[key])
2155 2155 version, newmarks = obsolete._readmarkers(data)
2156 2156 markers += newmarks
2157 2157 if markers:
2158 2158 pullop.repo.obsstore.add(tr, markers)
2159 2159 pullop.repo.invalidatevolatilesets()
2160 2160 return tr
2161 2161
2162 2162
2163 2163 def applynarrowacl(repo, kwargs):
2164 2164 """Apply narrow fetch access control.
2165 2165
2166 2166 This massages the named arguments for getbundle wire protocol commands
2167 2167 so requested data is filtered through access control rules.
2168 2168 """
2169 2169 ui = repo.ui
2170 2170 # TODO this assumes existence of HTTP and is a layering violation.
2171 2171 username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
2172 2172 user_includes = ui.configlist(
2173 2173 _NARROWACL_SECTION,
2174 2174 username + b'.includes',
2175 2175 ui.configlist(_NARROWACL_SECTION, b'default.includes'),
2176 2176 )
2177 2177 user_excludes = ui.configlist(
2178 2178 _NARROWACL_SECTION,
2179 2179 username + b'.excludes',
2180 2180 ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
2181 2181 )
2182 2182 if not user_includes:
2183 2183 raise error.Abort(
2184 2184 _(b"%s configuration for user %s is empty")
2185 2185 % (_NARROWACL_SECTION, username)
2186 2186 )
2187 2187
2188 2188 user_includes = [
2189 2189 b'path:.' if p == b'*' else b'path:' + p for p in user_includes
2190 2190 ]
2191 2191 user_excludes = [
2192 2192 b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
2193 2193 ]
2194 2194
2195 2195 req_includes = set(kwargs.get('includepats', []))
2196 2196 req_excludes = set(kwargs.get('excludepats', []))
2197 2197
2198 2198 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
2199 2199 req_includes, req_excludes, user_includes, user_excludes
2200 2200 )
2201 2201
2202 2202 if invalid_includes:
2203 2203 raise error.Abort(
2204 2204 _(b"The following includes are not accessible for %s: %s")
2205 2205 % (username, invalid_includes)
2206 2206 )
2207 2207
2208 2208 new_args = {}
2209 2209 new_args.update(kwargs)
2210 2210 new_args['narrow'] = True
2211 2211 new_args['narrow_acl'] = True
2212 2212 new_args['includepats'] = req_includes
2213 2213 if req_excludes:
2214 2214 new_args['excludepats'] = req_excludes
2215 2215
2216 2216 return new_args
2217 2217
2218 2218
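# Hedged example (hypothetical user and paths): the server-side ``narrowacl``
# configuration consumed by ``applynarrowacl`` above. ``*`` is rewritten to
# ``path:.`` and any other value gets a ``path:`` prefix before being handed
# to ``narrowspec.restrictpatterns``.
#
#     [narrowacl]
#     default.includes = *
#     alice.includes = src
#     alice.excludes = src/secret
#
# With this, user ``alice`` may only fetch ``src/`` minus ``src/secret/``,
# while every other user falls back to the ``default.*`` entries.

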
2219 2219 def _computeellipsis(repo, common, heads, known, match, depth=None):
2220 2220 """Compute the shape of a narrowed DAG.
2221 2221
2222 2222 Args:
2223 2223 repo: The repository we're transferring.
2224 2224 common: The roots of the DAG range we're transferring.
2225 2225 May be just [nullid], which means all ancestors of heads.
2226 2226 heads: The heads of the DAG range we're transferring.
2227 2227 match: The narrowmatcher that allows us to identify relevant changes.
2228 2228 depth: If not None, only consider nodes to be full nodes if they are at
2229 2229 most depth changesets away from one of heads.
2230 2230
2231 2231 Returns:
2232 2232 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
2233 2233
2234 2234 visitnodes: The list of nodes (either full or ellipsis) which
2235 2235 need to be sent to the client.
2236 2236 relevant_nodes: The set of changelog nodes which change a file inside
2237 2237 the narrowspec. The client needs these as non-ellipsis nodes.
2238 2238 ellipsisroots: A dict of {rev: parents} that is used in
2239 2239 narrowchangegroup to produce ellipsis nodes with the
2240 2240 correct parents.
2241 2241 """
2242 2242 cl = repo.changelog
2243 2243 mfl = repo.manifestlog
2244 2244
2245 2245 clrev = cl.rev
2246 2246
2247 2247 commonrevs = {clrev(n) for n in common} | {nullrev}
2248 2248 headsrevs = {clrev(n) for n in heads}
2249 2249
2250 2250 if depth:
2251 2251 revdepth = {h: 0 for h in headsrevs}
2252 2252
2253 2253 ellipsisheads = collections.defaultdict(set)
2254 2254 ellipsisroots = collections.defaultdict(set)
2255 2255
2256 2256 def addroot(head, curchange):
2257 2257 """Add a root to an ellipsis head, splitting heads with 3 roots."""
2258 2258 ellipsisroots[head].add(curchange)
2259 2259 # Recursively split ellipsis heads with 3 roots by finding the
2260 2260 # roots' youngest common descendant which is an elided merge commit.
2261 2261 # That descendant takes 2 of the 3 roots as its own, and becomes a
2262 2262 # root of the head.
2263 2263 while len(ellipsisroots[head]) > 2:
2264 2264 child, roots = splithead(head)
2265 2265 splitroots(head, child, roots)
2266 2266 head = child # Recurse in case we just added a 3rd root
2267 2267
2268 2268 def splitroots(head, child, roots):
2269 2269 ellipsisroots[head].difference_update(roots)
2270 2270 ellipsisroots[head].add(child)
2271 2271 ellipsisroots[child].update(roots)
2272 2272 ellipsisroots[child].discard(child)
2273 2273
2274 2274 def splithead(head):
2275 2275 r1, r2, r3 = sorted(ellipsisroots[head])
2276 2276 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
2277 2277 mid = repo.revs(
2278 2278 b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
2279 2279 )
2280 2280 for j in mid:
2281 2281 if j == nr2:
2282 2282 return nr2, (nr1, nr2)
2283 2283 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
2284 2284 return j, (nr1, nr2)
2285 2285 raise error.Abort(
2286 2286 _(
2287 2287 b'Failed to split up ellipsis node! head: %d, '
2288 2288 b'roots: %d %d %d'
2289 2289 )
2290 2290 % (head, r1, r2, r3)
2291 2291 )
2292 2292
2293 2293 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
2294 2294 visit = reversed(missing)
2295 2295 relevant_nodes = set()
2296 2296 visitnodes = [cl.node(m) for m in missing]
2297 2297 required = set(headsrevs) | known
2298 2298 for rev in visit:
2299 2299 clrev = cl.changelogrevision(rev)
2300 2300 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
2301 2301 if depth is not None:
2302 2302 curdepth = revdepth[rev]
2303 2303 for p in ps:
2304 2304 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
2305 2305 needed = False
2306 2306 shallow_enough = depth is None or revdepth[rev] <= depth
2307 2307 if shallow_enough:
2308 2308 curmf = mfl[clrev.manifest].read()
2309 2309 if ps:
2310 2310 # We choose to not trust the changed files list in
2311 2311 # changesets because it's not always correct. TODO: could
2312 2312 # we trust it for the non-merge case?
2313 2313 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
2314 2314 needed = bool(curmf.diff(p1mf, match))
2315 2315 if not needed and len(ps) > 1:
2316 2316 # For merge changes, the list of changed files is not
2317 2317 # helpful, since we need to emit the merge if a file
2318 2318 # in the narrow spec has changed on either side of the
2319 2319 # merge. As a result, we do a manifest diff to check.
2320 2320 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2321 2321 needed = bool(curmf.diff(p2mf, match))
2322 2322 else:
2323 2323 # For a root node, we need to include the node if any
2324 2324 # files in the node match the narrowspec.
2325 2325 needed = any(curmf.walk(match))
2326 2326
2327 2327 if needed:
2328 2328 for head in ellipsisheads[rev]:
2329 2329 addroot(head, rev)
2330 2330 for p in ps:
2331 2331 required.add(p)
2332 2332 relevant_nodes.add(cl.node(rev))
2333 2333 else:
2334 2334 if not ps:
2335 2335 ps = [nullrev]
2336 2336 if rev in required:
2337 2337 for head in ellipsisheads[rev]:
2338 2338 addroot(head, rev)
2339 2339 for p in ps:
2340 2340 ellipsisheads[p].add(rev)
2341 2341 else:
2342 2342 for p in ps:
2343 2343 ellipsisheads[p] |= ellipsisheads[rev]
2344 2344
2345 2345 # add common changesets as roots of their reachable ellipsis heads
2346 2346 for c in commonrevs:
2347 2347 for head in ellipsisheads[c]:
2348 2348 addroot(head, c)
2349 2349 return visitnodes, relevant_nodes, ellipsisroots
2350 2350
2351 2351
2352 2352 def caps20to10(repo, role):
2353 2353 """return a set with appropriate options to use bundle20 during getbundle"""
2354 2354 caps = {b'HG20'}
2355 2355 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2356 2356 caps.add(b'bundle2=' + urlreq.quote(capsblob))
2357 2357 return caps
2358 2358
2359 2359
2360 2360 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2361 2361 getbundle2partsorder = []
2362 2362
2363 2363 # Mapping between step name and function
2364 2364 #
2365 2365 # This exists to help extensions wrap steps if necessary
2366 2366 getbundle2partsmapping = {}
2367 2367
2368 2368
2369 2369 def getbundle2partsgenerator(stepname, idx=None):
2370 2370 """decorator for function generating bundle2 part for getbundle
2371 2371
2372 2372 The function is added to the step -> function mapping and appended to the
2373 2373 list of steps. Beware that decorated functions will be added in order
2374 2374 (this may matter).
2375 2375
2376 2376 You can only use this decorator for new steps; if you want to wrap a step
2377 2377 from an extension, change the getbundle2partsmapping dictionary directly."""
2378 2378
2379 2379 def dec(func):
2380 2380 assert stepname not in getbundle2partsmapping
2381 2381 getbundle2partsmapping[stepname] = func
2382 2382 if idx is None:
2383 2383 getbundle2partsorder.append(stepname)
2384 2384 else:
2385 2385 getbundle2partsorder.insert(idx, stepname)
2386 2386 return func
2387 2387
2388 2388 return dec
2389 2389
2390 2390
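# Hedged example (invented part type): registering a new getbundle part
# generator with the decorator above. Passing ``idx`` would splice the step
# at a fixed position instead of appending it.
@getbundle2partsgenerator(b'example:capsule')
def _sketch_capsulepart(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """add an advisory part carrying the repo root (illustration only)"""
    if not kwargs.get('example_capsule', False):
        return
    bundler.newpart(b'example:capsule', data=repo.root, mandatory=False)

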
2391 2391 def bundle2requested(bundlecaps):
2392 2392 if bundlecaps is not None:
2393 2393 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2394 2394 return False
2395 2395
2396 2396
2397 2397 def getbundlechunks(
2398 2398 repo, source, heads=None, common=None, bundlecaps=None, **kwargs
2399 2399 ):
2400 2400 """Return chunks constituting a bundle's raw data.
2401 2401
2402 2402 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2403 2403 passed.
2404 2404
2405 2405 Returns a 2-tuple of a dict with metadata about the generated bundle
2406 2406 and an iterator over raw chunks (of varying sizes).
2407 2407 """
2408 2408 kwargs = pycompat.byteskwargs(kwargs)
2409 2409 info = {}
2410 2410 usebundle2 = bundle2requested(bundlecaps)
2411 2411 # bundle10 case
2412 2412 if not usebundle2:
2413 2413 if bundlecaps and not kwargs.get(b'cg', True):
2414 2414 raise ValueError(
2415 2415 _(b'request for bundle10 must include changegroup')
2416 2416 )
2417 2417
2418 2418 if kwargs:
2419 2419 raise ValueError(
2420 2420 _(b'unsupported getbundle arguments: %s')
2421 2421 % b', '.join(sorted(kwargs.keys()))
2422 2422 )
2423 2423 outgoing = _computeoutgoing(repo, heads, common)
2424 2424 info[b'bundleversion'] = 1
2425 2425 return (
2426 2426 info,
2427 2427 changegroup.makestream(
2428 2428 repo, outgoing, b'01', source, bundlecaps=bundlecaps
2429 2429 ),
2430 2430 )
2431 2431
2432 2432 # bundle20 case
2433 2433 info[b'bundleversion'] = 2
2434 2434 b2caps = {}
2435 2435 for bcaps in bundlecaps:
2436 2436 if bcaps.startswith(b'bundle2='):
2437 2437 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2438 2438 b2caps.update(bundle2.decodecaps(blob))
2439 2439 bundler = bundle2.bundle20(repo.ui, b2caps)
2440 2440
2441 2441 kwargs[b'heads'] = heads
2442 2442 kwargs[b'common'] = common
2443 2443
2444 2444 for name in getbundle2partsorder:
2445 2445 func = getbundle2partsmapping[name]
2446 2446 func(
2447 2447 bundler,
2448 2448 repo,
2449 2449 source,
2450 2450 bundlecaps=bundlecaps,
2451 2451 b2caps=b2caps,
2452 2452 **pycompat.strkwargs(kwargs)
2453 2453 )
2454 2454
2455 2455 info[b'prefercompressed'] = bundler.prefercompressed
2456 2456
2457 2457 return info, bundler.getchunks()
2458 2458
2459 2459
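# Hedged usage sketch (hypothetical helper): materializing the chunks from
# ``getbundlechunks`` into a file. Real callers usually stream the chunks
# over the wire instead of buffering them locally.
def _sketch_writebundle(repo, path):
    caps = caps20to10(repo, role=b'client')  # mimic a bundle2 client request
    info, chunks = getbundlechunks(repo, b'serve', bundlecaps=caps)
    with open(path, 'wb') as fh:
        for chunk in chunks:
            fh.write(chunk)
    return info[b'bundleversion']  # 2 for the bundle20 path taken here

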
2460 2460 @getbundle2partsgenerator(b'stream2')
2461 2461 def _getbundlestream2(bundler, repo, *args, **kwargs):
2462 2462 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2463 2463
2464 2464
2465 2465 @getbundle2partsgenerator(b'changegroup')
2466 2466 def _getbundlechangegrouppart(
2467 2467 bundler,
2468 2468 repo,
2469 2469 source,
2470 2470 bundlecaps=None,
2471 2471 b2caps=None,
2472 2472 heads=None,
2473 2473 common=None,
2474 2474 **kwargs
2475 2475 ):
2476 2476 """add a changegroup part to the requested bundle"""
2477 if not kwargs.get('cg', True):
2477 if not kwargs.get('cg', True) or not b2caps:
2478 2478 return
2479 2479
2480 2480 version = b'01'
2481 2481 cgversions = b2caps.get(b'changegroup')
2482 2482 if cgversions: # 3.1 and 3.2 ship with an empty value
2483 2483 cgversions = [
2484 2484 v
2485 2485 for v in cgversions
2486 2486 if v in changegroup.supportedoutgoingversions(repo)
2487 2487 ]
2488 2488 if not cgversions:
2489 2489 raise error.Abort(_(b'no common changegroup version'))
2490 2490 version = max(cgversions)
2491 2491
2492 2492 outgoing = _computeoutgoing(repo, heads, common)
2493 2493 if not outgoing.missing:
2494 2494 return
2495 2495
2496 2496 if kwargs.get('narrow', False):
2497 2497 include = sorted(filter(bool, kwargs.get('includepats', [])))
2498 2498 exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
2499 2499 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2500 2500 else:
2501 2501 matcher = None
2502 2502
2503 2503 cgstream = changegroup.makestream(
2504 2504 repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
2505 2505 )
2506 2506
2507 2507 part = bundler.newpart(b'changegroup', data=cgstream)
2508 2508 if cgversions:
2509 2509 part.addparam(b'version', version)
2510 2510
2511 2511 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2512 2512
2513 2513 if b'treemanifest' in repo.requirements:
2514 2514 part.addparam(b'treemanifest', b'1')
2515 2515
2516 2516 if b'exp-sidedata-flag' in repo.requirements:
2517 2517 part.addparam(b'exp-sidedata', b'1')
2518 2518
2519 2519 if (
2520 2520 kwargs.get('narrow', False)
2521 2521 and kwargs.get('narrow_acl', False)
2522 2522 and (include or exclude)
2523 2523 ):
2524 2524 # this is mandatory because otherwise ACL clients won't work
2525 2525 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2526 2526 narrowspecpart.data = b'%s\0%s' % (
2527 2527 b'\n'.join(include),
2528 2528 b'\n'.join(exclude),
2529 2529 )
2530 2530
2531 2531
2532 2532 @getbundle2partsgenerator(b'bookmarks')
2533 2533 def _getbundlebookmarkpart(
2534 2534 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2535 2535 ):
2536 2536 """add a bookmark part to the requested bundle"""
2537 2537 if not kwargs.get('bookmarks', False):
2538 2538 return
2539 if b'bookmarks' not in b2caps:
2539 if not b2caps or b'bookmarks' not in b2caps:
2540 2540 raise error.Abort(_(b'no common bookmarks exchange method'))
2541 2541 books = bookmod.listbinbookmarks(repo)
2542 2542 data = bookmod.binaryencode(books)
2543 2543 if data:
2544 2544 bundler.newpart(b'bookmarks', data=data)
2545 2545
2546 2546
2547 2547 @getbundle2partsgenerator(b'listkeys')
2548 2548 def _getbundlelistkeysparts(
2549 2549 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2550 2550 ):
2551 2551 """add parts containing listkeys namespaces to the requested bundle"""
2552 2552 listkeys = kwargs.get('listkeys', ())
2553 2553 for namespace in listkeys:
2554 2554 part = bundler.newpart(b'listkeys')
2555 2555 part.addparam(b'namespace', namespace)
2556 2556 keys = repo.listkeys(namespace).items()
2557 2557 part.data = pushkey.encodekeys(keys)
2558 2558
2559 2559
2560 2560 @getbundle2partsgenerator(b'obsmarkers')
2561 2561 def _getbundleobsmarkerpart(
2562 2562 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2563 2563 ):
2564 2564 """add an obsolescence markers part to the requested bundle"""
2565 2565 if kwargs.get('obsmarkers', False):
2566 2566 if heads is None:
2567 2567 heads = repo.heads()
2568 2568 subset = [c.node() for c in repo.set(b'::%ln', heads)]
2569 2569 markers = repo.obsstore.relevantmarkers(subset)
2570 2570 markers = obsutil.sortedmarkers(markers)
2571 2571 bundle2.buildobsmarkerspart(bundler, markers)
2572 2572
2573 2573
2574 2574 @getbundle2partsgenerator(b'phases')
2575 2575 def _getbundlephasespart(
2576 2576 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2577 2577 ):
2578 2578 """add phase heads part to the requested bundle"""
2579 2579 if kwargs.get('phases', False):
2580 if not b'heads' in b2caps.get(b'phases'):
2580 if not b2caps or b'heads' not in b2caps.get(b'phases', ()):
2581 2581 raise error.Abort(_(b'no common phases exchange method'))
2582 2582 if heads is None:
2583 2583 heads = repo.heads()
2584 2584
2585 2585 headsbyphase = collections.defaultdict(set)
2586 2586 if repo.publishing():
2587 2587 headsbyphase[phases.public] = heads
2588 2588 else:
2589 2589 # find the appropriate heads to move
2590 2590
2591 2591 phase = repo._phasecache.phase
2592 2592 node = repo.changelog.node
2593 2593 rev = repo.changelog.rev
2594 2594 for h in heads:
2595 2595 headsbyphase[phase(repo, rev(h))].add(h)
2596 2596 seenphases = list(headsbyphase.keys())
2597 2597
2598 2598 # We do not handle anything but public and draft phases for now
2599 2599 if seenphases:
2600 2600 assert max(seenphases) <= phases.draft
2601 2601
2602 2602 # if client is pulling non-public changesets, we need to find
2603 2603 # intermediate public heads.
2604 2604 draftheads = headsbyphase.get(phases.draft, set())
2605 2605 if draftheads:
2606 2606 publicheads = headsbyphase.get(phases.public, set())
2607 2607
2608 2608 revset = b'heads(only(%ln, %ln) and public())'
2609 2609 extraheads = repo.revs(revset, draftheads, publicheads)
2610 2610 for r in extraheads:
2611 2611 headsbyphase[phases.public].add(node(r))
2612 2612
2613 2613 # transform data in a format used by the encoding function
2614 2614 phasemapping = []
2615 2615 for phase in phases.allphases:
2616 2616 phasemapping.append(sorted(headsbyphase[phase]))
2617 2617
2618 2618 # generate the actual part
2619 2619 phasedata = phases.binaryencode(phasemapping)
2620 2620 bundler.newpart(b'phase-heads', data=phasedata)
2621 2621
2622 2622
2623 2623 @getbundle2partsgenerator(b'hgtagsfnodes')
2624 2624 def _getbundletagsfnodes(
2625 2625 bundler,
2626 2626 repo,
2627 2627 source,
2628 2628 bundlecaps=None,
2629 2629 b2caps=None,
2630 2630 heads=None,
2631 2631 common=None,
2632 2632 **kwargs
2633 2633 ):
2634 2634 """Transfer the .hgtags filenodes mapping.
2635 2635
2636 2636 Only values for heads in this bundle will be transferred.
2637 2637
2638 2638 The part data consists of pairs of 20 byte changeset node and .hgtags
2639 2639 filenodes raw values.
2640 2640 """
2641 2641 # Don't send unless:
2642 2642 # - changeset are being exchanged,
2643 2643 # - the client supports it.
2644 if not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
2644 if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
2645 2645 return
2646 2646
2647 2647 outgoing = _computeoutgoing(repo, heads, common)
2648 2648 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2649 2649
2650 2650
2651 2651 @getbundle2partsgenerator(b'cache:rev-branch-cache')
2652 2652 def _getbundlerevbranchcache(
2653 2653 bundler,
2654 2654 repo,
2655 2655 source,
2656 2656 bundlecaps=None,
2657 2657 b2caps=None,
2658 2658 heads=None,
2659 2659 common=None,
2660 2660 **kwargs
2661 2661 ):
2662 2662 """Transfer the rev-branch-cache mapping
2663 2663
2664 2664 The payload is a series of data related to each branch
2665 2665
2666 2666 1) branch name length
2667 2667 2) number of open heads
2668 2668 3) number of closed heads
2669 2669 4) open heads nodes
2670 2670 5) closed heads nodes
2671 2671 """
2672 2672 # Don't send unless:
2673 2673 # - changesets are being exchanged,
2674 2674 # - the client supports it.
2675 2675 # - narrow bundle isn't in play (not currently compatible).
2676 2676 if (
2677 2677 not kwargs.get('cg', True)
2678 or not b2caps
2678 2679 or b'rev-branch-cache' not in b2caps
2679 2680 or kwargs.get('narrow', False)
2680 2681 or repo.ui.has_section(_NARROWACL_SECTION)
2681 2682 ):
2682 2683 return
2683 2684
2684 2685 outgoing = _computeoutgoing(repo, heads, common)
2685 2686 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2686 2687
2687 2688
2688 2689 def check_heads(repo, their_heads, context):
2689 2690 """check if the heads of a repo have been modified
2690 2691
2691 2692 Used by peer for unbundling.
2692 2693 """
2693 2694 heads = repo.heads()
2694 2695 heads_hash = hashlib.sha1(b''.join(sorted(heads))).digest()
2695 2696 if not (
2696 2697 their_heads == [b'force']
2697 2698 or their_heads == heads
2698 2699 or their_heads == [b'hashed', heads_hash]
2699 2700 ):
2700 2701 # someone else committed/pushed/unbundled while we
2701 2702 # were transferring data
2702 2703 raise error.PushRaced(
2703 2704 b'repository changed while %s - please try again' % context
2704 2705 )
2705 2706
2706 2707
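# Hedged sketch: how a bundle1 pushing client builds the ``['hashed', ...]``
# form that ``check_heads`` compares against. The helper name is invented.
def _sketch_hashedheads(peer):
    heads = peer.heads()  # remote heads observed before building the bundle
    return [b'hashed', hashlib.sha1(b''.join(sorted(heads))).digest()]

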
2707 2708 def unbundle(repo, cg, heads, source, url):
2708 2709 """Apply a bundle to a repo.
2709 2710
2710 2711 this function makes sure the repo is locked during the application and has
2711 2712 a mechanism to check that no push race occurred between the creation of the
2712 2713 bundle and its application.
2713 2714
2714 2715 If the push was raced, a PushRaced exception is raised."""
2715 2716 r = 0
2716 2717 # need a transaction when processing a bundle2 stream
2717 2718 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2718 2719 lockandtr = [None, None, None]
2719 2720 recordout = None
2720 2721 # quick fix for output mismatch with bundle2 in 3.4
2721 2722 captureoutput = repo.ui.configbool(
2722 2723 b'experimental', b'bundle2-output-capture'
2723 2724 )
2724 2725 if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
2725 2726 captureoutput = True
2726 2727 try:
2727 2728 # note: outside bundle1, 'heads' is expected to be empty and this
2728 2729 # 'check_heads' call will be a no-op
2729 2730 check_heads(repo, heads, b'uploading changes')
2730 2731 # push can proceed
2731 2732 if not isinstance(cg, bundle2.unbundle20):
2732 2733 # legacy case: bundle1 (changegroup 01)
2733 2734 txnname = b"\n".join([source, util.hidepassword(url)])
2734 2735 with repo.lock(), repo.transaction(txnname) as tr:
2735 2736 op = bundle2.applybundle(repo, cg, tr, source, url)
2736 2737 r = bundle2.combinechangegroupresults(op)
2737 2738 else:
2738 2739 r = None
2739 2740 try:
2740 2741
2741 2742 def gettransaction():
2742 2743 if not lockandtr[2]:
2743 2744 if not bookmod.bookmarksinstore(repo):
2744 2745 lockandtr[0] = repo.wlock()
2745 2746 lockandtr[1] = repo.lock()
2746 2747 lockandtr[2] = repo.transaction(source)
2747 2748 lockandtr[2].hookargs[b'source'] = source
2748 2749 lockandtr[2].hookargs[b'url'] = url
2749 2750 lockandtr[2].hookargs[b'bundle2'] = b'1'
2750 2751 return lockandtr[2]
2751 2752
2752 2753 # Do greedy locking by default until we're satisfied with lazy
2753 2754 # locking.
2754 2755 if not repo.ui.configbool(
2755 2756 b'experimental', b'bundle2lazylocking'
2756 2757 ):
2757 2758 gettransaction()
2758 2759
2759 2760 op = bundle2.bundleoperation(
2760 2761 repo,
2761 2762 gettransaction,
2762 2763 captureoutput=captureoutput,
2763 2764 source=b'push',
2764 2765 )
2765 2766 try:
2766 2767 op = bundle2.processbundle(repo, cg, op=op)
2767 2768 finally:
2768 2769 r = op.reply
2769 2770 if captureoutput and r is not None:
2770 2771 repo.ui.pushbuffer(error=True, subproc=True)
2771 2772
2772 2773 def recordout(output):
2773 2774 r.newpart(b'output', data=output, mandatory=False)
2774 2775
2775 2776 if lockandtr[2] is not None:
2776 2777 lockandtr[2].close()
2777 2778 except BaseException as exc:
2778 2779 exc.duringunbundle2 = True
2779 2780 if captureoutput and r is not None:
2780 2781 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2781 2782
2782 2783 def recordout(output):
2783 2784 part = bundle2.bundlepart(
2784 2785 b'output', data=output, mandatory=False
2785 2786 )
2786 2787 parts.append(part)
2787 2788
2788 2789 raise
2789 2790 finally:
2790 2791 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2791 2792 if recordout is not None:
2792 2793 recordout(repo.ui.popbuffer())
2793 2794 return r
2794 2795
2795 2796
2796 2797 def _maybeapplyclonebundle(pullop):
2797 2798 """Apply a clone bundle from a remote, if possible."""
2798 2799
2799 2800 repo = pullop.repo
2800 2801 remote = pullop.remote
2801 2802
2802 2803 if not repo.ui.configbool(b'ui', b'clonebundles'):
2803 2804 return
2804 2805
2805 2806 # Only run if local repo is empty.
2806 2807 if len(repo):
2807 2808 return
2808 2809
2809 2810 if pullop.heads:
2810 2811 return
2811 2812
2812 2813 if not remote.capable(b'clonebundles'):
2813 2814 return
2814 2815
2815 2816 with remote.commandexecutor() as e:
2816 2817 res = e.callcommand(b'clonebundles', {}).result()
2817 2818
2818 2819 # If we call the wire protocol command, that's good enough to record the
2819 2820 # attempt.
2820 2821 pullop.clonebundleattempted = True
2821 2822
2822 2823 entries = parseclonebundlesmanifest(repo, res)
2823 2824 if not entries:
2824 2825 repo.ui.note(
2825 2826 _(
2826 2827 b'no clone bundles available on remote; '
2827 2828 b'falling back to regular clone\n'
2828 2829 )
2829 2830 )
2830 2831 return
2831 2832
2832 2833 entries = filterclonebundleentries(
2833 2834 repo, entries, streamclonerequested=pullop.streamclonerequested
2834 2835 )
2835 2836
2836 2837 if not entries:
2837 2838 # There is a thundering herd concern here. However, if a server
2838 2839 # operator doesn't advertise bundles appropriate for its clients,
2839 2840 # they deserve what's coming. Furthermore, from a client's
2840 2841 # perspective, no automatic fallback would mean not being able to
2841 2842 # clone!
2842 2843 repo.ui.warn(
2843 2844 _(
2844 2845 b'no compatible clone bundles available on server; '
2845 2846 b'falling back to regular clone\n'
2846 2847 )
2847 2848 )
2848 2849 repo.ui.warn(
2849 2850 _(b'(you may want to report this to the server operator)\n')
2850 2851 )
2851 2852 return
2852 2853
2853 2854 entries = sortclonebundleentries(repo.ui, entries)
2854 2855
2855 2856 url = entries[0][b'URL']
2856 2857 repo.ui.status(_(b'applying clone bundle from %s\n') % url)
2857 2858 if trypullbundlefromurl(repo.ui, repo, url):
2858 2859 repo.ui.status(_(b'finished applying clone bundle\n'))
2859 2860 # Bundle failed.
2860 2861 #
2861 2862 # We abort by default to avoid the thundering herd of
2862 2863 # clients flooding a server that was expecting expensive
2863 2864 # clone load to be offloaded.
2864 2865 elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
2865 2866 repo.ui.warn(_(b'falling back to normal clone\n'))
2866 2867 else:
2867 2868 raise error.Abort(
2868 2869 _(b'error applying bundle'),
2869 2870 hint=_(
2870 2871 b'if this error persists, consider contacting '
2871 2872 b'the server operator or disable clone '
2872 2873 b'bundles via '
2873 2874 b'"--config ui.clonebundles=false"'
2874 2875 ),
2875 2876 )
2876 2877
2877 2878
2878 2879 def parseclonebundlesmanifest(repo, s):
2879 2880 """Parses the raw text of a clone bundles manifest.
2880 2881
2881 2882 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2882 2883 to the URL and other keys are the attributes for the entry.
2883 2884 """
2884 2885 m = []
2885 2886 for line in s.splitlines():
2886 2887 fields = line.split()
2887 2888 if not fields:
2888 2889 continue
2889 2890 attrs = {b'URL': fields[0]}
2890 2891 for rawattr in fields[1:]:
2891 2892 key, value = rawattr.split(b'=', 1)
2892 2893 key = urlreq.unquote(key)
2893 2894 value = urlreq.unquote(value)
2894 2895 attrs[key] = value
2895 2896
2896 2897 # Parse BUNDLESPEC into components. This makes client-side
2897 2898 # preferences easier to specify since you can prefer a single
2898 2899 # component of the BUNDLESPEC.
2899 2900 if key == b'BUNDLESPEC':
2900 2901 try:
2901 2902 bundlespec = parsebundlespec(repo, value)
2902 2903 attrs[b'COMPRESSION'] = bundlespec.compression
2903 2904 attrs[b'VERSION'] = bundlespec.version
2904 2905 except error.InvalidBundleSpecification:
2905 2906 pass
2906 2907 except error.UnsupportedBundleSpecification:
2907 2908 pass
2908 2909
2909 2910 m.append(attrs)
2910 2911
2911 2912 return m
2912 2913
2913 2914
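# Hedged example (hypothetical URLs): a two-entry clone bundles manifest as
# served by the ``clonebundles`` wire command, and the shape
# ``parseclonebundlesmanifest`` produces for it:
#
#     https://cdn.example.com/full.zstd.hg BUNDLESPEC=zstd-v2 REQUIRESNI=true
#     https://cdn.example.com/full.packed1.hg BUNDLESPEC=none-packed1
#
# parses to a list of dicts such as:
#
#     [{b'URL': b'https://cdn.example.com/full.zstd.hg',
#       b'BUNDLESPEC': b'zstd-v2', b'REQUIRESNI': b'true',
#       b'COMPRESSION': b'zstd', b'VERSION': b'v2'},
#      {b'URL': b'https://cdn.example.com/full.packed1.hg',
#       b'BUNDLESPEC': b'none-packed1',
#       b'COMPRESSION': b'none', b'VERSION': b'packed1'}]

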
2914 2915 def isstreamclonespec(bundlespec):
2915 2916 # Stream clone v1
2916 2917 if bundlespec.wirecompression == b'UN' and bundlespec.wireversion == b's1':
2917 2918 return True
2918 2919
2919 2920 # Stream clone v2
2920 2921 if (
2921 2922 bundlespec.wirecompression == b'UN'
2922 2923 and bundlespec.wireversion == b'02'
2923 2924 and bundlespec.contentopts.get(b'streamv2')
2924 2925 ):
2925 2926 return True
2926 2927
2927 2928 return False
2928 2929
2929 2930
2930 2931 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2931 2932 """Remove incompatible clone bundle manifest entries.
2932 2933
2933 2934 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2934 2935 and returns a new list consisting of only the entries that this client
2935 2936 should be able to apply.
2936 2937
2937 2938 There is no guarantee we'll be able to apply all returned entries because
2938 2939 the metadata we use to filter on may be missing or wrong.
2939 2940 """
2940 2941 newentries = []
2941 2942 for entry in entries:
2942 2943 spec = entry.get(b'BUNDLESPEC')
2943 2944 if spec:
2944 2945 try:
2945 2946 bundlespec = parsebundlespec(repo, spec, strict=True)
2946 2947
2947 2948 # If a stream clone was requested, filter out non-streamclone
2948 2949 # entries.
2949 2950 if streamclonerequested and not isstreamclonespec(bundlespec):
2950 2951 repo.ui.debug(
2951 2952 b'filtering %s because not a stream clone\n'
2952 2953 % entry[b'URL']
2953 2954 )
2954 2955 continue
2955 2956
2956 2957 except error.InvalidBundleSpecification as e:
2957 2958 repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
2958 2959 continue
2959 2960 except error.UnsupportedBundleSpecification as e:
2960 2961 repo.ui.debug(
2961 2962 b'filtering %s because unsupported bundle '
2962 2963 b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
2963 2964 )
2964 2965 continue
2965 2966 # If we don't have a spec and requested a stream clone, we don't know
2966 2967 # what the entry is so don't attempt to apply it.
2967 2968 elif streamclonerequested:
2968 2969 repo.ui.debug(
2969 2970 b'filtering %s because cannot determine if a stream '
2970 2971 b'clone bundle\n' % entry[b'URL']
2971 2972 )
2972 2973 continue
2973 2974
2974 2975 if b'REQUIRESNI' in entry and not sslutil.hassni:
2975 2976 repo.ui.debug(
2976 2977 b'filtering %s because SNI not supported\n' % entry[b'URL']
2977 2978 )
2978 2979 continue
2979 2980
2980 2981 newentries.append(entry)
2981 2982
2982 2983 return newentries
2983 2984
2984 2985
2985 2986 class clonebundleentry(object):
2986 2987 """Represents an item in a clone bundles manifest.
2987 2988
2988 2989 This rich class is needed to support sorting since sorted() in Python 3
2989 2990 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2990 2991 won't work.
2991 2992 """
2992 2993
2993 2994 def __init__(self, value, prefers):
2994 2995 self.value = value
2995 2996 self.prefers = prefers
2996 2997
2997 2998 def _cmp(self, other):
2998 2999 for prefkey, prefvalue in self.prefers:
2999 3000 avalue = self.value.get(prefkey)
3000 3001 bvalue = other.value.get(prefkey)
3001 3002
3002 3003 # Special case for b missing attribute and a matches exactly.
3003 3004 if avalue is not None and bvalue is None and avalue == prefvalue:
3004 3005 return -1
3005 3006
3006 3007 # Special case for a missing attribute and b matches exactly.
3007 3008 if bvalue is not None and avalue is None and bvalue == prefvalue:
3008 3009 return 1
3009 3010
3010 3011 # We can't compare unless attribute present on both.
3011 3012 if avalue is None or bvalue is None:
3012 3013 continue
3013 3014
3014 3015 # Same values should fall back to next attribute.
3015 3016 if avalue == bvalue:
3016 3017 continue
3017 3018
3018 3019 # Exact matches come first.
3019 3020 if avalue == prefvalue:
3020 3021 return -1
3021 3022 if bvalue == prefvalue:
3022 3023 return 1
3023 3024
3024 3025 # Fall back to next attribute.
3025 3026 continue
3026 3027
3027 3028 # If we got here we couldn't sort by attributes and prefers. Fall
3028 3029 # back to index order.
3029 3030 return 0
3030 3031
3031 3032 def __lt__(self, other):
3032 3033 return self._cmp(other) < 0
3033 3034
3034 3035 def __gt__(self, other):
3035 3036 return self._cmp(other) > 0
3036 3037
3037 3038 def __eq__(self, other):
3038 3039 return self._cmp(other) == 0
3039 3040
3040 3041 def __le__(self, other):
3041 3042 return self._cmp(other) <= 0
3042 3043
3043 3044 def __ge__(self, other):
3044 3045 return self._cmp(other) >= 0
3045 3046
3046 3047 def __ne__(self, other):
3047 3048 return self._cmp(other) != 0
3048 3049
3049 3050
3050 3051 def sortclonebundleentries(ui, entries):
3051 3052 prefers = ui.configlist(b'ui', b'clonebundleprefers')
3052 3053 if not prefers:
3053 3054 return list(entries)
3054 3055
3055 3056 prefers = [p.split(b'=', 1) for p in prefers]
3056 3057
3057 3058 items = sorted(clonebundleentry(v, prefers) for v in entries)
3058 3059 return [i.value for i in items]
3059 3060
3060 3061
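# Hedged example (hypothetical config): given
#
#     [ui]
#     clonebundleprefers = COMPRESSION=zstd, VERSION=v2
#
# ``sortclonebundleentries`` ranks entries whose COMPRESSION is zstd first,
# breaks remaining ties on VERSION=v2, and leaves otherwise-equal entries in
# manifest order (the ``_cmp`` fallback above returns 0).

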
3061 3062 def trypullbundlefromurl(ui, repo, url):
3062 3063 """Attempt to apply a bundle from a URL."""
3063 3064 with repo.lock(), repo.transaction(b'bundleurl') as tr:
3064 3065 try:
3065 3066 fh = urlmod.open(ui, url)
3066 3067 cg = readbundle(ui, fh, b'stream')
3067 3068
3068 3069 if isinstance(cg, streamclone.streamcloneapplier):
3069 3070 cg.apply(repo)
3070 3071 else:
3071 3072 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
3072 3073 return True
3073 3074 except urlerr.httperror as e:
3074 3075 ui.warn(
3075 3076 _(b'HTTP error fetching bundle: %s\n')
3076 3077 % stringutil.forcebytestr(e)
3077 3078 )
3078 3079 except urlerr.urlerror as e:
3079 3080 ui.warn(
3080 3081 _(b'error fetching bundle: %s\n')
3081 3082 % stringutil.forcebytestr(e.reason)
3082 3083 )
3083 3084
3084 3085 return False