##// END OF EJS Templates
index: use `index.get_rev` in `exchange._pullapplyphases`...
marmoute -
r43963:4d8a4ecb default
parent child Browse files
Show More
@@ -1,3090 +1,3090 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullid,
17 17 nullrev,
18 18 )
19 19 from .thirdparty import attr
20 20 from . import (
21 21 bookmarks as bookmod,
22 22 bundle2,
23 23 changegroup,
24 24 discovery,
25 25 error,
26 26 exchangev2,
27 27 lock as lockmod,
28 28 logexchange,
29 29 narrowspec,
30 30 obsolete,
31 31 phases,
32 32 pushkey,
33 33 pycompat,
34 34 scmutil,
35 35 sslutil,
36 36 streamclone,
37 37 url as urlmod,
38 38 util,
39 39 wireprototypes,
40 40 )
41 41 from .interfaces import repository
42 42 from .utils import stringutil
43 43
# Convenience aliases for the urllib compatibility shims in util.
urlerr = util.urlerr
urlreq = util.urlreq

# Config section used for narrow-clone access control rules.
_NARROWACL_SECTION = b'narrowacl'

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {
    b'v1': b'01',
    b'v2': b'02',
    b'packed1': b's1',
    b'bundle2': b'02',  # legacy
}

# Maps bundle version with content opts to choose which part to bundle
# (which bundle2 parts to emit, and which changegroup version to use).
_bundlespeccontentopts = {
    b'v1': {
        b'changegroup': True,
        b'cg.version': b'01',
        b'obsolescence': False,
        b'phases': False,
        b'tagsfnodescache': False,
        b'revbranchcache': False,
    },
    b'v2': {
        b'changegroup': True,
        b'cg.version': b'02',
        b'obsolescence': False,
        b'phases': False,
        b'tagsfnodescache': True,
        b'revbranchcache': True,
    },
    b'packed1': {b'cg.version': b's1'},
}
# 'bundle2' is a legacy alias for the 'v2' content options.
_bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']

# Overrides applied on top of the content opts when a spec variant
# (e.g. ";stream=v2") is requested.
_bundlespecvariants = {
    b"streamv2": {
        b"changegroup": False,
        b"streamv2": True,
        b"tagsfnodescache": False,
        b"revbranchcache": False,
    }
}

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
91 91
@attr.s
class bundlespec(object):
    """Parsed representation of a bundle specification string.

    Produced by ``parsebundlespec``. Holds both the human-facing and the
    wire-protocol names for the compression engine and bundle version,
    plus the raw spec parameters and the derived content options.
    """

    # human name of the compression engine (e.g. b'gzip')
    compression = attr.ib()
    # wire-protocol name of the compression engine (e.g. b'GZ')
    wirecompression = attr.ib()
    # human bundle version name (e.g. b'v2')
    version = attr.ib()
    # changegroup version used on the wire (e.g. b'02')
    wireversion = attr.ib()
    # dict of extra ";key=value" parameters from the spec string
    params = attr.ib()
    # dict of content options controlling which parts get bundled
    contentopts = attr.ib()
101 101
102 102 def _sortedmarkers(markers):
103 103 # last item of marker tuple ('parents') may be None or a tuple
104 104 return sorted(markers, key=lambda m: m[:-1] + (m[-1] or (),))
105 105
106 106
def parsebundlespec(repo, spec, strict=True):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    Returns a bundlespec object of (compression, version, parameters).
    Compression will be ``None`` if not in strict mode and a compression isn't
    defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """

    def parseparams(s):
        # Split "<version>[;key=value...]" into the version string and a
        # dict of URI-decoded parameters. A string without ";" carries no
        # parameters.
        if b';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(b';', 1)

        for p in paramstr.split(b';'):
            if b'=' not in p:
                raise error.InvalidBundleSpecification(
                    _(
                        b'invalid bundle specification: '
                        b'missing "=" in parameter: %s'
                    )
                    % p
                )

            key, value = p.split(b'=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params

    # In strict mode the "<compression>-" prefix is mandatory.
    if strict and b'-' not in spec:
        raise error.InvalidBundleSpecification(
            _(
                b'invalid bundle specification; '
                b'must be prefixed with compression: %s'
            )
            % spec
        )

    if b'-' in spec:
        # Fully qualified spec: validate both halves.
        compression, version = spec.split(b'-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _(b'%s compression is not supported') % compression
            )

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _(b'%s is not a recognized bundle version') % version
            )
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = b'v1'
            # Generaldelta repos require v2.
            if b'generaldelta' in repo.requirements:
                version = b'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = b'v2'
        elif spec in _bundlespeccgversions:
            if spec == b'packed1':
                compression = b'none'
            else:
                compression = b'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _(b'%s is not a recognized bundle specification') % spec
            )

    # Bundle version 1 only supports a known set of compression engines.
    if version == b'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _(b'compression engine %s is not supported on v1 bundles')
            % compression
        )

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == b'packed1' and b'requirements' in params:
        requirements = set(params[b'requirements'].split(b','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _(b'missing support for repository features: %s')
                % b', '.join(sorted(missingreqs))
            )

    # Compute contentopts based on the version
    contentopts = _bundlespeccontentopts.get(version, {}).copy()

    # Process the variants
    if b"stream" in params and params[b"stream"] == b"v2":
        variant = _bundlespecvariants[b"streamv2"]
        contentopts.update(variant)

    # Resolve the wire-protocol names for the compression and version.
    engine = util.compengines.forbundlename(compression)
    compression, wirecompression = engine.bundletype()
    wireversion = _bundlespeccgversions[version]

    return bundlespec(
        compression, wirecompression, version, wireversion, params, contentopts
    )
249 249
250 250
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler object for the bundle read from ``fh``.

    ``fname`` is used for error reporting; ``None``/empty means the data
    comes from an anonymous stream. When ``vfs`` is provided, ``fname`` is
    resolved relative to it. Depending on the 4-byte magic header this
    returns a cg1unpacker, a bundle2 unbundler, or a streamclone applier.

    Raises ``error.Abort`` when the header is not a known bundle magic.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = b"stream"
        # A stream starting with a NUL byte is assumed to be a headerless,
        # uncompressed HG10 bundle: re-inject the bytes we consumed and
        # synthesize the header.
        if not header.startswith(b'HG') and header.startswith(b'\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = b"HG10"
            alg = b'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != b'HG':
        raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
    if version == b'10':
        # HG10 carries a 2-byte compression algorithm after the header,
        # unless we already fixed one up above.
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith(b'2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == b'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(
            _(b'%s: unknown bundle version %s') % (fname, version)
        )
280 280
281 281
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.

    Returns a bundlespec string such as ``b'gzip-v1'`` or
    ``b'none-v2;stream=v2;...'``; raises ``error.Abort`` when the bundle
    type, compression, or changegroup version cannot be identified.
    """

    def speccompression(alg):
        # Map a wire compression type (e.g. b'GZ') back to its bundlespec
        # name (e.g. b'gzip'); None when the engine is unknown.
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        # b'_truncatedBZ' marks a bzip2 stream whose header was consumed;
        # it is still plain bzip2 for spec purposes.
        if alg == b'_truncatedBZ':
            alg = b'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
        return b'%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if b'Compression' in b.params:
            comp = speccompression(b.params[b'Compression'])
            if not comp:
                raise error.Abort(
                    _(b'unknown compression algorithm: %s') % comp
                )
        else:
            comp = b'none'

        # Scan the parts to determine the changegroup version (or detect a
        # stream2 bundle, which short-circuits to a fixed spec).
        version = None
        for part in b.iterparts():
            if part.type == b'changegroup':
                version = part.params[b'version']
                if version in (b'01', b'02'):
                    version = b'v2'
                else:
                    raise error.Abort(
                        _(
                            b'changegroup version %s does not have '
                            b'a known bundlespec'
                        )
                        % version,
                        hint=_(b'try upgrading your Mercurial client'),
                    )
            elif part.type == b'stream2' and version is None:
                # A stream2 part requires to be part of a v2 bundle
                requirements = urlreq.unquote(part.params[b'requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return b'none-v2;stream=v2;%s' % params

        if not version:
            raise error.Abort(
                _(b'could not identify changegroup version in bundle')
            )

        return b'%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        # packed1 bundles advertise the repo requirements in their header.
        requirements = streamclone.readbundle1header(fh)[2]
        formatted = bundle2._formatrequirementsparams(requirements)
        return b'none-packed1;%s' % formatted
    else:
        raise error.Abort(_(b'unknown bundle type: %s') % b)
348 348
349 349
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if not common:
        # no common nodes known: everything from the null revision up
        common = [nullid]
    else:
        # drop common nodes the local changelog does not actually have
        hasnode = cl.hasnode
        common = [node for node in common if hasnode(node)]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)
368 368
369 369
def _checkpublish(pushop):
    """Warn, prompt, or abort when a push would publish draft changesets.

    Honors the ``experimental.auto-publish`` config (``warn``, ``confirm``
    or ``abort``) when pushing to a publishing server without an explicit
    ``--publish``. Does nothing for other config values or when the push
    already requested publishing.
    """
    repo = pushop.repo
    ui = repo.ui
    behavior = ui.config(b'experimental', b'auto-publish')
    if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
        return
    # Only publishing servers implicitly publish on push.
    remotephases = listkeys(pushop.remote, b'phases')
    if not remotephases.get(b'publishing', False):
        return

    # Compute the set of non-public changesets this push would publish.
    if pushop.revs is None:
        published = repo.filtered(b'served').revs(b'not public()')
    else:
        published = repo.revs(b'::%ln - public()', pushop.revs)
    if published:
        if behavior == b'warn':
            ui.warn(
                _(b'%i changesets about to be published\n') % len(published)
            )
        elif behavior == b'confirm':
            if ui.promptchoice(
                _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
                % len(published)
            ):
                raise error.Abort(_(b'user quit'))
        elif behavior == b'abort':
            msg = _(b'push would publish %i changesets') % len(published)
            hint = _(
                b"use --publish or adjust 'experimental.auto-publish'"
                b" config"
            )
            raise error.Abort(msg, hint=hint)
402 402
403 403
404 404 def _forcebundle1(op):
405 405 """return true if a pull/push must use bundle1
406 406
407 407 This function is used to allow testing of the older bundle version"""
408 408 ui = op.repo.ui
409 409 # The goal is this config is to allow developer to choose the bundle
410 410 # version used during exchanged. This is especially handy during test.
411 411 # Value is a list of bundle version to be picked from, highest version
412 412 # should be used.
413 413 #
414 414 # developer config: devel.legacy.exchange
415 415 exchange = ui.configlist(b'devel', b'legacy.exchange')
416 416 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
417 417 return forcebundle1 or not op.remote.capable(b'bundle2')
418 418
419 419
class pushoperation(object):
    """A object that represent a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(
        self,
        repo,
        remote,
        force=False,
        revs=None,
        newbranch=False,
        bookmarks=(),
        publish=False,
        pushvars=None,
    ):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars
        # publish pushed changesets
        self.publish = publish

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        rev = self.repo.changelog.index.rev
        cheads = [node for node in self.revs if rev(node) in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set(
            b'%ln and parents(roots(%ln))',
            self.outgoing.commonheads,
            self.outgoing.missing,
        )
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        # cgresult truthy means the changegroup push succeeded, so the
        # pushed heads became common; otherwise fall back to the
        # pre-push common heads.
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
550 550
551 551
# mapping of message used when pushing bookmark
# action name -> (success message, failure message); each message takes
# the bookmark name as its single %s argument.
bookmsgmap = {
    b'update': (
        _(b"updating bookmark %s\n"),
        _(b'updating bookmark %s failed!\n'),
    ),
    b'export': (
        _(b"exporting bookmark %s\n"),
        _(b'exporting bookmark %s failed!\n'),
    ),
    b'delete': (
        _(b"deleting remote bookmark %s\n"),
        _(b'deleting remote bookmark %s failed!\n'),
    ),
}
567 567
568 568
def push(
    repo,
    remote,
    force=False,
    revs=None,
    newbranch=False,
    bookmarks=(),
    publish=False,
    opargs=None,
):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()

    ``opargs`` is an optional dict of extra keyword arguments forwarded to
    the pushoperation constructor. The function returns the pushoperation
    object itself (callers inspect its result attributes).
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(
        repo,
        remote,
        force,
        revs,
        newbranch,
        bookmarks,
        publish,
        **pycompat.strkwargs(opargs)
    )
    # For a local destination, refuse to push when the target repository
    # does not support all of our requirements.
    if pushop.remote.local():
        missing = (
            set(pushop.repo.requirements) - pushop.remote.local().supported
        )
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_(b"destination does not support push"))

    if not pushop.remote.capable(b'unbundle'):
        raise error.Abort(
            _(
                b'cannot push: destination does not support the '
                b'unbundle wire protocol command'
            )
        )

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks
        # requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
        if (
            (not _forcebundle1(pushop))
            and maypushback
            and not bookmod.bookmarksinstore(repo)
        ):
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(
            pushop.repo, b'push-response', pushop.remote.url()
        )
    except error.LockUnavailable as err:
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
            err
        )
        pushop.ui.debug(msg)

    # Locks (when acquired) are released in reverse order by the nested
    # context managers; nullcontextmanager stands in when a lock was not
    # taken.
    with wlock or util.nullcontextmanager():
        with lock or util.nullcontextmanager():
            with pushop.trmanager or util.nullcontextmanager():
                pushop.repo.checkpush(pushop)
                _checkpublish(pushop)
                _pushdiscovery(pushop)
                if not _forcebundle1(pushop):
                    _pushbundle2(pushop)
                _pushchangeset(pushop)
                _pushsyncphase(pushop)
                _pushobsolete(pushop)
                _pushbookmark(pushop)

    if repo.ui.configbool(b'experimental', b'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pushop
664 664
665 665
666 666 # list of steps to perform discovery before push
667 667 pushdiscoveryorder = []
668 668
669 669 # Mapping between step name and function
670 670 #
671 671 # This exists to help extensions wrap steps if necessary
672 672 pushdiscoverymapping = {}
673 673
674 674
675 675 def pushdiscovery(stepname):
676 676 """decorator for function performing discovery before push
677 677
678 678 The function is added to the step -> function mapping and appended to the
679 679 list of steps. Beware that decorated function will be added in order (this
680 680 may matter).
681 681
682 682 You can only use this decorator for a new step, if you want to wrap a step
683 683 from an extension, change the pushdiscovery dictionary directly."""
684 684
685 685 def dec(func):
686 686 assert stepname not in pushdiscoverymapping
687 687 pushdiscoverymapping[stepname] = func
688 688 pushdiscoveryorder.append(stepname)
689 689 return func
690 690
691 691 return dec
692 692
693 693
694 694 def _pushdiscovery(pushop):
695 695 """Run all discovery steps"""
696 696 for stepname in pushdiscoveryorder:
697 697 step = pushdiscoverymapping[stepname]
698 698 step(pushop)
699 699
700 700
@pushdiscovery(b'changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    # First figure out what the remote has in common with us (and whether
    # anything is incoming), limited to the pushed revs when given.
    if pushop.revs:
        commoninc = discovery.findcommonincoming(
            pushop.repo,
            pushop.remote,
            force=pushop.force,
            ancestorsof=pushop.revs,
        )
    else:
        commoninc = discovery.findcommonincoming(
            pushop.repo, pushop.remote, force=pushop.force
        )
    common, inc, remoteheads = commoninc
    # Then derive the outgoing set from that common information.
    outgoing = discovery.findcommonoutgoing(
        pushop.repo,
        pushop.remote,
        onlyheads=pushop.revs,
        commoninc=commoninc,
        force=pushop.force,
    )
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
726 726
727 727
@pushdiscovery(b'phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = listkeys(pushop.remote, b'phases')

    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and not pushop.outgoing.missing  # no changesets to be pushed
        and remotephases.get(b'publishing', False)
    ):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3781 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    # Summarize the remote phase situation relative to our fallback heads.
    pushop.remotephases = phases.remotephasessummary(
        pushop.repo, pushop.fallbackheads, remotephases
    )
    droots = pushop.remotephases.draftroots

    extracond = b''
    if not pushop.remotephases.publishing:
        extracond = b' and public()'
    revset = b'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not pushop.remotephases.publishing and pushop.publish:
        # --publish on a non-publishing server: everything non-public (or
        # descended from a remote draft root) among the future heads must
        # be published.
        future = list(
            unfi.set(
                b'%ln and (not public() or %ln::)', pushop.futureheads, droots
            )
        )
    elif not outgoing.missing:
        # nothing to push: success and failure cases coincide
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(
            unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
        )
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
789 789
790 790
@pushdiscovery(b'obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect the obsolescence markers relevant to the push, if any"""
    repo = pushop.repo

    # Bail out early when markers cannot or need not be exchanged.
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
        return

    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
807 807
808 808
@pushdiscovery(b'bookmarks')
def _pushdiscoverybookmarks(pushop):
    """compare local and remote bookmarks and decide which ones to push"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug(b"checking for updated bookmarks\n")

    # When pushing a subset of revs, restrict bookmark moves to their
    # ancestors; an empty tuple means "no restriction".
    if pushop.revs:
        revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    else:
        ancestors = ()

    remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))

    # Expand bookmark names the user asked for explicitly.
    explicit = {
        repo._bookmarks.expandname(name) for name in pushop.bookmarks
    }

    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
828 828
829 829
830 830 def _processcompared(pushop, pushed, explicit, remotebms, comp):
831 831 """take decision on bookmarks to push to the remote repo
832 832
833 833 Exists to help extensions alter this behavior.
834 834 """
835 835 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
836 836
837 837 repo = pushop.repo
838 838
839 839 for b, scid, dcid in advsrc:
840 840 if b in explicit:
841 841 explicit.remove(b)
842 842 if not pushed or repo[scid].rev() in pushed:
843 843 pushop.outbookmarks.append((b, dcid, scid))
844 844 # search added bookmark
845 845 for b, scid, dcid in addsrc:
846 846 if b in explicit:
847 847 explicit.remove(b)
848 848 pushop.outbookmarks.append((b, b'', scid))
849 849 # search for overwritten bookmark
850 850 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
851 851 if b in explicit:
852 852 explicit.remove(b)
853 853 pushop.outbookmarks.append((b, dcid, scid))
854 854 # search for bookmark to delete
855 855 for b, scid, dcid in adddst:
856 856 if b in explicit:
857 857 explicit.remove(b)
858 858 # treat as "deleted locally"
859 859 pushop.outbookmarks.append((b, dcid, b''))
860 860 # identical bookmarks shouldn't get reported
861 861 for b, scid, dcid in same:
862 862 if b in explicit:
863 863 explicit.remove(b)
864 864
865 865 if explicit:
866 866 explicit = sorted(explicit)
867 867 # we should probably list all of them
868 868 pushop.ui.warn(
869 869 _(
870 870 b'bookmark %s does not exist on the local '
871 871 b'or remote repository!\n'
872 872 )
873 873 % explicit[0]
874 874 )
875 875 pushop.bkresult = 2
876 876
877 877 pushop.outbookmarks.sort()
878 878
879 879
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before pushing.

    Returns False when there is nothing to push. Aborts when the push
    would propagate obsolete or unstable changesets (unless forced), and
    delegates head checking to ``discovery.checkheads``.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are defined here to stay within the 80-char limit
            mso = _(b"push includes obsolete changeset: %s!")
            mspd = _(b"push includes phase-divergent changeset: %s!")
            mscd = _(b"push includes content-divergent changeset: %s!")
            mst = {
                b"orphan": _(b"push includes orphan changeset: %s!"),
                b"phase-divergent": mspd,
                b"content-divergent": mscd,
            }
            # If there is at least one obsolete or unstable changeset in
            # missing, then at least one of the missing heads will be
            # obsolete or unstable. So checking heads only is ok.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
916 916
917 917
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}


def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the b2partsgenmapping dictionary directly."""

    def register(func):
        # Duplicate step names indicate a programming error, not user input.
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func

    return register
947 947
948 948
949 949 def _pushb2ctxcheckheads(pushop, bundler):
950 950 """Generate race condition checking parts
951 951
952 952 Exists as an independent function to aid extensions
953 953 """
954 954 # * 'force' do not check for push race,
955 955 # * if we don't push anything, there are nothing to check.
956 956 if not pushop.force and pushop.outgoing.missingheads:
957 957 allowunrelated = b'related' in bundler.capabilities.get(
958 958 b'checkheads', ()
959 959 )
960 960 emptyremote = pushop.pushbranchmap is None
961 961 if not allowunrelated or emptyremote:
962 962 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
963 963 else:
964 964 affected = set()
965 965 for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
966 966 remoteheads, newheads, unsyncedheads, discardedheads = heads
967 967 if remoteheads is not None:
968 968 remote = set(remoteheads)
969 969 affected |= set(discardedheads) & remote
970 970 affected |= remote - set(newheads)
971 971 if affected:
972 972 data = iter(sorted(affected))
973 973 bundler.newpart(b'check:updated-heads', data=data)
974 974
975 975
976 976 def _pushing(pushop):
977 977 """return True if we are pushing anything"""
978 978 return bool(
979 979 pushop.outgoing.missing
980 980 or pushop.outdatedphases
981 981 or pushop.outobsmarkers
982 982 or pushop.outbookmarks
983 983 )
984 984
985 985
@b2partsgenerator(b'check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking

    Emits a ``check:bookmarks`` part recording the expected old position of
    every outgoing bookmark, when the remote supports that part.
    """
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if b'bookmarks' not in b2caps or not pushop.outbookmarks:
        return
    data = [(book, old) for book, old, new in pushop.outbookmarks]
    checkdata = bookmod.binaryencode(data)
    bundler.newpart(b'check:bookmarks', data=checkdata)
1000 1000
1001 1001
@b2partsgenerator(b'check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # b'heads' in the remote's 'phases' capability means it can process
    # the binary-encoded 'check:phases' part
    hasphaseheads = b'heads' in b2caps.get(b'phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            for nodes in checks:
                # sort for a stable, comparable binary encoding
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart(b'check:phases', data=checkdata)
1019 1019
1020 1020
@b2partsgenerator(b'changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    # negotiate the highest changegroup version supported by both sides
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = b'01'
    cgversions = b2caps.get(b'changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [
            v
            for v in cgversions
            if v in changegroup.supportedoutgoingversions(pushop.repo)
        ]
        if not cgversions:
            raise error.Abort(_(b'no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(
        pushop.repo, pushop.outgoing, version, b'push'
    )
    cgpart = bundler.newpart(b'changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam(b'version', version)
    # advertise local requirements that affect changegroup contents
    if b'treemanifest' in pushop.repo.requirements:
        cgpart.addparam(b'treemanifest', b'1')
    if b'exp-sidedata-flag' in pushop.repo.requirements:
        cgpart.addparam(b'exp-sidedata', b'1')

    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies[b'changegroup']) == 1
        pushop.cgresult = cgreplies[b'changegroup'][0][b'return']

    return handlereply
1067 1067
1068 1068
@b2partsgenerator(b'phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Dispatches to the binary phase-heads part when the remote supports it
    (and legacy exchange is not requested), falling back to pushkey."""
    if b'phases' in pushop.stepsdone:
        return
    remotecaps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    forcelegacy = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
    supportspushkey = b'pushkey' in remotecaps
    supportsheads = b'heads' in remotecaps.get(b'phases', ())

    if supportsheads and not forcelegacy:
        return _pushb2phaseheads(pushop, bundler)
    if supportspushkey:
        return _pushb2phasespushkey(pushop, bundler)
1085 1085
1086 1086
1087 1087 def _pushb2phaseheads(pushop, bundler):
1088 1088 """push phase information through a bundle2 - binary part"""
1089 1089 pushop.stepsdone.add(b'phases')
1090 1090 if pushop.outdatedphases:
1091 1091 updates = [[] for p in phases.allphases]
1092 1092 updates[0].extend(h.node() for h in pushop.outdatedphases)
1093 1093 phasedata = phases.binaryencode(updates)
1094 1094 bundler.newpart(b'phase-heads', data=phasedata)
1095 1095
1096 1096
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part

    One pushkey part is emitted per head to turn public (draft -> public);
    the returned handler inspects the replies and warns on failure.
    """
    pushop.stepsdone.add(b'phases')
    # (part id, node) pairs, used to map reply parts back to their node
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_(b'updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'phases'))
        part.addparam(b'key', enc(newremotehead.hex()))
        part.addparam(b'old', enc(b'%d' % phases.draft))
        part.addparam(b'new', enc(b'%d' % phases.public))
        part2node.append((part.id, newremotehead))
        # register the failure callback so _pushbundle2 can report a
        # meaningful error for this specific part
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _(b'server ignored update of %s to public!\n') % node
            elif not int(results[0][b'return']):
                msg = _(b'updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)

    return handlereply
1132 1132
1133 1133
@b2partsgenerator(b'obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """push obsolescence markers through a bundle2 part"""
    if b'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no obsmarker format is common with the remote: skip the step
        # entirely (and leave it out of stepsdone so fallbacks may run)
        return
    pushop.stepsdone.add(b'obsmarkers')
    if pushop.outobsmarkers:
        markers = _sortedmarkers(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)
1145 1145
1146 1146
@b2partsgenerator(b'bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Prefers the dedicated binary ``bookmarks`` part when the remote
    supports it and legacy exchange is not requested; otherwise falls
    back to one pushkey part per bookmark."""
    if b'bookmarks' in pushop.stepsdone:
        return
    remotecaps = bundle2.bundle2caps(pushop.remote)

    legacyexchange = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
    forcelegacy = b'bookmarks' in legacyexchange

    if b'bookmarks' in remotecaps and not forcelegacy:
        return _pushb2bookmarkspart(pushop, bundler)
    if b'pushkey' in remotecaps:
        return _pushb2bookmarkspushkey(pushop, bundler)
1161 1161
1162 1162
1163 1163 def _bmaction(old, new):
1164 1164 """small utility for bookmark pushing"""
1165 1165 if not old:
1166 1166 return b'export'
1167 1167 elif not new:
1168 1168 return b'delete'
1169 1169 return b'update'
1170 1170
1171 1171
1172 1172 def _abortonsecretctx(pushop, node, b):
1173 1173 """abort if a given bookmark points to a secret changeset"""
1174 1174 if node and pushop.repo[node].phase() == phases.secret:
1175 1175 raise error.Abort(
1176 1176 _(b'cannot push bookmark %s as it points to a secret changeset') % b
1177 1177 )
1178 1178
1179 1179
def _pushb2bookmarkspart(pushop, bundler):
    """push bookmark information through a bundle2 - binary part

    All outgoing bookmarks are binary-encoded into a single ``bookmarks``
    part; the returned handler reports each move on success.
    """
    pushop.stepsdone.add(b'bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        # refuse to expose secret changesets through a bookmark
        _abortonsecretctx(pushop, new, book)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart(b'bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # if success
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply
1201 1201
1202 1202
def _pushb2bookmarkspushkey(pushop, bundler):
    """push bookmark information through a bundle2 - pushkey parts

    One pushkey part is emitted per outgoing bookmark; the returned
    handler inspects the replies and reports each outcome to the user.
    """
    pushop.stepsdone.add(b'bookmarks')
    # (part id, bookmark name, action) triples used to map replies back
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        # refuse to expose secret changesets through a bookmark
        _abortonsecretctx(pushop, new, book)
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'bookmarks'))
        part.addparam(b'key', enc(book))
        part.addparam(b'old', enc(hex(old)))
        part.addparam(b'new', enc(hex(new)))
        action = b'update'
        if not old:
            action = b'export'
        elif not new:
            action = b'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0][b'return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1

    return handlereply
1249 1249
1250 1250
@b2partsgenerator(b'pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    """send shellvars via bundle2"""
    if not pushop.pushvars:
        return
    shellvars = {}
    for raw in pushop.pushvars:
        if b'=' not in raw:
            msg = (
                b"unable to parse variable '%s', should follow "
                b"'KEY=VALUE' or 'KEY=' format"
            )
            raise error.Abort(msg % raw)
        name, value = raw.split(b'=', 1)
        shellvars[name] = value

    part = bundler.newpart(b'pushvars')

    for key, value in pycompat.iteritems(shellvars):
        part.addparam(key, value, mandatory=False)
1271 1271
1272 1272
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = pushop.trmanager and pushop.ui.configbool(
        b'experimental', b'bundle2.pushback'
    )

    # create reply capability
    capsblob = bundle2.encodecaps(
        bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
    )
    bundler.newpart(b'replycaps', data=capsblob)
    # run every registered part generator, collecting reply handlers
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    # (the bundle contains only the 'replycaps' part added above)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand(
                    b'unbundle',
                    {
                        b'bundle': stream,
                        b'heads': [b'force'],
                        b'url': pushop.remote.url(),
                    },
                ).result()
        except error.BundleValueError as exc:
            raise error.Abort(_(b'missing support for %s') % exc)
        try:
            # process the server's reply bundle, opening a local
            # transaction only when pushback was negotiated
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_(b'missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_(b'remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
            raise error.Abort(_(b'push failed on remote'))
    except error.PushkeyFailed as exc:
        # delegate to the per-part failure callback when one is registered
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
1330 1330
1331 1331
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Legacy (non-bundle2) path: builds a version '01' changegroup and sends
    it through the peer's ``unbundle`` command. The server's return value
    is stored in ``pushop.cgresult``.
    """
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable(b'unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (
        outgoing.excluded or pushop.repo.changelog.filteredrevs
    ):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(
            pushop.repo,
            outgoing,
            b'01',
            b'push',
            fastpath=True,
            bundlecaps=bundlecaps,
        )
    else:
        cg = changegroup.makechangegroup(
            pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
        )

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = [b'force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1378 1378
1379 1379
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, b'phases')
    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and pushop.cgresult is None  # nothing was pushed
        and remotephases.get(b'publishing', False)
    ):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {b'publishing': b'True'}
    if not remotephases:  # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get(b'publishing', False):
            # publishing remote: everything common becomes public locally
            _localphasemove(pushop, cheads)
        else:  # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if b'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add(b'phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand(
                    b'pushkey',
                    {
                        b'namespace': b'phases',
                        b'key': newremotehead.hex(),
                        b'old': b'%d' % phases.draft,
                        b'new': b'%d' % phases.public,
                    },
                ).result()

            if not r:
                pushop.ui.warn(
                    _(b'updating %s to public failed!\n') % newremotehead
                )
1444 1444
1445 1445
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo

    Requires an active transaction manager on ``pushop``; without one the
    phases are left untouched and the user is warned about the skipped
    update when any node would actually have moved.
    """
    if pushop.trmanager:
        phases.advanceboundary(
            pushop.repo, pushop.trmanager.transaction(), phase, nodes
        )
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(
                _(
                    b'cannot lock source repo, skipping '
                    b'local %s phase update\n'
                )
                % phasestr
            )
1466 1466
1467 1467
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote

    Legacy (non-bundle2) path: markers are escaped into pushkey payloads
    and sent one key at a time through the peer's ``pushkey`` command.
    """
    if b'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add(b'obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug(b'try to push obsolete markers to remote\n')
        rslts = []
        markers = _sortedmarkers(pushop.outobsmarkers)
        remotedata = obsolete._pushkeyescape(markers)
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey(b'obsolete', key, b'', data))
        # any falsy return value means that key's push failed
        if [r for r in rslts if not r]:
            msg = _(b'failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
1487 1487
1488 1488
def _pushbookmark(pushop):
    """Update bookmark position on remote

    Legacy (non-bundle2) path: each outgoing bookmark is pushed through an
    independent ``pushkey`` command and the result reported to the user.
    """
    # cgresult == 0 means the changegroup push failed: don't move bookmarks
    if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = b'update'
        if not old:
            action = b'export'
        elif not new:
            action = b'delete'

        with remote.commandexecutor() as e:
            r = e.callcommand(
                b'pushkey',
                {
                    b'namespace': b'bookmarks',
                    b'key': b,
                    b'old': hex(old),
                    b'new': hex(new),
                },
            ).result()

        if r:
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
        # discovery can have set the value form invalid entry
        if pushop.bkresult is not None:
            pushop.bkresult = 1
1522 1522
1523 1523
class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(
        self,
        repo,
        remote,
        heads=None,
        force=False,
        bookmarks=(),
        remotebookmarks=None,
        streamclonerequested=None,
        includepats=None,
        excludepats=None,
        depth=None,
    ):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = [
            repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
        ]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False
        # Set of file patterns to include.
        self.includepats = includepats
        # Set of file patterns to exclude.
        self.excludepats = excludepats
        # Number of ancestor changesets to pull from each pulled head.
        self.depth = depth

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        # True unless configuration/capabilities force the bundle1 protocol
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        # bundle2 capabilities advertised by the remote peer
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1612 1612
1613 1613
class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        # the underlying transaction, created lazily by transaction()
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            # expose source and url to hooks run on transaction close
            self._tr.hookargs[b'source'] = self.source
            self._tr.hookargs[b'url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
1644 1644
1645 1645
def listkeys(remote, namespace):
    """run the ``listkeys`` wire command for ``namespace`` on a peer"""
    args = {b'namespace': namespace}
    with remote.commandexecutor() as executor:
        future = executor.callcommand(b'listkeys', args)
        return future.result()
1649 1649
1650 1650
def _fullpullbundle2(repo, pullop):
    """repeatedly pull via bundle2 until the reply is complete"""
    # The server may send a partial reply, i.e. when inlining
    # pre-computed bundles. In that case, update the common
    # set based on the results and pull another bundle.
    #
    # There are two indicators that the process is finished:
    # - no changeset has been added, or
    # - all remote heads are known locally.
    # The head check must use the unfiltered view as obsoletion
    # markers can hide heads.
    unfi = repo.unfiltered()
    unficl = unfi.changelog

    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
        return set(ctx.node() for ctx in res)

    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
        return set(ctx.node() for ctx in res)

    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if repository.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            # no changeset was added by this round: we are done
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            # every remote head is now known locally: we are done
            break
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common
1691 1691
1692 1692
def pull(
    repo,
    remote,
    heads=None,
    force=False,
    bookmarks=(),
    opargs=None,
    streamclonerequested=None,
    includepats=None,
    excludepats=None,
    depth=None,
):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.
    ``depth`` is an integer indicating the DAG depth of history we're
    interested in. If defined, for each revision specified in ``heads``, we
    will fetch up to this many of its ancestors and data associated with them.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}

    # We allow the narrow patterns to be passed in explicitly to provide more
    # flexibility for API consumers.
    if includepats or excludepats:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    pullop = pulloperation(
        repo,
        remote,
        heads,
        force,
        bookmarks=bookmarks,
        streamclonerequested=streamclonerequested,
        includepats=includepats,
        excludepats=excludepats,
        depth=depth,
        **pycompat.strkwargs(opargs)
    )

    # refuse to pull from a local peer whose requirements we don't support
    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
    # bookmarks kept outside the store also need the working-copy lock
    wlock = util.nullcontextmanager()
    if not bookmod.bookmarksinstore(repo):
        wlock = repo.wlock()
    with wlock, repo.lock(), pullop.trmanager:
        # Use the modern wire protocol, if available.
        if remote.capable(b'command-changesetdata'):
            exchangev2.pull(pullop)
        else:
            # This should ideally be in _pullbundle2(). However, it needs to run
            # before discovery to avoid extra work.
            _maybeapplyclonebundle(pullop)
            streamclone.maybeperformlegacystreamclone(pullop)
            _pulldiscovery(pullop)
            if pullop.canusebundle2:
                _fullpullbundle2(repo, pullop)
            _pullchangeset(pullop)
            _pullphase(pullop)
            _pullbookmarks(pullop)
            _pullobsolete(pullop)

    # storing remotenames
    if repo.ui.configbool(b'experimental', b'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pullop
1795 1795
1796 1796
# list of steps to perform discovery before pull; steps run in this order
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
1804 1804
1805 1805
def pulldiscovery(stepname):
    """Decorator registering a pre-pull discovery step.

    The decorated function is recorded in the step -> function mapping and
    the step name is appended to the ordered step list, so decoration order
    may matter.

    Only use this decorator for brand new steps; to wrap a step defined by
    an extension, mutate the pulldiscovery dictionary directly."""

    def register(func):
        # each step name may be registered exactly once
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func

    return register
1823 1823
1824 1824
def _pulldiscovery(pullop):
    """Run every registered discovery step, in registration order."""
    for name in pulldiscoveryorder:
        pulldiscoverymapping[name](pullop)
1830 1830
1831 1831
@pulldiscovery(b'b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    Without bundle2, bookmarks must be fetched before changeset discovery
    to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # bookmark data already known, nothing to fetch
        return
    if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice
        # with new implementations and let bundle2 handle it.
        return
    raw = listkeys(pullop.remote, b'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(raw)
1846 1846
1847 1847
@pulldiscovery(b'changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently only handles changeset discovery; eventually meant to handle
    all of discovery."""
    common, fetch, rheads = discovery.findcommonincoming(
        pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
    )
    has_node = pullop.repo.unfiltered().changelog.index.has_node
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but
        # locally hidden" situations.  We do not perform discovery on the
        # unfiltered repository because that ends up doing a pathological
        # amount of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but is not reachable from a remote head, we won't be able
        # to detect it.
        known = set(common)
        for node in rheads:
            if has_node(node) and node not in known:
                common.append(node)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1879 1879
1880 1880
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup.

    Builds the ``getbundle`` argument dict step by step (changegroup,
    phases, bookmarks, clone-bundle flag, obsmarkers), fetches the bundle
    from the remote and processes it, then applies any phase/bookmark data
    returned as listkeys records."""
    kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs[b'common'] = pullop.common
    kwargs[b'heads'] = pullop.heads or pullop.rheads

    # check server supports narrow and then adding includepats and excludepats
    servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
    if servernarrow and pullop.includepats:
        kwargs[b'includepats'] = pullop.includepats
    if servernarrow and pullop.excludepats:
        kwargs[b'excludepats'] = pullop.excludepats

    if streaming:
        # stream clone replaces the changegroup/phases steps entirely
        kwargs[b'cg'] = False
        kwargs[b'stream'] = True
        pullop.stepsdone.add(b'changegroup')
        pullop.stepsdone.add(b'phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add(b'changegroup')

        kwargs[b'cg'] = pullop.fetch

        # prefer the binary phase-heads part unless disabled via
        # devel.legacy.exchange or unsupported by the server
        legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
        hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
        if not legacyphase and hasbinaryphase:
            kwargs[b'phases'] = True
            pullop.stepsdone.add(b'phases')

        if b'listkeys' in pullop.remotebundle2caps:
            if b'phases' not in pullop.stepsdone:
                # fall back to the legacy listkeys-based phase exchange
                kwargs[b'listkeys'] = [b'phases']

    bookmarksrequested = False
    legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
    hasbinarybook = b'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add(b'request-bookmarks')

    if (
        b'request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark
        and hasbinarybook
    ):
        kwargs[b'bookmarks'] = True
        bookmarksrequested = True

    if b'listkeys' in pullop.remotebundle2caps:
        if b'request-bookmarks' not in pullop.stepsdone:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add(b'request-bookmarks')
            kwargs.setdefault(b'listkeys', []).append(b'bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (
        pullop.remote.capable(b'clonebundles')
        and pullop.heads is None
        and list(pullop.common) == [nullid]
    ):
        kwargs[b'cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_(b'streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_(b"requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs[b'obsmarkers'] = True
            pullop.stepsdone.add(b'obsmarkers')
    # hook point for extensions to tweak the getbundle arguments
    _pullbundle2extraprepare(pullop, kwargs)

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args[b'source'] = b'pull'
        bundle = e.callcommand(b'getbundle', args).result()

        try:
            op = bundle2.bundleoperation(
                pullop.repo, pullop.gettransaction, source=b'pull'
            )
            # record bookmark data instead of applying it immediately
            op.modes[b'bookmarks'] = b'records'
            bundle2.processbundle(pullop.repo, bundle, op=op)
        except bundle2.AbortFromPart as exc:
            pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
            raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
        except error.BundleValueError as exc:
            raise error.Abort(_(b'missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records[b'listkeys']:
        if namespace == b'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records[b'bookmarks']:
            books[record[b'bookmark']] = record[b"node"]
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records[b'listkeys']:
            if namespace == b'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
2016 2016
2017 2017
2018 2018 def _pullbundle2extraprepare(pullop, kwargs):
2019 2019 """hook function so that extensions can extend the getbundle call"""
2020 2020
2021 2021
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Picks the best wire command the remote supports (``getbundle``,
    ``changegroup``, or ``changegroupsubset``) and applies the result to
    the local repository, recording the result in ``pullop.cgresult``."""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if b'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_(b"requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable(b'getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle(
            b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
        )
    elif pullop.heads is None:
        # full pull over the legacy 'changegroup' command
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',}
            ).result()

    elif not pullop.remote.capable(b'changegroupsubset'):
        raise error.Abort(
            _(
                b"partial pull cannot be done because "
                b"other repository doesn't support "
                b"changegroupsubset."
            )
        )
    else:
        # partial pull over the legacy 'changegroupsubset' command
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroupsubset',
                {
                    b'bases': pullop.fetch,
                    b'heads': pullop.heads,
                    b'source': b'pull',
                },
            ).result()

    bundleop = bundle2.applybundle(
        pullop.repo, cg, tr, b'pull', pullop.remote.url()
    )
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2075 2075
2076 2076
2077 2077 def _pullphase(pullop):
2078 2078 # Get remote phases data from remote
2079 2079 if b'phases' in pullop.stepsdone:
2080 2080 return
2081 2081 remotephases = listkeys(pullop.remote, b'phases')
2082 2082 _pullapplyphases(pullop, remotephases)
2083 2083
2084 2084
2085 2085 def _pullapplyphases(pullop, remotephases):
2086 2086 """apply phase movement from observed remote state"""
2087 2087 if b'phases' in pullop.stepsdone:
2088 2088 return
2089 2089 pullop.stepsdone.add(b'phases')
2090 2090 publishing = bool(remotephases.get(b'publishing', False))
2091 2091 if remotephases and not publishing:
2092 2092 # remote is new and non-publishing
2093 2093 pheads, _dr = phases.analyzeremotephases(
2094 2094 pullop.repo, pullop.pulledsubset, remotephases
2095 2095 )
2096 2096 dheads = pullop.pulledsubset
2097 2097 else:
2098 2098 # Remote is old or publishing all common changesets
2099 2099 # should be seen as public
2100 2100 pheads = pullop.pulledsubset
2101 2101 dheads = []
2102 2102 unfi = pullop.repo.unfiltered()
2103 2103 phase = unfi._phasecache.phase
2104 rev = unfi.changelog.nodemap.get
2104 rev = unfi.changelog.index.get_rev
2105 2105 public = phases.public
2106 2106 draft = phases.draft
2107 2107
2108 2108 # exclude changesets already public locally and update the others
2109 2109 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
2110 2110 if pheads:
2111 2111 tr = pullop.gettransaction()
2112 2112 phases.advanceboundary(pullop.repo, tr, public, pheads)
2113 2113
2114 2114 # exclude changesets already draft locally and update the others
2115 2115 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
2116 2116 if dheads:
2117 2117 tr = pullop.gettransaction()
2118 2118 phases.advanceboundary(pullop.repo, tr, draft, dheads)
2119 2119
2120 2120
2121 2121 def _pullbookmarks(pullop):
2122 2122 """process the remote bookmark information to update the local one"""
2123 2123 if b'bookmarks' in pullop.stepsdone:
2124 2124 return
2125 2125 pullop.stepsdone.add(b'bookmarks')
2126 2126 repo = pullop.repo
2127 2127 remotebookmarks = pullop.remotebookmarks
2128 2128 bookmod.updatefromremote(
2129 2129 repo.ui,
2130 2130 repo,
2131 2131 remotebookmarks,
2132 2132 pullop.remote.url(),
2133 2133 pullop.gettransaction,
2134 2134 explicit=pullop.explicitbookmarks,
2135 2135 )
2136 2136
2137 2137
2138 2138 def _pullobsolete(pullop):
2139 2139 """utility function to pull obsolete markers from a remote
2140 2140
2141 2141 The `gettransaction` is function that return the pull transaction, creating
2142 2142 one if necessary. We return the transaction to inform the calling code that
2143 2143 a new transaction have been created (when applicable).
2144 2144
2145 2145 Exists mostly to allow overriding for experimentation purpose"""
2146 2146 if b'obsmarkers' in pullop.stepsdone:
2147 2147 return
2148 2148 pullop.stepsdone.add(b'obsmarkers')
2149 2149 tr = None
2150 2150 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2151 2151 pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
2152 2152 remoteobs = listkeys(pullop.remote, b'obsolete')
2153 2153 if b'dump0' in remoteobs:
2154 2154 tr = pullop.gettransaction()
2155 2155 markers = []
2156 2156 for key in sorted(remoteobs, reverse=True):
2157 2157 if key.startswith(b'dump'):
2158 2158 data = util.b85decode(remoteobs[key])
2159 2159 version, newmarks = obsolete._readmarkers(data)
2160 2160 markers += newmarks
2161 2161 if markers:
2162 2162 pullop.repo.obsstore.add(tr, markers)
2163 2163 pullop.repo.invalidatevolatilesets()
2164 2164 return tr
2165 2165
2166 2166
def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION,
        username + b'.includes',
        ui.configlist(_NARROWACL_SECTION, b'default.includes'),
    )
    user_excludes = ui.configlist(
        _NARROWACL_SECTION,
        username + b'.excludes',
        ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
    )
    if not user_includes:
        raise error.Abort(
            _(b"{} configuration for user {} is empty").format(
                _NARROWACL_SECTION, username
            )
        )

    def _aspattern(p):
        # '*' grants access to everything under the repository root
        return b'path:.' if p == b'*' else b'path:' + p

    user_includes = [_aspattern(p) for p in user_includes]
    user_excludes = [_aspattern(p) for p in user_excludes]

    # note: kwargs keys are native strings here (post-strkwargs)
    req_includes = set(kwargs.get('includepats', []))
    req_excludes = set(kwargs.get('excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes
    )

    if invalid_includes:
        raise error.Abort(
            _(b"The following includes are not accessible for {}: {}").format(
                username, invalid_includes
            )
        )

    new_args = dict(kwargs)
    new_args['narrow'] = True
    new_args['narrow_acl'] = True
    new_args['includepats'] = req_includes
    if req_excludes:
        new_args['excludepats'] = req_excludes

    return new_args
2223 2223
2224 2224
def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
        May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      known: Revs to unconditionally treat as required boundaries
        (merged into the ``required`` set below); presumably revs the
        client already knows about — TODO confirm with callers.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
        most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

      visitnodes: The list of nodes (either full or ellipsis) which
        need to be sent to the client.
      relevant_nodes: The set of changelog nodes which change a file inside
        the narrowspec. The client needs these as non-ellipsis nodes.
      ellipsisroots: A dict of {rev: parents} that is used in
        narrowchangegroup to produce ellipsis nodes with the
        correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    clrev = cl.rev

    commonrevs = {clrev(n) for n in common} | {nullrev}
    headsrevs = {clrev(n) for n in heads}

    if depth:
        # distance (in changesets) of each rev from the nearest head
        revdepth = {h: 0 for h in headsrevs}

    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child  # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        # move ``roots`` from ``head`` onto the intermediate ``child``
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs(
                b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
            )
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(
            _(
                b'Failed to split up ellipsis node! head: %d, '
                b'roots: %d %d %d'
            )
            % (head, r1, r2, r3)
        )

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        # NOTE: ``clrev`` is rebound here from the rev-lookup function to
        # the changelogrevision entry for the current rev.
        clrev = cl.changelogrevision(rev)
        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots
2356 2356
2357 2357
def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    return {b'HG20', b'bundle2=' + urlreq.quote(capsblob)}
2364 2364
2365 2365
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
2373 2373
2374 2374
def getbundle2partsgenerator(stepname, idx=None):
    """Decorator registering a bundle2 part generator for getbundle.

    The decorated function is recorded in the step -> function mapping and
    inserted into the ordered step list (at position ``idx`` when given,
    appended otherwise), so decoration order may matter.

    Only use this decorator for new steps; to wrap a step from an
    extension, attack the getbundle2partsmapping dictionary directly."""

    def register(func):
        # each step name may be registered exactly once
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func

    return register
2395 2395
2396 2396
def bundle2requested(bundlecaps):
    """Tell whether the client capabilities ask for a bundle2 response."""
    if bundlecaps is None:
        return False
    return any(cap.startswith(b'HG2') for cap in bundlecaps)
2401 2401
2402 2402
def getbundlechunks(
    repo, source, heads=None, common=None, bundlecaps=None, **kwargs
):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case: a bare changegroup stream, no extra parts allowed
    if not usebundle2:
        if bundlecaps and not kwargs.get(b'cg', True):
            raise ValueError(
                _(b'request for bundle10 must include changegroup')
            )

        if kwargs:
            raise ValueError(
                _(b'unsupported getbundle arguments: %s')
                % b', '.join(sorted(kwargs.keys()))
            )
        outgoing = _computeoutgoing(repo, heads, common)
        info[b'bundleversion'] = 1
        return (
            info,
            changegroup.makestream(
                repo, outgoing, b'01', source, bundlecaps=bundlecaps
            ),
        )

    # bundle20 case
    info[b'bundleversion'] = 2
    # decode the client's bundle2 capabilities from the 'bundle2=' blob
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith(b'bundle2='):
            blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs[b'heads'] = heads
    kwargs[b'common'] = common

    # run every registered part generator, in order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(
            bundler,
            repo,
            source,
            bundlecaps=bundlecaps,
            b2caps=b2caps,
            **pycompat.strkwargs(kwargs)
        )

    info[b'prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()
2464 2464
2465 2465
@getbundle2partsgenerator(b'stream2')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    """add a stream v2 part to the requested bundle (delegated to bundle2)"""
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2469 2469
2470 2470
@getbundle2partsgenerator(b'changegroup')
def _getbundlechangegrouppart(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """add a changegroup part to the requested bundle"""
    if not kwargs.get('cg', True):
        return

    version = b'01'
    cgversions = b2caps.get(b'changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        # pick the highest changegroup version both sides support
        cgversions = [
            v
            for v in cgversions
            if v in changegroup.supportedoutgoingversions(repo)
        ]
        if not cgversions:
            raise error.Abort(_(b'no common changegroup version'))
        version = max(cgversions)

    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missing:
        return

    if kwargs.get('narrow', False):
        include = sorted(filter(bool, kwargs.get('includepats', [])))
        exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
        matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
    else:
        matcher = None

    cgstream = changegroup.makestream(
        repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
    )

    part = bundler.newpart(b'changegroup', data=cgstream)
    if cgversions:
        part.addparam(b'version', version)

    part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)

    if b'treemanifest' in repo.requirements:
        part.addparam(b'treemanifest', b'1')

    if b'exp-sidedata-flag' in repo.requirements:
        part.addparam(b'exp-sidedata', b'1')

    # NOTE: ``include``/``exclude`` are only bound in the narrow branch
    # above; the leading kwargs.get('narrow', ...) check short-circuits so
    # they are never read when unbound.
    if (
        kwargs.get('narrow', False)
        and kwargs.get('narrow_acl', False)
        and (include or exclude)
    ):
        # this is mandatory because otherwise ACL clients won't work
        narrowspecpart = bundler.newpart(b'Narrow:responsespec')
        narrowspecpart.data = b'%s\0%s' % (
            b'\n'.join(include),
            b'\n'.join(exclude),
        )
2536 2536
2537 2537
@getbundle2partsgenerator(b'bookmarks')
def _getbundlebookmarkpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get('bookmarks', False):
        return
    if b'bookmarks' not in b2caps:
        raise error.Abort(_(b'no common bookmarks exchange method'))
    data = bookmod.binaryencode(bookmod.listbinbookmarks(repo))
    if data:
        bundler.newpart(b'bookmarks', data=data)
2551 2551
2552 2552
@getbundle2partsgenerator(b'listkeys')
def _getbundlelistkeysparts(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """add parts containing listkeys namespaces to the requested bundle"""
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart(b'listkeys')
        part.addparam(b'namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
2564 2564
2565 2565
@getbundle2partsgenerator(b'obsmarkers')
def _getbundleobsmarkerpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    subset = [ctx.node() for ctx in repo.set(b'::%ln', heads)]
    markers = _sortedmarkers(repo.obsstore.relevantmarkers(subset))
    bundle2.buildobsmarkerspart(bundler, markers)
2578 2578
2579 2579
@getbundle2partsgenerator(b'phases')
def _getbundlephasespart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """add phase heads part to the requested bundle"""
    if kwargs.get('phases', False):
        if not b'heads' in b2caps.get(b'phases'):
            raise error.Abort(_(b'no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            # a publishing repo exposes everything as public
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phase for now)
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = b'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart(b'phase-heads', data=phasedata)
2627 2627
2628 2628
@getbundle2partsgenerator(b'hgtagsfnodes')
def _getbundletagsfnodes(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Send only when changesets are being exchanged and the client has
    # advertised support for the part.
    exchangingcg = kwargs.get('cg', True)
    clientsupports = b'hgtagsfnodes' in b2caps
    if exchangingcg and clientsupports:
        outgoing = _computeoutgoing(repo, heads, common)
        bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2655 2655
2656 2656
@getbundle2partsgenerator(b'cache:rev-branch-cache')
def _getbundlerevbranchcache(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Skip unless changesets are being exchanged and the client supports
    # the part; narrow bundles are not currently compatible with it.
    if not kwargs.get('cg', True):
        return
    if b'rev-branch-cache' not in b2caps:
        return
    if kwargs.get('narrow', False):
        return
    if repo.ui.has_section(_NARROWACL_SECTION):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2692 2692
2693 2693
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    currentheads = repo.heads()
    # hash of the sorted heads, matching what clients send as 'hashed'
    expectedhash = hashlib.sha1(b''.join(sorted(currentheads))).digest()
    unchanged = (
        their_heads == [b'force']
        or their_heads == currentheads
        or their_heads == [b'hashed', expectedhash]
    )
    if not unchanged:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced(
            b'repository changed while %s - please try again' % context
        )
2711 2711
2712 2712
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    # set to a callable once there is captured output to replay on the way out
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool(
        b'experimental', b'bundle2-output-capture'
    )
    # HTTP peers need their output captured so it can be sent back in the reply
    if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, b'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = b"\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                # Acquire locks/transaction lazily on first use; bundles that
                # never touch the store then avoid taking them at all.
                def gettransaction():
                    if not lockandtr[2]:
                        if not bookmod.bookmarksinstore(repo):
                            lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs[b'source'] = source
                        lockandtr[2].hookargs[b'url'] = url
                        lockandtr[2].hookargs[b'bundle2'] = b'1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool(
                    b'experimental', b'bundle2lazylocking'
                ):
                    gettransaction()

                op = bundle2.bundleoperation(
                    repo,
                    gettransaction,
                    captureoutput=captureoutput,
                    source=b'push',
                )
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    # reply bundle must be captured even if processing failed
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)

                        # on success, replay buffered output into the reply
                        def recordout(output):
                            r.newpart(b'output', data=output, mandatory=False)

                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # annotate the exception so upper layers know it happened
                # during bundle2 processing
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    # salvage already-generated reply parts so output is not
                    # lost when the exception propagates
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()

                    def recordout(output):
                        part = bundle2.bundlepart(
                            b'output', data=output, mandatory=False
                        )
                        parts.append(part)

                raise
    finally:
        # release in reverse acquisition order: tr, lock, wlock
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
2800 2800
2801 2801
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    # Clone bundles only apply when the feature is enabled, the local repo
    # is empty, no specific heads were requested, and the server advertises
    # the capability.
    if not repo.ui.configbool(b'ui', b'clonebundles'):
        return
    if len(repo) or pullop.heads:
        return
    if not remote.capable(b'clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand(b'clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(
            _(
                b'no clone bundles available on remote; '
                b'falling back to regular clone\n'
            )
        )
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested
    )

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(
            _(
                b'no compatible clone bundles available on server; '
                b'falling back to regular clone\n'
            )
        )
        repo.ui.warn(
            _(b'(you may want to report this to the server operator)\n')
        )
        return

    entries = sortclonebundleentries(repo.ui, entries)

    bundleurl = entries[0][b'URL']
    repo.ui.status(_(b'applying clone bundle from %s\n') % bundleurl)
    if trypullbundlefromurl(repo.ui, repo, bundleurl):
        repo.ui.status(_(b'finished applying clone bundle\n'))
        return

    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    if not repo.ui.configbool(b'ui', b'clonebundlefallback'):
        raise error.Abort(
            _(b'error applying bundle'),
            hint=_(
                b'if this error persists, consider contacting '
                b'the server operator or disable clone '
                b'bundles via '
                b'"--config ui.clonebundles=false"'
            ),
        )
    repo.ui.warn(_(b'falling back to normal clone\n'))
2882 2882
2883 2883
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """

    def parseline(fields):
        # First whitespace-separated field is the URL; the remainder are
        # percent-encoded KEY=VALUE attribute pairs.
        attrs = {b'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split(b'=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == b'BUNDLESPEC':
                try:
                    bundlespec = parsebundlespec(repo, value)
                    attrs[b'COMPRESSION'] = bundlespec.compression
                    attrs[b'VERSION'] = bundlespec.version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass
        return attrs

    return [
        parseline(fields)
        for fields in (line.split() for line in s.splitlines())
        if fields
    ]
2918 2918
2919 2919
def isstreamclonespec(bundlespec):
    """Return whether *bundlespec* describes a stream clone bundle."""
    # Both stream clone flavors require uncompressed wire transfer.
    if bundlespec.wirecompression != b'UN':
        return False

    # Stream clone v1
    if bundlespec.wireversion == b's1':
        return True

    # Stream clone v2
    if bundlespec.wireversion == b'02' and bundlespec.contentopts.get(
        b'streamv2'
    ):
        return True

    return False
2934 2934
2935 2935
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """

    def compatible(entry):
        # Decide whether this client should be able to apply *entry*.
        spec = entry.get(b'BUNDLESPEC')
        if spec:
            try:
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug(
                        b'filtering %s because not a stream clone\n'
                        % entry[b'URL']
                    )
                    return False

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
                return False
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug(
                    b'filtering %s because unsupported bundle '
                    b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
                )
                return False
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug(
                b'filtering %s because cannot determine if a stream '
                b'clone bundle\n' % entry[b'URL']
            )
            return False

        if b'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug(
                b'filtering %s because SNI not supported\n' % entry[b'URL']
            )
            return False

        return True

    return [entry for entry in entries if compatible(entry)]
2989 2989
2990 2990
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        # Walk the preference list in order; the first attribute that can
        # distinguish the two entries decides the ordering.
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            if avalue is None or bvalue is None:
                # One side lacks the attribute entirely; an exact match on
                # the other side wins, otherwise this attribute can't decide.
                if avalue is not None and avalue == prefvalue:
                    return -1
                if bvalue is not None and bvalue == prefvalue:
                    return 1
                continue

            if avalue == bvalue:
                # Tie on this attribute; fall back to the next preference.
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0
3054 3054
3055 3055
def sortclonebundleentries(ui, entries):
    """Sort manifest entries per the user's ``ui.clonebundleprefers`` config.

    Without configured preferences, the manifest order is preserved.
    """
    rawprefers = ui.configlist(b'ui', b'clonebundleprefers')
    if not rawprefers:
        return list(entries)

    prefers = [p.split(b'=', 1) for p in rawprefers]

    wrapped = sorted(clonebundleentry(v, prefers) for v in entries)
    return [entry.value for entry in wrapped]
3065 3065
3066 3066
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Returns True when the bundle was fetched and applied; False when an
    HTTP or URL error prevented fetching it.
    """
    with repo.lock(), repo.transaction(b'bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            bundle = readbundle(ui, fh, b'stream')

            # A stream clone bundle knows how to apply itself; everything
            # else goes through the regular bundle2 machinery.
            if isinstance(bundle, streamclone.streamcloneapplier):
                bundle.apply(repo)
            else:
                bundle2.applybundle(repo, bundle, tr, b'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(
                _(b'HTTP error fetching bundle: %s\n')
                % stringutil.forcebytestr(e)
            )
        except urlerr.urlerror as e:
            ui.warn(
                _(b'error fetching bundle: %s\n')
                % stringutil.forcebytestr(e.reason)
            )

    return False
General Comments 0
You need to be logged in to leave comments. Login now