exchange: allow passing no includes/excludes to `pull()`...
Martin von Zweigbergk
r51423:b361e9da default
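The change below (line 1640 in the diff) makes explicitly passed empty
pattern collections meaningful: they no longer fall back to repo.narrowpats.
A minimal sketch of a caller relying on the new behavior (repo and remote
are assumed to be an existing local repository and peer):

    from mercurial import exchange

    # Explicitly request "no narrowing patterns". Under the old condition,
    # empty sets were falsy and repo.narrowpats was used instead.
    exchange.pull(repo, remote, includepats=set(), excludepats=set())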
@@ -1,2875 +1,2875 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import collections
10 10 import weakref
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 nullrev,
16 16 )
17 17 from . import (
18 18 bookmarks as bookmod,
19 19 bundle2,
20 20 bundlecaches,
21 21 changegroup,
22 22 discovery,
23 23 error,
24 24 lock as lockmod,
25 25 logexchange,
26 26 narrowspec,
27 27 obsolete,
28 28 obsutil,
29 29 phases,
30 30 pushkey,
31 31 pycompat,
32 32 requirements,
33 33 scmutil,
34 34 streamclone,
35 35 url as urlmod,
36 36 util,
37 37 wireprototypes,
38 38 )
39 39 from .utils import (
40 40 hashutil,
41 41 stringutil,
42 42 urlutil,
43 43 )
44 44 from .interfaces import repository
45 45
46 46 urlerr = util.urlerr
47 47 urlreq = util.urlreq
48 48
49 49 _NARROWACL_SECTION = b'narrowacl'
50 50
51 51
52 52 def readbundle(ui, fh, fname, vfs=None):
53 53 header = changegroup.readexactly(fh, 4)
54 54
55 55 alg = None
56 56 if not fname:
57 57 fname = b"stream"
58 58 if not header.startswith(b'HG') and header.startswith(b'\0'):
59 59 fh = changegroup.headerlessfixup(fh, header)
60 60 header = b"HG10"
61 61 alg = b'UN'
62 62 elif vfs:
63 63 fname = vfs.join(fname)
64 64
65 65 magic, version = header[0:2], header[2:4]
66 66
67 67 if magic != b'HG':
68 68 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
69 69 if version == b'10':
70 70 if alg is None:
71 71 alg = changegroup.readexactly(fh, 2)
72 72 return changegroup.cg1unpacker(fh, alg)
73 73 elif version.startswith(b'2'):
74 74 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
75 75 elif version == b'S1':
76 76 return streamclone.streamcloneapplier(fh)
77 77 else:
78 78 raise error.Abort(
79 79 _(b'%s: unknown bundle version %s') % (fname, version)
80 80 )
81 81
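A usage sketch for readbundle() above (ui is assumed to be an existing ui
object; the bundle path is hypothetical):

    # readbundle() sniffs the 4-byte magic/version header and returns a
    # matching unbundler: a cg1unpacker, a bundle2 unbundler, or a stream
    # clone applier.
    with open('changes.hg', 'rb') as fh:
        unbundler = readbundle(ui, fh, b'changes.hg')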
82 82
83 83 def _format_params(params):
84 84 parts = []
85 85 for key, value in sorted(params.items()):
86 86 value = urlreq.quote(value)
87 87 parts.append(b"%s=%s" % (key, value))
88 88 return b';'.join(parts)
89 89
90 90
91 91 def getbundlespec(ui, fh):
92 92 """Infer the bundlespec from a bundle file handle.
93 93
94 94 The input file handle is seeked and the original seek position is not
95 95 restored.
96 96 """
97 97
98 98 def speccompression(alg):
99 99 try:
100 100 return util.compengines.forbundletype(alg).bundletype()[0]
101 101 except KeyError:
102 102 return None
103 103
104 104 params = {}
105 105
106 106 b = readbundle(ui, fh, None)
107 107 if isinstance(b, changegroup.cg1unpacker):
108 108 alg = b._type
109 109 if alg == b'_truncatedBZ':
110 110 alg = b'BZ'
111 111 comp = speccompression(alg)
112 112 if not comp:
113 113 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
114 114 return b'%s-v1' % comp
115 115 elif isinstance(b, bundle2.unbundle20):
116 116 if b'Compression' in b.params:
117 117 comp = speccompression(b.params[b'Compression'])
118 118 if not comp:
119 119 raise error.Abort(
120 120 _(b'unknown compression algorithm: %s') % comp
121 121 )
122 122 else:
123 123 comp = b'none'
124 124
125 125 version = None
126 126 for part in b.iterparts():
127 127 if part.type == b'changegroup':
128 128 cgversion = part.params[b'version']
129 129 if cgversion in (b'01', b'02'):
130 130 version = b'v2'
131 131 elif cgversion in (b'03',):
132 132 version = b'v2'
133 133 params[b'cg.version'] = cgversion
134 134 else:
135 135 raise error.Abort(
136 136 _(
137 137 b'changegroup version %s does not have '
138 138 b'a known bundlespec'
139 139 )
140 140 % cgversion,
141 141 hint=_(b'try upgrading your Mercurial client'),
142 142 )
143 143 elif part.type == b'stream2' and version is None:
144 144 # A stream2 part must be part of a v2 bundle
145 145 requirements = urlreq.unquote(part.params[b'requirements'])
146 146 splitted = requirements.split()
147 147 params = bundle2._formatrequirementsparams(splitted)
148 148 return b'none-v2;stream=v2;%s' % params
149 149 elif part.type == b'obsmarkers':
150 150 params[b'obsolescence'] = b'yes'
151 151 if not part.mandatory:
152 152 params[b'obsolescence-mandatory'] = b'no'
153 153
154 154 if not version:
155 155 raise error.Abort(
156 156 _(b'could not identify changegroup version in bundle')
157 157 )
158 158 spec = b'%s-%s' % (comp, version)
159 159 if params:
160 160 spec += b';'
161 161 spec += _format_params(params)
162 162 return spec
163 163
164 164 elif isinstance(b, streamclone.streamcloneapplier):
165 165 requirements = streamclone.readbundle1header(fh)[2]
166 166 formatted = bundle2._formatrequirementsparams(requirements)
167 167 return b'none-packed1;%s' % formatted
168 168 else:
169 169 raise error.Abort(_(b'unknown bundle type: %s') % b)
170 170
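For reference, the specs produced above look like a compression name, a
dash, a version, then optional parameters. A sketch, assuming fh is an open
bundle file handle:

    spec = getbundlespec(ui, fh)
    # e.g. b'bzip2-v1', b'zstd-v2;obsolescence=yes',
    # or b'none-v2;stream=v2;requirements%3Drevlogv1'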
171 171
172 172 def _computeoutgoing(repo, heads, common):
173 173 """Computes which revs are outgoing given a set of common
174 174 and a set of heads.
175 175
176 176 This is a separate function so extensions can have access to
177 177 the logic.
178 178
179 179 Returns a discovery.outgoing object.
180 180 """
181 181 cl = repo.changelog
182 182 if common:
183 183 hasnode = cl.hasnode
184 184 common = [n for n in common if hasnode(n)]
185 185 else:
186 186 common = [repo.nullid]
187 187 if not heads:
188 188 heads = cl.heads()
189 189 return discovery.outgoing(repo, common, heads)
190 190
191 191
192 192 def _checkpublish(pushop):
193 193 repo = pushop.repo
194 194 ui = repo.ui
195 195 behavior = ui.config(b'experimental', b'auto-publish')
196 196 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
197 197 return
198 198 remotephases = listkeys(pushop.remote, b'phases')
199 199 if not remotephases.get(b'publishing', False):
200 200 return
201 201
202 202 if pushop.revs is None:
203 203 published = repo.filtered(b'served').revs(b'not public()')
204 204 else:
205 205 published = repo.revs(b'::%ln - public()', pushop.revs)
206 206 # we want to use pushop.revs in the revset even if they themselves are
207 207 # secret, but we don't want to have anything that the server won't see
208 208 # in the result of this expression
209 209 published &= repo.filtered(b'served')
210 210 if published:
211 211 if behavior == b'warn':
212 212 ui.warn(
213 213 _(b'%i changesets about to be published\n') % len(published)
214 214 )
215 215 elif behavior == b'confirm':
216 216 if ui.promptchoice(
217 217 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
218 218 % len(published)
219 219 ):
220 220 raise error.CanceledError(_(b'user quit'))
221 221 elif behavior == b'abort':
222 222 msg = _(b'push would publish %i changesets') % len(published)
223 223 hint = _(
224 224 b"use --publish or adjust 'experimental.auto-publish'"
225 225 b" config"
226 226 )
227 227 raise error.Abort(msg, hint=hint)
228 228
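The knob checked above lives in the user's hgrc; for example, to be prompted
whenever a push would publish draft changesets:

    [experimental]
    auto-publish = confirm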
229 229
230 230 def _forcebundle1(op):
231 231 """return true if a pull/push must use bundle1
232 232
233 233 This function is used to allow testing of the older bundle version"""
234 234 ui = op.repo.ui
235 235 # The goal of this config is to allow developers to choose the bundle
236 236 # version used during exchange. This is especially handy during tests.
237 237 # Value is a list of bundle versions to be picked from; the highest
238 238 # version available should be used.
239 239 #
240 240 # developer config: devel.legacy.exchange
241 241 exchange = ui.configlist(b'devel', b'legacy.exchange')
242 242 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
243 243 return forcebundle1 or not op.remote.capable(b'bundle2')
244 244
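For example, a test can force the old wire format via the developer config
documented above:

    [devel]
    legacy.exchange = bundle1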
245 245
246 246 class pushoperation:
247 247 """A object that represent a single push operation
248 248
249 249 Its purpose is to carry push related state and very common operations.
250 250
251 251 A new pushoperation should be created at the beginning of each push and
252 252 discarded afterward.
253 253 """
254 254
255 255 def __init__(
256 256 self,
257 257 repo,
258 258 remote,
259 259 force=False,
260 260 revs=None,
261 261 newbranch=False,
262 262 bookmarks=(),
263 263 publish=False,
264 264 pushvars=None,
265 265 ):
266 266 # repo we push from
267 267 self.repo = repo
268 268 self.ui = repo.ui
269 269 # repo we push to
270 270 self.remote = remote
271 271 # force option provided
272 272 self.force = force
273 273 # revs to be pushed (None is "all")
274 274 self.revs = revs
275 275 # bookmark explicitly pushed
276 276 self.bookmarks = bookmarks
277 277 # allow push of new branch
278 278 self.newbranch = newbranch
279 279 # step already performed
280 280 # (used to check what steps have been already performed through bundle2)
281 281 self.stepsdone = set()
282 282 # Integer version of the changegroup push result
283 283 # - None means nothing to push
284 284 # - 0 means HTTP error
285 285 # - 1 means we pushed and remote head count is unchanged *or*
286 286 # we have outgoing changesets but refused to push
287 287 # - other values as described by addchangegroup()
288 288 self.cgresult = None
289 289 # Boolean value for the bookmark push
290 290 self.bkresult = None
291 291 # discover.outgoing object (contains common and outgoing data)
292 292 self.outgoing = None
293 293 # all remote topological heads before the push
294 294 self.remoteheads = None
295 295 # Details of the remote branch pre and post push
296 296 #
297 297 # mapping: {'branch': ([remoteheads],
298 298 # [newheads],
299 299 # [unsyncedheads],
300 300 # [discardedheads])}
301 301 # - branch: the branch name
302 302 # - remoteheads: the list of remote heads known locally
303 303 # None if the branch is new
304 304 # - newheads: the new remote heads (known locally) with outgoing pushed
305 305 # - unsyncedheads: the list of remote heads unknown locally.
306 306 # - discardedheads: the list of remote heads made obsolete by the push
307 307 self.pushbranchmap = None
308 308 # testable as a boolean indicating if any nodes are missing locally.
309 309 self.incoming = None
310 310 # summary of the remote phase situation
311 311 self.remotephases = None
312 312 # phase changes that must be pushed alongside the changesets
313 313 self.outdatedphases = None
314 314 # phase changes that must be pushed if the changeset push fails
315 315 self.fallbackoutdatedphases = None
316 316 # outgoing obsmarkers
317 317 self.outobsmarkers = set()
318 318 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
319 319 self.outbookmarks = []
320 320 # transaction manager
321 321 self.trmanager = None
322 322 # map { pushkey partid -> callback handling failure}
323 323 # used to handle exception from mandatory pushkey part failure
324 324 self.pkfailcb = {}
325 325 # an iterable of pushvars or None
326 326 self.pushvars = pushvars
327 327 # publish pushed changesets
328 328 self.publish = publish
329 329
330 330 @util.propertycache
331 331 def futureheads(self):
332 332 """future remote heads if the changeset push succeeds"""
333 333 return self.outgoing.ancestorsof
334 334
335 335 @util.propertycache
336 336 def fallbackheads(self):
337 337 """future remote heads if the changeset push fails"""
338 338 if self.revs is None:
339 339 # no targets to push, all common heads are relevant
340 340 return self.outgoing.commonheads
341 341 unfi = self.repo.unfiltered()
342 342 # I want cheads = heads(::ancestorsof and ::commonheads)
343 343 # (ancestorsof is revs with secret changeset filtered out)
344 344 #
345 345 # This can be expressed as:
346 346 # cheads = ( (ancestorsof and ::commonheads)
347 347 # + (commonheads and ::ancestorsof)
348 348 # )
349 349 #
350 350 # while trying to push we already computed the following:
351 351 # common = (::commonheads)
352 352 # missing = ((commonheads::ancestorsof) - commonheads)
353 353 #
354 354 # We can pick:
355 355 # * ancestorsof part of common (::commonheads)
356 356 common = self.outgoing.common
357 357 rev = self.repo.changelog.index.rev
358 358 cheads = [node for node in self.revs if rev(node) in common]
359 359 # and
360 360 # * commonheads parents on missing
361 361 revset = unfi.set(
362 362 b'%ln and parents(roots(%ln))',
363 363 self.outgoing.commonheads,
364 364 self.outgoing.missing,
365 365 )
366 366 cheads.extend(c.node() for c in revset)
367 367 return cheads
368 368
369 369 @property
370 370 def commonheads(self):
371 371 """set of all common heads after changeset bundle push"""
372 372 if self.cgresult:
373 373 return self.futureheads
374 374 else:
375 375 return self.fallbackheads
376 376
377 377
378 378 # mapping of messages used when pushing bookmarks
379 379 bookmsgmap = {
380 380 b'update': (
381 381 _(b"updating bookmark %s\n"),
382 382 _(b'updating bookmark %s failed\n'),
383 383 ),
384 384 b'export': (
385 385 _(b"exporting bookmark %s\n"),
386 386 _(b'exporting bookmark %s failed\n'),
387 387 ),
388 388 b'delete': (
389 389 _(b"deleting remote bookmark %s\n"),
390 390 _(b'deleting remote bookmark %s failed\n'),
391 391 ),
392 392 }
393 393
394 394
395 395 def push(
396 396 repo,
397 397 remote,
398 398 force=False,
399 399 revs=None,
400 400 newbranch=False,
401 401 bookmarks=(),
402 402 publish=False,
403 403 opargs=None,
404 404 ):
405 405 """Push outgoing changesets (limited by revs) from a local
406 406 repository to remote. Return an integer:
407 407 - None means nothing to push
408 408 - 0 means HTTP error
409 409 - 1 means we pushed and remote head count is unchanged *or*
410 410 we have outgoing changesets but refused to push
411 411 - other values as described by addchangegroup()
412 412 """
413 413 if opargs is None:
414 414 opargs = {}
415 415 pushop = pushoperation(
416 416 repo,
417 417 remote,
418 418 force,
419 419 revs,
420 420 newbranch,
421 421 bookmarks,
422 422 publish,
423 423 **pycompat.strkwargs(opargs)
424 424 )
425 425 if pushop.remote.local():
426 426 missing = (
427 427 set(pushop.repo.requirements) - pushop.remote.local().supported
428 428 )
429 429 if missing:
430 430 msg = _(
431 431 b"required features are not"
432 432 b" supported in the destination:"
433 433 b" %s"
434 434 ) % (b', '.join(sorted(missing)))
435 435 raise error.Abort(msg)
436 436
437 437 if not pushop.remote.canpush():
438 438 raise error.Abort(_(b"destination does not support push"))
439 439
440 440 if not pushop.remote.capable(b'unbundle'):
441 441 raise error.Abort(
442 442 _(
443 443 b'cannot push: destination does not support the '
444 444 b'unbundle wire protocol command'
445 445 )
446 446 )
447 447 for category in sorted(bundle2.read_remote_wanted_sidedata(pushop.remote)):
448 448 # Check that a computer is registered for that category for at least
449 449 # one revlog kind.
450 450 for kind, computers in repo._sidedata_computers.items():
451 451 if computers.get(category):
452 452 break
453 453 else:
454 454 raise error.Abort(
455 455 _(
456 456 b'cannot push: required sidedata category not supported'
457 457 b" by this client: '%s'"
458 458 )
459 459 % pycompat.bytestr(category)
460 460 )
461 461 # get lock as we might write phase data
462 462 wlock = lock = None
463 463 try:
464 464 # bundle2 push may receive a reply bundle touching bookmarks
465 465 # requiring the wlock. Take it now to ensure proper ordering.
466 466 maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
467 467 if (
468 468 (not _forcebundle1(pushop))
469 469 and maypushback
470 470 and not bookmod.bookmarksinstore(repo)
471 471 ):
472 472 wlock = pushop.repo.wlock()
473 473 lock = pushop.repo.lock()
474 474 pushop.trmanager = transactionmanager(
475 475 pushop.repo, b'push-response', pushop.remote.url()
476 476 )
477 477 except error.LockUnavailable as err:
478 478 # source repo cannot be locked.
479 479 # We do not abort the push, but just disable the local phase
480 480 # synchronisation.
481 481 msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
482 482 err
483 483 )
484 484 pushop.ui.debug(msg)
485 485
486 486 with wlock or util.nullcontextmanager():
487 487 with lock or util.nullcontextmanager():
488 488 with pushop.trmanager or util.nullcontextmanager():
489 489 pushop.repo.checkpush(pushop)
490 490 _checkpublish(pushop)
491 491 _pushdiscovery(pushop)
492 492 if not pushop.force:
493 493 _checksubrepostate(pushop)
494 494 if not _forcebundle1(pushop):
495 495 _pushbundle2(pushop)
496 496 _pushchangeset(pushop)
497 497 _pushsyncphase(pushop)
498 498 _pushobsolete(pushop)
499 499 _pushbookmark(pushop)
500 500
501 501 if repo.ui.configbool(b'experimental', b'remotenames'):
502 502 logexchange.pullremotenames(repo, remote)
503 503
504 504 return pushop
505 505
506 506
507 507 # list of steps to perform discovery before push
508 508 pushdiscoveryorder = []
509 509
510 510 # Mapping between step name and function
511 511 #
512 512 # This exists to help extensions wrap steps if necessary
513 513 pushdiscoverymapping = {}
514 514
515 515
516 516 def pushdiscovery(stepname):
517 517 """decorator for function performing discovery before push
518 518
519 519 The function is added to the step -> function mapping and appended to the
520 520 list of steps. Beware that decorated function will be added in order (this
521 521 may matter).
522 522
523 523 You can only use this decorator for a new step, if you want to wrap a step
524 524 from an extension, change the pushdiscovery dictionary directly."""
525 525
526 526 def dec(func):
527 527 assert stepname not in pushdiscoverymapping
528 528 pushdiscoverymapping[stepname] = func
529 529 pushdiscoveryorder.append(stepname)
530 530 return func
531 531
532 532 return dec
533 533
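A sketch of the wrapping approach the docstring suggests, as extension code
(the step name is real, the wrapper names are hypothetical):

    origstep = pushdiscoverymapping[b'changeset']

    def wrappedstep(pushop):
        # custom pre-discovery logic would go here
        return origstep(pushop)

    pushdiscoverymapping[b'changeset'] = wrappedstep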
534 534
535 535 def _pushdiscovery(pushop):
536 536 """Run all discovery steps"""
537 537 for stepname in pushdiscoveryorder:
538 538 step = pushdiscoverymapping[stepname]
539 539 step(pushop)
540 540
541 541
542 542 def _checksubrepostate(pushop):
543 543 """Ensure all outgoing referenced subrepo revisions are present locally"""
544 544
545 545 repo = pushop.repo
546 546
547 547 # If the repository does not use subrepos, skip the expensive
548 548 # manifest checks.
549 549 if not len(repo.file(b'.hgsub')) or not len(repo.file(b'.hgsubstate')):
550 550 return
551 551
552 552 for n in pushop.outgoing.missing:
553 553 ctx = repo[n]
554 554
555 555 if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
556 556 for subpath in sorted(ctx.substate):
557 557 sub = ctx.sub(subpath)
558 558 sub.verify(onpush=True)
559 559
560 560
561 561 @pushdiscovery(b'changeset')
562 562 def _pushdiscoverychangeset(pushop):
563 563 """discover the changeset that need to be pushed"""
564 564 fci = discovery.findcommonincoming
565 565 if pushop.revs:
566 566 commoninc = fci(
567 567 pushop.repo,
568 568 pushop.remote,
569 569 force=pushop.force,
570 570 ancestorsof=pushop.revs,
571 571 )
572 572 else:
573 573 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
574 574 common, inc, remoteheads = commoninc
575 575 fco = discovery.findcommonoutgoing
576 576 outgoing = fco(
577 577 pushop.repo,
578 578 pushop.remote,
579 579 onlyheads=pushop.revs,
580 580 commoninc=commoninc,
581 581 force=pushop.force,
582 582 )
583 583 pushop.outgoing = outgoing
584 584 pushop.remoteheads = remoteheads
585 585 pushop.incoming = inc
586 586
587 587
588 588 @pushdiscovery(b'phase')
589 589 def _pushdiscoveryphase(pushop):
590 590 """discover the phase that needs to be pushed
591 591
592 592 (computed for both success and failure case for changesets push)"""
593 593 outgoing = pushop.outgoing
594 594 unfi = pushop.repo.unfiltered()
595 595 remotephases = listkeys(pushop.remote, b'phases')
596 596
597 597 if (
598 598 pushop.ui.configbool(b'ui', b'_usedassubrepo')
599 599 and remotephases # server supports phases
600 600 and not pushop.outgoing.missing # no changesets to be pushed
601 601 and remotephases.get(b'publishing', False)
602 602 ):
603 603 # When:
604 604 # - this is a subrepo push
605 605 # - and the remote supports phases
606 606 # - and no changesets are to be pushed
607 607 # - and the remote is publishing
608 608 # we may be in the issue 3871 case!
609 609 # We skip the courtesy phase synchronisation that would
610 610 # otherwise publish changesets that may still be draft
611 611 # on the remote.
612 612 pushop.outdatedphases = []
613 613 pushop.fallbackoutdatedphases = []
614 614 return
615 615
616 616 pushop.remotephases = phases.remotephasessummary(
617 617 pushop.repo, pushop.fallbackheads, remotephases
618 618 )
619 619 droots = pushop.remotephases.draftroots
620 620
621 621 extracond = b''
622 622 if not pushop.remotephases.publishing:
623 623 extracond = b' and public()'
624 624 revset = b'heads((%%ln::%%ln) %s)' % extracond
625 625 # Get the list of all revs that are draft on the remote but public here.
626 626 # XXX Beware that the revset breaks if droots is not strictly
627 627 # XXX roots; we may want to ensure it is, but that is costly.
628 628 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
629 629 if not pushop.remotephases.publishing and pushop.publish:
630 630 future = list(
631 631 unfi.set(
632 632 b'%ln and (not public() or %ln::)', pushop.futureheads, droots
633 633 )
634 634 )
635 635 elif not outgoing.missing:
636 636 future = fallback
637 637 else:
638 638 # adds the changesets we are going to push as draft
639 639 #
640 640 # should not be necessary for a publishing server, but because of an
641 641 # issue fixed in xxxxx we have to do it anyway.
642 642 fdroots = list(
643 643 unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
644 644 )
645 645 fdroots = [f.node() for f in fdroots]
646 646 future = list(unfi.set(revset, fdroots, pushop.futureheads))
647 647 pushop.outdatedphases = future
648 648 pushop.fallbackoutdatedphases = fallback
649 649
650 650
651 651 @pushdiscovery(b'obsmarker')
652 652 def _pushdiscoveryobsmarkers(pushop):
653 653 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
654 654 return
655 655
656 656 if not pushop.repo.obsstore:
657 657 return
658 658
659 659 if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
660 660 return
661 661
662 662 repo = pushop.repo
663 663 # very naive computation that can be quite expensive on a big repo;
664 664 # however, evolution is currently slow on them anyway.
665 665 nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
666 666 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
667 667
668 668
669 669 @pushdiscovery(b'bookmarks')
670 670 def _pushdiscoverybookmarks(pushop):
671 671 ui = pushop.ui
672 672 repo = pushop.repo.unfiltered()
673 673 remote = pushop.remote
674 674 ui.debug(b"checking for updated bookmarks\n")
675 675 ancestors = ()
676 676 if pushop.revs:
677 677 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
678 678 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
679 679
680 680 remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
681 681
682 682 explicit = {
683 683 repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
684 684 }
685 685
686 686 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
687 687 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
688 688
689 689
690 690 def _processcompared(pushop, pushed, explicit, remotebms, comp):
691 691 """take decision on bookmarks to push to the remote repo
692 692
693 693 Exists to help extensions alter this behavior.
694 694 """
695 695 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
696 696
697 697 repo = pushop.repo
698 698
699 699 for b, scid, dcid in advsrc:
700 700 if b in explicit:
701 701 explicit.remove(b)
702 702 if not pushed or repo[scid].rev() in pushed:
703 703 pushop.outbookmarks.append((b, dcid, scid))
704 704 # search for added bookmarks
705 705 for b, scid, dcid in addsrc:
706 706 if b in explicit:
707 707 explicit.remove(b)
708 708 if bookmod.isdivergent(b):
709 709 pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b)
710 710 pushop.bkresult = 2
711 711 else:
712 712 pushop.outbookmarks.append((b, b'', scid))
713 713 # search for overwritten bookmark
714 714 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
715 715 if b in explicit:
716 716 explicit.remove(b)
717 717 pushop.outbookmarks.append((b, dcid, scid))
718 718 # search for bookmark to delete
719 719 for b, scid, dcid in adddst:
720 720 if b in explicit:
721 721 explicit.remove(b)
722 722 # treat as "deleted locally"
723 723 pushop.outbookmarks.append((b, dcid, b''))
724 724 # identical bookmarks shouldn't get reported
725 725 for b, scid, dcid in same:
726 726 if b in explicit:
727 727 explicit.remove(b)
728 728
729 729 if explicit:
730 730 explicit = sorted(explicit)
731 731 # we should probably list all of them
732 732 pushop.ui.warn(
733 733 _(
734 734 b'bookmark %s does not exist on the local '
735 735 b'or remote repository!\n'
736 736 )
737 737 % explicit[0]
738 738 )
739 739 pushop.bkresult = 2
740 740
741 741 pushop.outbookmarks.sort()
742 742
743 743
744 744 def _pushcheckoutgoing(pushop):
745 745 outgoing = pushop.outgoing
746 746 unfi = pushop.repo.unfiltered()
747 747 if not outgoing.missing:
748 748 # nothing to push
749 749 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
750 750 return False
751 751 # something to push
752 752 if not pushop.force:
753 753 # if repo.obsstore is falsy --> no obsolete markers,
754 754 # so we can skip the iteration
755 755 if unfi.obsstore:
756 756 # these messages are here for 80-char-limit reasons
757 757 mso = _(b"push includes obsolete changeset: %s!")
758 758 mspd = _(b"push includes phase-divergent changeset: %s!")
759 759 mscd = _(b"push includes content-divergent changeset: %s!")
760 760 mst = {
761 761 b"orphan": _(b"push includes orphan changeset: %s!"),
762 762 b"phase-divergent": mspd,
763 763 b"content-divergent": mscd,
764 764 }
765 765 # If we are pushing and there is at least one
766 766 # obsolete or unstable changeset in missing, at
767 767 # least one of the missing heads will be obsolete or
768 768 # unstable. So checking heads only is ok.
769 769 for node in outgoing.ancestorsof:
770 770 ctx = unfi[node]
771 771 if ctx.obsolete():
772 772 raise error.Abort(mso % ctx)
773 773 elif ctx.isunstable():
774 774 # TODO print more than one instability in the abort
775 775 # message
776 776 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
777 777
778 778 discovery.checkheads(pushop)
779 779 return True
780 780
781 781
782 782 # List of names of steps to perform for an outgoing bundle2, order matters.
783 783 b2partsgenorder = []
784 784
785 785 # Mapping between step name and function
786 786 #
787 787 # This exists to help extensions wrap steps if necessary
788 788 b2partsgenmapping = {}
789 789
790 790
791 791 def b2partsgenerator(stepname, idx=None):
792 792 """decorator for function generating bundle2 part
793 793
794 794 The function is added to the step -> function mapping and appended to the
795 795 list of steps. Beware that decorated functions will be added in order
796 796 (this may matter).
797 797
798 798 You can only use this decorator for new steps; if you want to wrap a step
799 799 from an extension, change the b2partsgenmapping dictionary directly."""
800 800
801 801 def dec(func):
802 802 assert stepname not in b2partsgenmapping
803 803 b2partsgenmapping[stepname] = func
804 804 if idx is None:
805 805 b2partsgenorder.append(stepname)
806 806 else:
807 807 b2partsgenorder.insert(idx, stepname)
808 808 return func
809 809
810 810 return dec
811 811
812 812
813 813 def _pushb2ctxcheckheads(pushop, bundler):
814 814 """Generate race condition checking parts
815 815
816 816 Exists as an independent function to aid extensions
817 817 """
818 818 # * 'force' does not check for push races,
819 819 # * if we don't push anything, there is nothing to check.
820 820 if not pushop.force and pushop.outgoing.ancestorsof:
821 821 allowunrelated = b'related' in bundler.capabilities.get(
822 822 b'checkheads', ()
823 823 )
824 824 emptyremote = pushop.pushbranchmap is None
825 825 if not allowunrelated or emptyremote:
826 826 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
827 827 else:
828 828 affected = set()
829 829 for branch, heads in pushop.pushbranchmap.items():
830 830 remoteheads, newheads, unsyncedheads, discardedheads = heads
831 831 if remoteheads is not None:
832 832 remote = set(remoteheads)
833 833 affected |= set(discardedheads) & remote
834 834 affected |= remote - set(newheads)
835 835 if affected:
836 836 data = iter(sorted(affected))
837 837 bundler.newpart(b'check:updated-heads', data=data)
838 838
839 839
840 840 def _pushing(pushop):
841 841 """return True if we are pushing anything"""
842 842 return bool(
843 843 pushop.outgoing.missing
844 844 or pushop.outdatedphases
845 845 or pushop.outobsmarkers
846 846 or pushop.outbookmarks
847 847 )
848 848
849 849
850 850 @b2partsgenerator(b'check-bookmarks')
851 851 def _pushb2checkbookmarks(pushop, bundler):
852 852 """insert bookmark move checking"""
853 853 if not _pushing(pushop) or pushop.force:
854 854 return
855 855 b2caps = bundle2.bundle2caps(pushop.remote)
856 856 hasbookmarkcheck = b'bookmarks' in b2caps
857 857 if not (pushop.outbookmarks and hasbookmarkcheck):
858 858 return
859 859 data = []
860 860 for book, old, new in pushop.outbookmarks:
861 861 data.append((book, old))
862 862 checkdata = bookmod.binaryencode(pushop.repo, data)
863 863 bundler.newpart(b'check:bookmarks', data=checkdata)
864 864
865 865
866 866 @b2partsgenerator(b'check-phases')
867 867 def _pushb2checkphases(pushop, bundler):
868 868 """insert phase move checking"""
869 869 if not _pushing(pushop) or pushop.force:
870 870 return
871 871 b2caps = bundle2.bundle2caps(pushop.remote)
872 872 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
873 873 if pushop.remotephases is not None and hasphaseheads:
874 874 # check that the remote phase has not changed
875 875 checks = {p: [] for p in phases.allphases}
876 876 checks[phases.public].extend(pushop.remotephases.publicheads)
877 877 checks[phases.draft].extend(pushop.remotephases.draftroots)
878 878 if any(checks.values()):
879 879 for phase in checks:
880 880 checks[phase].sort()
881 881 checkdata = phases.binaryencode(checks)
882 882 bundler.newpart(b'check:phases', data=checkdata)
883 883
884 884
885 885 @b2partsgenerator(b'changeset')
886 886 def _pushb2ctx(pushop, bundler):
887 887 """handle changegroup push through bundle2
888 888
889 889 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
890 890 """
891 891 if b'changesets' in pushop.stepsdone:
892 892 return
893 893 pushop.stepsdone.add(b'changesets')
894 894 # Send known heads to the server for race detection.
895 895 if not _pushcheckoutgoing(pushop):
896 896 return
897 897 pushop.repo.prepushoutgoinghooks(pushop)
898 898
899 899 _pushb2ctxcheckheads(pushop, bundler)
900 900
901 901 b2caps = bundle2.bundle2caps(pushop.remote)
902 902 version = b'01'
903 903 cgversions = b2caps.get(b'changegroup')
904 904 if cgversions: # 3.1 and 3.2 ship with an empty value
905 905 cgversions = [
906 906 v
907 907 for v in cgversions
908 908 if v in changegroup.supportedoutgoingversions(pushop.repo)
909 909 ]
910 910 if not cgversions:
911 911 raise error.Abort(_(b'no common changegroup version'))
912 912 version = max(cgversions)
913 913
914 914 remote_sidedata = bundle2.read_remote_wanted_sidedata(pushop.remote)
915 915 cgstream = changegroup.makestream(
916 916 pushop.repo,
917 917 pushop.outgoing,
918 918 version,
919 919 b'push',
920 920 bundlecaps=b2caps,
921 921 remote_sidedata=remote_sidedata,
922 922 )
923 923 cgpart = bundler.newpart(b'changegroup', data=cgstream)
924 924 if cgversions:
925 925 cgpart.addparam(b'version', version)
926 926 if scmutil.istreemanifest(pushop.repo):
927 927 cgpart.addparam(b'treemanifest', b'1')
928 928 if repository.REPO_FEATURE_SIDE_DATA in pushop.repo.features:
929 929 cgpart.addparam(b'exp-sidedata', b'1')
930 930
931 931 def handlereply(op):
932 932 """extract addchangegroup returns from server reply"""
933 933 cgreplies = op.records.getreplies(cgpart.id)
934 934 assert len(cgreplies[b'changegroup']) == 1
935 935 pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
936 936
937 937 return handlereply
938 938
939 939
940 940 @b2partsgenerator(b'phase')
941 941 def _pushb2phases(pushop, bundler):
942 942 """handle phase push through bundle2"""
943 943 if b'phases' in pushop.stepsdone:
944 944 return
945 945 b2caps = bundle2.bundle2caps(pushop.remote)
946 946 ui = pushop.repo.ui
947 947
948 948 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
949 949 haspushkey = b'pushkey' in b2caps
950 950 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
951 951
952 952 if hasphaseheads and not legacyphase:
953 953 return _pushb2phaseheads(pushop, bundler)
954 954 elif haspushkey:
955 955 return _pushb2phasespushkey(pushop, bundler)
956 956
957 957
958 958 def _pushb2phaseheads(pushop, bundler):
959 959 """push phase information through a bundle2 - binary part"""
960 960 pushop.stepsdone.add(b'phases')
961 961 if pushop.outdatedphases:
962 962 updates = {p: [] for p in phases.allphases}
963 963 updates[0].extend(h.node() for h in pushop.outdatedphases)
964 964 phasedata = phases.binaryencode(updates)
965 965 bundler.newpart(b'phase-heads', data=phasedata)
966 966
967 967
968 968 def _pushb2phasespushkey(pushop, bundler):
969 969 """push phase information through a bundle2 - pushkey part"""
970 970 pushop.stepsdone.add(b'phases')
971 971 part2node = []
972 972
973 973 def handlefailure(pushop, exc):
974 974 targetid = int(exc.partid)
975 975 for partid, node in part2node:
976 976 if partid == targetid:
977 977 raise error.Abort(_(b'updating %s to public failed') % node)
978 978
979 979 enc = pushkey.encode
980 980 for newremotehead in pushop.outdatedphases:
981 981 part = bundler.newpart(b'pushkey')
982 982 part.addparam(b'namespace', enc(b'phases'))
983 983 part.addparam(b'key', enc(newremotehead.hex()))
984 984 part.addparam(b'old', enc(b'%d' % phases.draft))
985 985 part.addparam(b'new', enc(b'%d' % phases.public))
986 986 part2node.append((part.id, newremotehead))
987 987 pushop.pkfailcb[part.id] = handlefailure
988 988
989 989 def handlereply(op):
990 990 for partid, node in part2node:
991 991 partrep = op.records.getreplies(partid)
992 992 results = partrep[b'pushkey']
993 993 assert len(results) <= 1
994 994 msg = None
995 995 if not results:
996 996 msg = _(b'server ignored update of %s to public!\n') % node
997 997 elif not int(results[0][b'return']):
998 998 msg = _(b'updating %s to public failed!\n') % node
999 999 if msg is not None:
1000 1000 pushop.ui.warn(msg)
1001 1001
1002 1002 return handlereply
1003 1003
1004 1004
1005 1005 @b2partsgenerator(b'obsmarkers')
1006 1006 def _pushb2obsmarkers(pushop, bundler):
1007 1007 if b'obsmarkers' in pushop.stepsdone:
1008 1008 return
1009 1009 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
1010 1010 if obsolete.commonversion(remoteversions) is None:
1011 1011 return
1012 1012 pushop.stepsdone.add(b'obsmarkers')
1013 1013 if pushop.outobsmarkers:
1014 1014 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1015 1015 bundle2.buildobsmarkerspart(bundler, markers)
1016 1016
1017 1017
1018 1018 @b2partsgenerator(b'bookmarks')
1019 1019 def _pushb2bookmarks(pushop, bundler):
1020 1020 """handle bookmark push through bundle2"""
1021 1021 if b'bookmarks' in pushop.stepsdone:
1022 1022 return
1023 1023 b2caps = bundle2.bundle2caps(pushop.remote)
1024 1024
1025 1025 legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
1026 1026 legacybooks = b'bookmarks' in legacy
1027 1027
1028 1028 if not legacybooks and b'bookmarks' in b2caps:
1029 1029 return _pushb2bookmarkspart(pushop, bundler)
1030 1030 elif b'pushkey' in b2caps:
1031 1031 return _pushb2bookmarkspushkey(pushop, bundler)
1032 1032
1033 1033
1034 1034 def _bmaction(old, new):
1035 1035 """small utility for bookmark pushing"""
1036 1036 if not old:
1037 1037 return b'export'
1038 1038 elif not new:
1039 1039 return b'delete'
1040 1040 return b'update'
1041 1041
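For reference, the mapping above resolves as follows:

    _bmaction(b'', node)    # -> b'export' (bookmark is new on the remote)
    _bmaction(node, b'')    # -> b'delete' (bookmark was removed locally)
    _bmaction(node1, node2) # -> b'update' (bookmark moved)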
1042 1042
1043 1043 def _abortonsecretctx(pushop, node, b):
1044 1044 """abort if a given bookmark points to a secret changeset"""
1045 1045 if node and pushop.repo[node].phase() == phases.secret:
1046 1046 raise error.Abort(
1047 1047 _(b'cannot push bookmark %s as it points to a secret changeset') % b
1048 1048 )
1049 1049
1050 1050
1051 1051 def _pushb2bookmarkspart(pushop, bundler):
1052 1052 pushop.stepsdone.add(b'bookmarks')
1053 1053 if not pushop.outbookmarks:
1054 1054 return
1055 1055
1056 1056 allactions = []
1057 1057 data = []
1058 1058 for book, old, new in pushop.outbookmarks:
1059 1059 _abortonsecretctx(pushop, new, book)
1060 1060 data.append((book, new))
1061 1061 allactions.append((book, _bmaction(old, new)))
1062 1062 checkdata = bookmod.binaryencode(pushop.repo, data)
1063 1063 bundler.newpart(b'bookmarks', data=checkdata)
1064 1064
1065 1065 def handlereply(op):
1066 1066 ui = pushop.ui
1067 1067 # if success
1068 1068 for book, action in allactions:
1069 1069 ui.status(bookmsgmap[action][0] % book)
1070 1070
1071 1071 return handlereply
1072 1072
1073 1073
1074 1074 def _pushb2bookmarkspushkey(pushop, bundler):
1075 1075 pushop.stepsdone.add(b'bookmarks')
1076 1076 part2book = []
1077 1077 enc = pushkey.encode
1078 1078
1079 1079 def handlefailure(pushop, exc):
1080 1080 targetid = int(exc.partid)
1081 1081 for partid, book, action in part2book:
1082 1082 if partid == targetid:
1083 1083 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1084 1084 # we should not be called for parts we did not generate
1085 1085 assert False
1086 1086
1087 1087 for book, old, new in pushop.outbookmarks:
1088 1088 _abortonsecretctx(pushop, new, book)
1089 1089 part = bundler.newpart(b'pushkey')
1090 1090 part.addparam(b'namespace', enc(b'bookmarks'))
1091 1091 part.addparam(b'key', enc(book))
1092 1092 part.addparam(b'old', enc(hex(old)))
1093 1093 part.addparam(b'new', enc(hex(new)))
1094 1094 action = b'update'
1095 1095 if not old:
1096 1096 action = b'export'
1097 1097 elif not new:
1098 1098 action = b'delete'
1099 1099 part2book.append((part.id, book, action))
1100 1100 pushop.pkfailcb[part.id] = handlefailure
1101 1101
1102 1102 def handlereply(op):
1103 1103 ui = pushop.ui
1104 1104 for partid, book, action in part2book:
1105 1105 partrep = op.records.getreplies(partid)
1106 1106 results = partrep[b'pushkey']
1107 1107 assert len(results) <= 1
1108 1108 if not results:
1109 1109 pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
1110 1110 else:
1111 1111 ret = int(results[0][b'return'])
1112 1112 if ret:
1113 1113 ui.status(bookmsgmap[action][0] % book)
1114 1114 else:
1115 1115 ui.warn(bookmsgmap[action][1] % book)
1116 1116 if pushop.bkresult is not None:
1117 1117 pushop.bkresult = 1
1118 1118
1119 1119 return handlereply
1120 1120
1121 1121
1122 1122 @b2partsgenerator(b'pushvars', idx=0)
1123 1123 def _getbundlesendvars(pushop, bundler):
1124 1124 '''send shellvars via bundle2'''
1125 1125 pushvars = pushop.pushvars
1126 1126 if pushvars:
1127 1127 shellvars = {}
1128 1128 for raw in pushvars:
1129 1129 if b'=' not in raw:
1130 1130 msg = (
1131 1131 b"unable to parse variable '%s', should follow "
1132 1132 b"'KEY=VALUE' or 'KEY=' format"
1133 1133 )
1134 1134 raise error.Abort(msg % raw)
1135 1135 k, v = raw.split(b'=', 1)
1136 1136 shellvars[k] = v
1137 1137
1138 1138 part = bundler.newpart(b'pushvars')
1139 1139
1140 1140 for key, value in shellvars.items():
1141 1141 part.addparam(key, value, mandatory=False)
1142 1142
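These variables come from the --pushvars option of `hg push`; hooks on the
server can then read them from their environment. A sketch (the variable
names are hypothetical):

    $ hg push --pushvars "DEBUG=1" --pushvars "BYPASS_REVIEW="
    # server-side hooks would see HG_USERVAR_DEBUG=1 and
    # HG_USERVAR_BYPASS_REVIEW in their environment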
1143 1143
1144 1144 def _pushbundle2(pushop):
1145 1145 """push data to the remote using bundle2
1146 1146
1147 1147 The only currently supported type of data is changegroup but this will
1148 1148 evolve in the future."""
1149 1149 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1150 1150 pushback = pushop.trmanager and pushop.ui.configbool(
1151 1151 b'experimental', b'bundle2.pushback'
1152 1152 )
1153 1153
1154 1154 # create reply capability
1155 1155 capsblob = bundle2.encodecaps(
1156 1156 bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
1157 1157 )
1158 1158 bundler.newpart(b'replycaps', data=capsblob)
1159 1159 replyhandlers = []
1160 1160 for partgenname in b2partsgenorder:
1161 1161 partgen = b2partsgenmapping[partgenname]
1162 1162 ret = partgen(pushop, bundler)
1163 1163 if callable(ret):
1164 1164 replyhandlers.append(ret)
1165 1165 # do not push if nothing to push
1166 1166 if bundler.nbparts <= 1:
1167 1167 return
1168 1168 stream = util.chunkbuffer(bundler.getchunks())
1169 1169 try:
1170 1170 try:
1171 1171 with pushop.remote.commandexecutor() as e:
1172 1172 reply = e.callcommand(
1173 1173 b'unbundle',
1174 1174 {
1175 1175 b'bundle': stream,
1176 1176 b'heads': [b'force'],
1177 1177 b'url': pushop.remote.url(),
1178 1178 },
1179 1179 ).result()
1180 1180 except error.BundleValueError as exc:
1181 1181 raise error.RemoteError(_(b'missing support for %s') % exc)
1182 1182 try:
1183 1183 trgetter = None
1184 1184 if pushback:
1185 1185 trgetter = pushop.trmanager.transaction
1186 1186 op = bundle2.processbundle(
1187 1187 pushop.repo,
1188 1188 reply,
1189 1189 trgetter,
1190 1190 remote=pushop.remote,
1191 1191 )
1192 1192 except error.BundleValueError as exc:
1193 1193 raise error.RemoteError(_(b'missing support for %s') % exc)
1194 1194 except bundle2.AbortFromPart as exc:
1195 1195 pushop.ui.error(_(b'remote: %s\n') % exc)
1196 1196 if exc.hint is not None:
1197 1197 pushop.ui.error(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1198 1198 raise error.RemoteError(_(b'push failed on remote'))
1199 1199 except error.PushkeyFailed as exc:
1200 1200 partid = int(exc.partid)
1201 1201 if partid not in pushop.pkfailcb:
1202 1202 raise
1203 1203 pushop.pkfailcb[partid](pushop, exc)
1204 1204 for rephand in replyhandlers:
1205 1205 rephand(op)
1206 1206
1207 1207
1208 1208 def _pushchangeset(pushop):
1209 1209 """Make the actual push of changeset bundle to remote repo"""
1210 1210 if b'changesets' in pushop.stepsdone:
1211 1211 return
1212 1212 pushop.stepsdone.add(b'changesets')
1213 1213 if not _pushcheckoutgoing(pushop):
1214 1214 return
1215 1215
1216 1216 # Should have verified this in push().
1217 1217 assert pushop.remote.capable(b'unbundle')
1218 1218
1219 1219 pushop.repo.prepushoutgoinghooks(pushop)
1220 1220 outgoing = pushop.outgoing
1221 1221 # TODO: get bundlecaps from remote
1222 1222 bundlecaps = None
1223 1223 # create a changegroup from local
1224 1224 if pushop.revs is None and not (
1225 1225 outgoing.excluded or pushop.repo.changelog.filteredrevs
1226 1226 ):
1227 1227 # push everything,
1228 1228 # use the fast path, no race possible on push
1229 1229 cg = changegroup.makechangegroup(
1230 1230 pushop.repo,
1231 1231 outgoing,
1232 1232 b'01',
1233 1233 b'push',
1234 1234 fastpath=True,
1235 1235 bundlecaps=bundlecaps,
1236 1236 )
1237 1237 else:
1238 1238 cg = changegroup.makechangegroup(
1239 1239 pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
1240 1240 )
1241 1241
1242 1242 # apply changegroup to remote
1243 1243 # local repo finds heads on server, finds out what
1244 1244 # revs it must push. once revs transferred, if server
1245 1245 # finds it has different heads (someone else won
1246 1246 # commit/push race), server aborts.
1247 1247 if pushop.force:
1248 1248 remoteheads = [b'force']
1249 1249 else:
1250 1250 remoteheads = pushop.remoteheads
1251 1251 # ssh: return remote's addchangegroup()
1252 1252 # http: return remote's addchangegroup() or 0 for error
1253 1253 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1254 1254
1255 1255
1256 1256 def _pushsyncphase(pushop):
1257 1257 """synchronise phase information locally and remotely"""
1258 1258 cheads = pushop.commonheads
1259 1259 # even when we don't push, exchanging phase data is useful
1260 1260 remotephases = listkeys(pushop.remote, b'phases')
1261 1261 if (
1262 1262 pushop.ui.configbool(b'ui', b'_usedassubrepo')
1263 1263 and remotephases # server supports phases
1264 1264 and pushop.cgresult is None # nothing was pushed
1265 1265 and remotephases.get(b'publishing', False)
1266 1266 ):
1267 1267 # When:
1268 1268 # - this is a subrepo push
1269 1269 # - and the remote supports phases
1270 1270 # - and no changesets were pushed
1271 1271 # - and the remote is publishing
1272 1272 # we may be in the issue 3871 case!
1273 1273 # We skip the courtesy phase synchronisation that would
1274 1274 # otherwise publish changesets that may still be draft
1275 1275 # on the remote.
1276 1276 remotephases = {b'publishing': b'True'}
1277 1277 if not remotephases: # old server, or public-only reply from a non-publishing one
1278 1278 _localphasemove(pushop, cheads)
1279 1279 # don't push any phase data as there is nothing to push
1280 1280 else:
1281 1281 ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
1282 1282 pheads, droots = ana
1283 1283 ### Apply remote phase on local
1284 1284 if remotephases.get(b'publishing', False):
1285 1285 _localphasemove(pushop, cheads)
1286 1286 else: # publish = False
1287 1287 _localphasemove(pushop, pheads)
1288 1288 _localphasemove(pushop, cheads, phases.draft)
1289 1289 ### Apply local phase on remote
1290 1290
1291 1291 if pushop.cgresult:
1292 1292 if b'phases' in pushop.stepsdone:
1293 1293 # phases already pushed through bundle2
1294 1294 return
1295 1295 outdated = pushop.outdatedphases
1296 1296 else:
1297 1297 outdated = pushop.fallbackoutdatedphases
1298 1298
1299 1299 pushop.stepsdone.add(b'phases')
1300 1300
1301 1301 # filter heads already turned public by the push
1302 1302 outdated = [c for c in outdated if c.node() not in pheads]
1303 1303 # fallback to independent pushkey command
1304 1304 for newremotehead in outdated:
1305 1305 with pushop.remote.commandexecutor() as e:
1306 1306 r = e.callcommand(
1307 1307 b'pushkey',
1308 1308 {
1309 1309 b'namespace': b'phases',
1310 1310 b'key': newremotehead.hex(),
1311 1311 b'old': b'%d' % phases.draft,
1312 1312 b'new': b'%d' % phases.public,
1313 1313 },
1314 1314 ).result()
1315 1315
1316 1316 if not r:
1317 1317 pushop.ui.warn(
1318 1318 _(b'updating %s to public failed!\n') % newremotehead
1319 1319 )
1320 1320
1321 1321
1322 1322 def _localphasemove(pushop, nodes, phase=phases.public):
1323 1323 """move <nodes> to <phase> in the local source repo"""
1324 1324 if pushop.trmanager:
1325 1325 phases.advanceboundary(
1326 1326 pushop.repo, pushop.trmanager.transaction(), phase, nodes
1327 1327 )
1328 1328 else:
1329 1329 # repo is not locked, do not change any phases!
1330 1330 # Inform the user that phases should have been moved when
1331 1331 # applicable.
1332 1332 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1333 1333 phasestr = phases.phasenames[phase]
1334 1334 if actualmoves:
1335 1335 pushop.ui.status(
1336 1336 _(
1337 1337 b'cannot lock source repo, skipping '
1338 1338 b'local %s phase update\n'
1339 1339 )
1340 1340 % phasestr
1341 1341 )
1342 1342
1343 1343
1344 1344 def _pushobsolete(pushop):
1345 1345 """utility function to push obsolete markers to a remote"""
1346 1346 if b'obsmarkers' in pushop.stepsdone:
1347 1347 return
1348 1348 repo = pushop.repo
1349 1349 remote = pushop.remote
1350 1350 pushop.stepsdone.add(b'obsmarkers')
1351 1351 if pushop.outobsmarkers:
1352 1352 pushop.ui.debug(b'try to push obsolete markers to remote\n')
1353 1353 rslts = []
1354 1354 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1355 1355 remotedata = obsolete._pushkeyescape(markers)
1356 1356 for key in sorted(remotedata, reverse=True):
1357 1357 # reverse sort to ensure we end with dump0
1358 1358 data = remotedata[key]
1359 1359 rslts.append(remote.pushkey(b'obsolete', key, b'', data))
1360 1360 if [r for r in rslts if not r]:
1361 1361 msg = _(b'failed to push some obsolete markers!\n')
1362 1362 repo.ui.warn(msg)
1363 1363
1364 1364
1365 1365 def _pushbookmark(pushop):
1366 1366 """Update bookmark position on remote"""
1367 1367 if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
1368 1368 return
1369 1369 pushop.stepsdone.add(b'bookmarks')
1370 1370 ui = pushop.ui
1371 1371 remote = pushop.remote
1372 1372
1373 1373 for b, old, new in pushop.outbookmarks:
1374 1374 action = b'update'
1375 1375 if not old:
1376 1376 action = b'export'
1377 1377 elif not new:
1378 1378 action = b'delete'
1379 1379
1380 1380 with remote.commandexecutor() as e:
1381 1381 r = e.callcommand(
1382 1382 b'pushkey',
1383 1383 {
1384 1384 b'namespace': b'bookmarks',
1385 1385 b'key': b,
1386 1386 b'old': hex(old),
1387 1387 b'new': hex(new),
1388 1388 },
1389 1389 ).result()
1390 1390
1391 1391 if r:
1392 1392 ui.status(bookmsgmap[action][0] % b)
1393 1393 else:
1394 1394 ui.warn(bookmsgmap[action][1] % b)
1395 1395 # discovery may have set the value from an invalid entry
1396 1396 if pushop.bkresult is not None:
1397 1397 pushop.bkresult = 1
1398 1398
1399 1399
1400 1400 class pulloperation:
1401 1401 """A object that represent a single pull operation
1402 1402
1403 1403 It purpose is to carry pull related state and very common operation.
1404 1404
1405 1405 A new should be created at the beginning of each pull and discarded
1406 1406 afterward.
1407 1407 """
1408 1408
1409 1409 def __init__(
1410 1410 self,
1411 1411 repo,
1412 1412 remote,
1413 1413 heads=None,
1414 1414 force=False,
1415 1415 bookmarks=(),
1416 1416 remotebookmarks=None,
1417 1417 streamclonerequested=None,
1418 1418 includepats=None,
1419 1419 excludepats=None,
1420 1420 depth=None,
1421 1421 path=None,
1422 1422 ):
1423 1423 # repo we pull into
1424 1424 self.repo = repo
1425 1425 # repo we pull from
1426 1426 self.remote = remote
1427 1427 # path object used to build this remote
1428 1428 #
1429 1429 # Ideally, the remote peer would carry that directly.
1430 1430 self.remote_path = path
1431 1431 # revision we try to pull (None is "all")
1432 1432 self.heads = heads
1433 1433 # bookmark pulled explicitly
1434 1434 self.explicitbookmarks = [
1435 1435 repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
1436 1436 ]
1437 1437 # do we force pull?
1438 1438 self.force = force
1439 1439 # whether a streaming clone was requested
1440 1440 self.streamclonerequested = streamclonerequested
1441 1441 # transaction manager
1442 1442 self.trmanager = None
1443 1443 # set of common changesets between local and remote before the pull
1444 1444 self.common = None
1445 1445 # set of pulled heads
1446 1446 self.rheads = None
1447 1447 # list of missing changesets to fetch remotely
1448 1448 self.fetch = None
1449 1449 # remote bookmarks data
1450 1450 self.remotebookmarks = remotebookmarks
1451 1451 # result of changegroup pulling (used as return code by pull)
1452 1452 self.cgresult = None
1453 1453 # list of steps already done
1454 1454 self.stepsdone = set()
1455 1455 # Whether we attempted a clone from pre-generated bundles.
1456 1456 self.clonebundleattempted = False
1457 1457 # Set of file patterns to include.
1458 1458 self.includepats = includepats
1459 1459 # Set of file patterns to exclude.
1460 1460 self.excludepats = excludepats
1461 1461 # Number of ancestor changesets to pull from each pulled head.
1462 1462 self.depth = depth
1463 1463
1464 1464 @util.propertycache
1465 1465 def pulledsubset(self):
1466 1466 """heads of the set of changeset target by the pull"""
1467 1467 # compute target subset
1468 1468 if self.heads is None:
1469 1469 # We pulled everything possible
1470 1470 # sync on everything common
1471 1471 c = set(self.common)
1472 1472 ret = list(self.common)
1473 1473 for n in self.rheads:
1474 1474 if n not in c:
1475 1475 ret.append(n)
1476 1476 return ret
1477 1477 else:
1478 1478 # We pulled a specific subset
1479 1479 # sync on this subset
1480 1480 return self.heads
1481 1481
1482 1482 @util.propertycache
1483 1483 def canusebundle2(self):
1484 1484 return not _forcebundle1(self)
1485 1485
1486 1486 @util.propertycache
1487 1487 def remotebundle2caps(self):
1488 1488 return bundle2.bundle2caps(self.remote)
1489 1489
1490 1490 def gettransaction(self):
1491 1491 # deprecated; talk to trmanager directly
1492 1492 return self.trmanager.transaction()
1493 1493
1494 1494
1495 1495 class transactionmanager(util.transactional):
1496 1496 """An object to manage the life cycle of a transaction
1497 1497
1498 1498 It creates the transaction on demand and calls the appropriate hooks when
1499 1499 closing the transaction."""
1500 1500
1501 1501 def __init__(self, repo, source, url):
1502 1502 self.repo = repo
1503 1503 self.source = source
1504 1504 self.url = url
1505 1505 self._tr = None
1506 1506
1507 1507 def transaction(self):
1508 1508 """Return an open transaction object, constructing if necessary"""
1509 1509 if not self._tr:
1510 1510 trname = b'%s\n%s' % (self.source, urlutil.hidepassword(self.url))
1511 1511 self._tr = self.repo.transaction(trname)
1512 1512 self._tr.hookargs[b'source'] = self.source
1513 1513 self._tr.hookargs[b'url'] = self.url
1514 1514 return self._tr
1515 1515
1516 1516 def close(self):
1517 1517 """close transaction if created"""
1518 1518 if self._tr is not None:
1519 1519 self._tr.close()
1520 1520
1521 1521 def release(self):
1522 1522 """release transaction if created"""
1523 1523 if self._tr is not None:
1524 1524 self._tr.release()
1525 1525
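Typical usage, mirroring this module: create the manager up front and
materialize the transaction lazily (a sketch; repo and remote are assumed):

    trmanager = transactionmanager(repo, b'pull', remote.url())
    with trmanager:  # closes on success, releases either way
        tr = trmanager.transaction()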
1526 1526
1527 1527 def listkeys(remote, namespace):
1528 1528 with remote.commandexecutor() as e:
1529 1529 return e.callcommand(b'listkeys', {b'namespace': namespace}).result()
1530 1530
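A sketch of what this helper returns, e.g. for the phases namespace used
throughout this module:

    remotephases = listkeys(remote, b'phases')
    # e.g. {b'publishing': b'True'} for a publishing server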
1531 1531
1532 1532 def _fullpullbundle2(repo, pullop):
1533 1533 # The server may send a partial reply, i.e. when inlining
1534 1534 # pre-computed bundles. In that case, update the common
1535 1535 # set based on the results and pull another bundle.
1536 1536 #
1537 1537 # There are two indicators that the process is finished:
1538 1538 # - no changeset has been added, or
1539 1539 # - all remote heads are known locally.
1540 1540 # The head check must use the unfiltered view as obsolescence
1541 1541 # markers can hide heads.
1542 1542 unfi = repo.unfiltered()
1543 1543 unficl = unfi.changelog
1544 1544
1545 1545 def headsofdiff(h1, h2):
1546 1546 """Returns heads(h1 % h2)"""
1547 1547 res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
1548 1548 return {ctx.node() for ctx in res}
1549 1549
1550 1550 def headsofunion(h1, h2):
1551 1551 """Returns heads((h1 + h2) - null)"""
1552 1552 res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
1553 1553 return {ctx.node() for ctx in res}
1554 1554
1555 1555 while True:
1556 1556 old_heads = unficl.heads()
1557 1557 clstart = len(unficl)
1558 1558 _pullbundle2(pullop)
1559 1559 if requirements.NARROW_REQUIREMENT in repo.requirements:
1560 1560 # XXX narrow clones filter the heads on the server side during
1561 1561 # XXX getbundle and result in partial replies as well.
1562 1562 # XXX Disable pull bundles in this case as band aid to avoid
1563 1563 # XXX extra round trips.
1564 1564 break
1565 1565 if clstart == len(unficl):
1566 1566 break
1567 1567 if all(unficl.hasnode(n) for n in pullop.rheads):
1568 1568 break
1569 1569 new_heads = headsofdiff(unficl.heads(), old_heads)
1570 1570 pullop.common = headsofunion(new_heads, pullop.common)
1571 1571 pullop.rheads = set(pullop.rheads) - pullop.common
1572 1572
1573 1573
1574 1574 def add_confirm_callback(repo, pullop):
1575 1575 """adds a finalize callback to transaction which can be used to show stats
1576 1576 to user and confirm the pull before committing transaction"""
1577 1577
1578 1578 tr = pullop.trmanager.transaction()
1579 1579 scmutil.registersummarycallback(
1580 1580 repo, tr, txnname=b'pull', as_validator=True
1581 1581 )
1582 1582 reporef = weakref.ref(repo.unfiltered())
1583 1583
1584 1584 def prompt(tr):
1585 1585 repo = reporef()
1586 1586 cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No')
1587 1587 if repo.ui.promptchoice(cm):
1588 1588 raise error.Abort(b"user aborted")
1589 1589
1590 1590 tr.addvalidator(b'900-pull-prompt', prompt)
1591 1591
1592 1592
1593 1593 def pull(
1594 1594 repo,
1595 1595 remote,
1596 1596 path=None,
1597 1597 heads=None,
1598 1598 force=False,
1599 1599 bookmarks=(),
1600 1600 opargs=None,
1601 1601 streamclonerequested=None,
1602 1602 includepats=None,
1603 1603 excludepats=None,
1604 1604 depth=None,
1605 1605 confirm=None,
1606 1606 ):
1607 1607 """Fetch repository data from a remote.
1608 1608
1609 1609 This is the main function used to retrieve data from a remote repository.
1610 1610
1611 1611 ``repo`` is the local repository to clone into.
1612 1612 ``remote`` is a peer instance.
1613 1613 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1614 1614 default) means to pull everything from the remote.
1615 1615 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1616 1616 default, all remote bookmarks are pulled.
1617 1617 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1618 1618 initialization.
1619 1619 ``streamclonerequested`` is a boolean indicating whether a "streaming
1620 1620 clone" is requested. A "streaming clone" is essentially a raw file copy
1621 1621 of revlogs from the server. This only works when the local repository is
1622 1622 empty. The default value of ``None`` means to respect the server
1623 1623 configuration for preferring stream clones.
1624 1624 ``includepats`` and ``excludepats`` define explicit file patterns to
1625 1625 include and exclude in storage, respectively. If not defined, narrow
1626 1626 patterns from the repo instance are used, if available.
1627 1627 ``depth`` is an integer indicating the DAG depth of history we're
1628 1628 interested in. If defined, for each revision specified in ``heads``, we
1629 1629 will fetch up to this many of its ancestors and data associated with them.
1630 1630 ``confirm`` is a boolean indicating whether the pull should be confirmed
1631 1631 before committing the transaction. This overrides HGPLAIN.
1632 1632
1633 1633 Returns the ``pulloperation`` created for this pull.
1634 1634 """
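    # A minimal usage sketch (hypothetical caller; ``repo`` is a local
    # repository object and ``remote`` a peer, e.g. obtained via hg.peer()):
    #
    #   pullop = pull(repo, remote)                   # pull everything
    #   pullop = pull(repo, remote, heads=[node],
    #                 bookmarks=(b'stable',))         # a narrower request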
1635 1635 if opargs is None:
1636 1636 opargs = {}
1637 1637
1638 1638 # We allow the narrow patterns to be passed in explicitly to provide more
1639 1639 # flexibility for API consumers.
1640 if includepats or excludepats:
1640 if includepats is not None or excludepats is not None:
1641 1641 includepats = includepats or set()
1642 1642 excludepats = excludepats or set()
1643 1643 else:
1644 1644 includepats, excludepats = repo.narrowpats
1645 1645
1646 1646 narrowspec.validatepatterns(includepats)
1647 1647 narrowspec.validatepatterns(excludepats)
1648 1648
1649 1649 pullop = pulloperation(
1650 1650 repo,
1651 1651 remote,
1652 1652 path=path,
1653 1653 heads=heads,
1654 1654 force=force,
1655 1655 bookmarks=bookmarks,
1656 1656 streamclonerequested=streamclonerequested,
1657 1657 includepats=includepats,
1658 1658 excludepats=excludepats,
1659 1659 depth=depth,
1660 1660 **pycompat.strkwargs(opargs)
1661 1661 )
1662 1662
1663 1663 peerlocal = pullop.remote.local()
1664 1664 if peerlocal:
1665 1665 missing = set(peerlocal.requirements) - pullop.repo.supported
1666 1666 if missing:
1667 1667 msg = _(
1668 1668 b"required features are not"
1669 1669 b" supported in the destination:"
1670 1670 b" %s"
1671 1671 ) % (b', '.join(sorted(missing)))
1672 1672 raise error.Abort(msg)
1673 1673
1674 1674 for category in repo._wanted_sidedata:
1675 1675 # Check that a computer is registered for that category for at least
1676 1676 # one revlog kind.
1677 1677 for kind, computers in repo._sidedata_computers.items():
1678 1678 if computers.get(category):
1679 1679 break
1680 1680 else:
1681 1681 # This should never happen since repos are supposed to be able to
1682 1682 # generate the sidedata they require.
1683 1683 raise error.ProgrammingError(
1684 1684 _(
1685 1685 b'sidedata category requested by local side without local'
1686 1686 b"support: '%s'"
1687 1687 )
1688 1688 % pycompat.bytestr(category)
1689 1689 )
1690 1690
1691 1691 pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
1692 1692 wlock = util.nullcontextmanager()
1693 1693 if not bookmod.bookmarksinstore(repo):
1694 1694 wlock = repo.wlock()
1695 1695 with wlock, repo.lock(), pullop.trmanager:
1696 1696 if confirm or (
1697 1697 repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain()
1698 1698 ):
1699 1699 add_confirm_callback(repo, pullop)
1700 1700
1701 1701 # This should ideally be in _pullbundle2(). However, it needs to run
1702 1702 # before discovery to avoid extra work.
1703 1703 _maybeapplyclonebundle(pullop)
1704 1704 streamclone.maybeperformlegacystreamclone(pullop)
1705 1705 _pulldiscovery(pullop)
1706 1706 if pullop.canusebundle2:
1707 1707 _fullpullbundle2(repo, pullop)
1708 1708 _pullchangeset(pullop)
1709 1709 _pullphase(pullop)
1710 1710 _pullbookmarks(pullop)
1711 1711 _pullobsolete(pullop)
1712 1712
1713 1713 # storing remotenames
1714 1714 if repo.ui.configbool(b'experimental', b'remotenames'):
1715 1715 logexchange.pullremotenames(repo, remote)
1716 1716
1717 1717 return pullop
1718 1718
1719 1719
1720 1720 # list of steps to perform discovery before pull
1721 1721 pulldiscoveryorder = []
1722 1722
1723 1723 # Mapping between step name and function
1724 1724 #
1725 1725 # This exists to help extensions wrap steps if necessary
1726 1726 pulldiscoverymapping = {}
1727 1727
1728 1728
1729 1729 def pulldiscovery(stepname):
1730 1730 """decorator for function performing discovery before pull
1731 1731
1732 1732 The function is added to the step -> function mapping and appended to the
1733 1733     list of steps. Beware that decorated functions will be added in order (this
1734 1734 may matter).
1735 1735
1736 1736     You can only use this decorator for a new step; if you want to wrap a step
1737 1737     from an extension, change the pulldiscoverymapping dictionary directly."""
1738 1738
1739 1739 def dec(func):
1740 1740 assert stepname not in pulldiscoverymapping
1741 1741 pulldiscoverymapping[stepname] = func
1742 1742 pulldiscoveryorder.append(stepname)
1743 1743 return func
1744 1744
1745 1745 return dec
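
# Sketch of how an extension could register an additional discovery step
# (hypothetical step name and function):
#
#   @pulldiscovery(b'myext:ping')
#   def _pulldiscoveryping(pullop):
#       pullop.repo.ui.debug(b'myext: extra discovery step\n')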
1746 1746
1747 1747
1748 1748 def _pulldiscovery(pullop):
1749 1749 """Run all discovery steps"""
1750 1750 for stepname in pulldiscoveryorder:
1751 1751 step = pulldiscoverymapping[stepname]
1752 1752 step(pullop)
1753 1753
1754 1754
1755 1755 @pulldiscovery(b'b1:bookmarks')
1756 1756 def _pullbookmarkbundle1(pullop):
1757 1757 """fetch bookmark data in bundle1 case
1758 1758
1759 1759 If not using bundle2, we have to fetch bookmarks before changeset
1760 1760 discovery to reduce the chance and impact of race conditions."""
1761 1761 if pullop.remotebookmarks is not None:
1762 1762 return
1763 1763 if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
1764 1764         # all known bundle2 servers now support listkeys, but let's be nice with
1765 1765         # new implementations.
1766 1766 return
1767 1767 books = listkeys(pullop.remote, b'bookmarks')
1768 1768 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1769 1769
1770 1770
1771 1771 @pulldiscovery(b'changegroup')
1772 1772 def _pulldiscoverychangegroup(pullop):
1773 1773 """discovery phase for the pull
1774 1774
1775 1775     Currently handles changeset discovery only; will change to handle all
1776 1776     discovery at some point."""
1777 1777 tmp = discovery.findcommonincoming(
1778 1778 pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
1779 1779 )
1780 1780 common, fetch, rheads = tmp
1781 1781 has_node = pullop.repo.unfiltered().changelog.index.has_node
1782 1782 if fetch and rheads:
1783 1783         # If a remote head is filtered locally, put it back in common.
1784 1784         #
1785 1785         # This is a hackish solution to catch most of the "common but locally
1786 1786         # hidden" situations. We do not perform discovery on the unfiltered
1787 1787         # repository because it ends up doing a pathological number of round
1788 1788         # trips for a huge amount of changesets we do not care about.
1789 1789         #
1790 1790         # If a set of such "common but filtered" changesets exists on the server
1791 1791         # but does not include a remote head, we will not be able to detect it.
1792 1792 scommon = set(common)
1793 1793 for n in rheads:
1794 1794 if has_node(n):
1795 1795 if n not in scommon:
1796 1796 common.append(n)
1797 1797 if set(rheads).issubset(set(common)):
1798 1798 fetch = []
1799 1799 pullop.common = common
1800 1800 pullop.fetch = fetch
1801 1801 pullop.rheads = rheads
1802 1802
1803 1803
1804 1804 def _pullbundle2(pullop):
1805 1805 """pull data using bundle2
1806 1806
1807 1807     For now, the only supported data are changegroups."""
1808 1808 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1809 1809
1810 1810 # make ui easier to access
1811 1811 ui = pullop.repo.ui
1812 1812
1813 1813 # At the moment we don't do stream clones over bundle2. If that is
1814 1814 # implemented then here's where the check for that will go.
1815 1815 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1816 1816
1817 1817 # declare pull perimeters
1818 1818 kwargs[b'common'] = pullop.common
1819 1819 kwargs[b'heads'] = pullop.heads or pullop.rheads
1820 1820
1821 1821     # check whether the server supports narrow, then add includepats/excludepats
1822 1822 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1823 1823 if servernarrow and pullop.includepats:
1824 1824 kwargs[b'includepats'] = pullop.includepats
1825 1825 if servernarrow and pullop.excludepats:
1826 1826 kwargs[b'excludepats'] = pullop.excludepats
1827 1827
1828 1828 if streaming:
1829 1829 kwargs[b'cg'] = False
1830 1830 kwargs[b'stream'] = True
1831 1831 pullop.stepsdone.add(b'changegroup')
1832 1832 pullop.stepsdone.add(b'phases')
1833 1833
1834 1834 else:
1835 1835 # pulling changegroup
1836 1836 pullop.stepsdone.add(b'changegroup')
1837 1837
1838 1838 kwargs[b'cg'] = pullop.fetch
1839 1839
1840 1840 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1841 1841 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1842 1842 if not legacyphase and hasbinaryphase:
1843 1843 kwargs[b'phases'] = True
1844 1844 pullop.stepsdone.add(b'phases')
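    # For testing, the binary phases (and bookmarks) parts can be disabled
    # via the devel knob read above, e.g. in an hgrc:
    #
    #   [devel]
    #   legacy.exchange = phases, bookmarks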
1845 1845
1846 1846 if b'listkeys' in pullop.remotebundle2caps:
1847 1847 if b'phases' not in pullop.stepsdone:
1848 1848 kwargs[b'listkeys'] = [b'phases']
1849 1849
1850 1850 bookmarksrequested = False
1851 1851 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1852 1852 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1853 1853
1854 1854 if pullop.remotebookmarks is not None:
1855 1855 pullop.stepsdone.add(b'request-bookmarks')
1856 1856
1857 1857 if (
1858 1858 b'request-bookmarks' not in pullop.stepsdone
1859 1859 and pullop.remotebookmarks is None
1860 1860 and not legacybookmark
1861 1861 and hasbinarybook
1862 1862 ):
1863 1863 kwargs[b'bookmarks'] = True
1864 1864 bookmarksrequested = True
1865 1865
1866 1866 if b'listkeys' in pullop.remotebundle2caps:
1867 1867 if b'request-bookmarks' not in pullop.stepsdone:
1868 1868             # make sure to always include bookmark data when migrating
1869 1869             # `hg incoming --bundle` to using this function.
1870 1870 pullop.stepsdone.add(b'request-bookmarks')
1871 1871 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1872 1872
1873 1873 # If this is a full pull / clone and the server supports the clone bundles
1874 1874 # feature, tell the server whether we attempted a clone bundle. The
1875 1875 # presence of this flag indicates the client supports clone bundles. This
1876 1876 # will enable the server to treat clients that support clone bundles
1877 1877 # differently from those that don't.
1878 1878 if (
1879 1879 pullop.remote.capable(b'clonebundles')
1880 1880 and pullop.heads is None
1881 1881 and list(pullop.common) == [pullop.repo.nullid]
1882 1882 ):
1883 1883 kwargs[b'cbattempted'] = pullop.clonebundleattempted
1884 1884
1885 1885 if streaming:
1886 1886 pullop.repo.ui.status(_(b'streaming all changes\n'))
1887 1887 elif not pullop.fetch:
1888 1888 pullop.repo.ui.status(_(b"no changes found\n"))
1889 1889 pullop.cgresult = 0
1890 1890 else:
1891 1891 if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
1892 1892 pullop.repo.ui.status(_(b"requesting all changes\n"))
1893 1893 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1894 1894 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1895 1895 if obsolete.commonversion(remoteversions) is not None:
1896 1896 kwargs[b'obsmarkers'] = True
1897 1897 pullop.stepsdone.add(b'obsmarkers')
1898 1898 _pullbundle2extraprepare(pullop, kwargs)
1899 1899
1900 1900 remote_sidedata = bundle2.read_remote_wanted_sidedata(pullop.remote)
1901 1901 if remote_sidedata:
1902 1902 kwargs[b'remote_sidedata'] = remote_sidedata
1903 1903
1904 1904 with pullop.remote.commandexecutor() as e:
1905 1905 args = dict(kwargs)
1906 1906 args[b'source'] = b'pull'
1907 1907 bundle = e.callcommand(b'getbundle', args).result()
1908 1908
1909 1909 try:
1910 1910 op = bundle2.bundleoperation(
1911 1911 pullop.repo,
1912 1912 pullop.gettransaction,
1913 1913 source=b'pull',
1914 1914 remote=pullop.remote,
1915 1915 )
1916 1916 op.modes[b'bookmarks'] = b'records'
1917 1917 bundle2.processbundle(
1918 1918 pullop.repo,
1919 1919 bundle,
1920 1920 op=op,
1921 1921 remote=pullop.remote,
1922 1922 )
1923 1923 except bundle2.AbortFromPart as exc:
1924 1924 pullop.repo.ui.error(_(b'remote: abort: %s\n') % exc)
1925 1925 raise error.RemoteError(_(b'pull failed on remote'), hint=exc.hint)
1926 1926 except error.BundleValueError as exc:
1927 1927 raise error.RemoteError(_(b'missing support for %s') % exc)
1928 1928
1929 1929 if pullop.fetch:
1930 1930 pullop.cgresult = bundle2.combinechangegroupresults(op)
1931 1931
1932 1932 # processing phases change
1933 1933 for namespace, value in op.records[b'listkeys']:
1934 1934 if namespace == b'phases':
1935 1935 _pullapplyphases(pullop, value)
1936 1936
1937 1937 # processing bookmark update
1938 1938 if bookmarksrequested:
1939 1939 books = {}
1940 1940 for record in op.records[b'bookmarks']:
1941 1941 books[record[b'bookmark']] = record[b"node"]
1942 1942 pullop.remotebookmarks = books
1943 1943 else:
1944 1944 for namespace, value in op.records[b'listkeys']:
1945 1945 if namespace == b'bookmarks':
1946 1946 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1947 1947
1948 1948 # bookmark data were either already there or pulled in the bundle
1949 1949 if pullop.remotebookmarks is not None:
1950 1950 _pullbookmarks(pullop)
1951 1951
1952 1952
1953 1953 def _pullbundle2extraprepare(pullop, kwargs):
1954 1954 """hook function so that extensions can extend the getbundle call"""
1955 1955
1956 1956
1957 1957 def _pullchangeset(pullop):
1958 1958 """pull changeset from unbundle into the local repo"""
1959 1959     # We delay opening the transaction as late as possible so we
1960 1960     # don't open a transaction for nothing and don't break future useful
1961 1961     # rollback calls
1962 1962 if b'changegroup' in pullop.stepsdone:
1963 1963 return
1964 1964 pullop.stepsdone.add(b'changegroup')
1965 1965 if not pullop.fetch:
1966 1966 pullop.repo.ui.status(_(b"no changes found\n"))
1967 1967 pullop.cgresult = 0
1968 1968 return
1969 1969 tr = pullop.gettransaction()
1970 1970 if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
1971 1971 pullop.repo.ui.status(_(b"requesting all changes\n"))
1972 1972 elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
1973 1973 # issue1320, avoid a race if remote changed after discovery
1974 1974 pullop.heads = pullop.rheads
1975 1975
1976 1976 if pullop.remote.capable(b'getbundle'):
1977 1977 # TODO: get bundlecaps from remote
1978 1978 cg = pullop.remote.getbundle(
1979 1979 b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
1980 1980 )
1981 1981 elif pullop.heads is None:
1982 1982 with pullop.remote.commandexecutor() as e:
1983 1983 cg = e.callcommand(
1984 1984 b'changegroup',
1985 1985 {
1986 1986 b'nodes': pullop.fetch,
1987 1987 b'source': b'pull',
1988 1988 },
1989 1989 ).result()
1990 1990
1991 1991 elif not pullop.remote.capable(b'changegroupsubset'):
1992 1992 raise error.Abort(
1993 1993 _(
1994 1994 b"partial pull cannot be done because "
1995 1995 b"other repository doesn't support "
1996 1996 b"changegroupsubset."
1997 1997 )
1998 1998 )
1999 1999 else:
2000 2000 with pullop.remote.commandexecutor() as e:
2001 2001 cg = e.callcommand(
2002 2002 b'changegroupsubset',
2003 2003 {
2004 2004 b'bases': pullop.fetch,
2005 2005 b'heads': pullop.heads,
2006 2006 b'source': b'pull',
2007 2007 },
2008 2008 ).result()
2009 2009
2010 2010 bundleop = bundle2.applybundle(
2011 2011 pullop.repo,
2012 2012 cg,
2013 2013 tr,
2014 2014 b'pull',
2015 2015 pullop.remote.url(),
2016 2016 remote=pullop.remote,
2017 2017 )
2018 2018 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2019 2019
2020 2020
2021 2021 def _pullphase(pullop):
2022 2022 # Get remote phases data from remote
2023 2023 if b'phases' in pullop.stepsdone:
2024 2024 return
2025 2025 remotephases = listkeys(pullop.remote, b'phases')
2026 2026 _pullapplyphases(pullop, remotephases)
2027 2027
2028 2028
2029 2029 def _pullapplyphases(pullop, remotephases):
2030 2030 """apply phase movement from observed remote state"""
2031 2031 if b'phases' in pullop.stepsdone:
2032 2032 return
2033 2033 pullop.stepsdone.add(b'phases')
2034 2034 publishing = bool(remotephases.get(b'publishing', False))
2035 2035 if remotephases and not publishing:
2036 2036 # remote is new and non-publishing
2037 2037 pheads, _dr = phases.analyzeremotephases(
2038 2038 pullop.repo, pullop.pulledsubset, remotephases
2039 2039 )
2040 2040 dheads = pullop.pulledsubset
2041 2041 else:
2042 2042         # Remote is old or publishing; all common changesets
2043 2043         # should be seen as public
2044 2044 pheads = pullop.pulledsubset
2045 2045 dheads = []
2046 2046 unfi = pullop.repo.unfiltered()
2047 2047 phase = unfi._phasecache.phase
2048 2048 rev = unfi.changelog.index.get_rev
2049 2049 public = phases.public
2050 2050 draft = phases.draft
2051 2051
2052 2052 # exclude changesets already public locally and update the others
2053 2053 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
2054 2054 if pheads:
2055 2055 tr = pullop.gettransaction()
2056 2056 phases.advanceboundary(pullop.repo, tr, public, pheads)
2057 2057
2058 2058 # exclude changesets already draft locally and update the others
2059 2059 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
2060 2060 if dheads:
2061 2061 tr = pullop.gettransaction()
2062 2062 phases.advanceboundary(pullop.repo, tr, draft, dheads)
2063 2063
2064 2064
2065 2065 def _pullbookmarks(pullop):
2066 2066 """process the remote bookmark information to update the local one"""
2067 2067 if b'bookmarks' in pullop.stepsdone:
2068 2068 return
2069 2069 pullop.stepsdone.add(b'bookmarks')
2070 2070 repo = pullop.repo
2071 2071 remotebookmarks = pullop.remotebookmarks
2072 2072 bookmarks_mode = None
2073 2073 if pullop.remote_path is not None:
2074 2074 bookmarks_mode = pullop.remote_path.bookmarks_mode
2075 2075 bookmod.updatefromremote(
2076 2076 repo.ui,
2077 2077 repo,
2078 2078 remotebookmarks,
2079 2079 pullop.remote.url(),
2080 2080 pullop.gettransaction,
2081 2081 explicit=pullop.explicitbookmarks,
2082 2082 mode=bookmarks_mode,
2083 2083 )
2084 2084
2085 2085
2086 2086 def _pullobsolete(pullop):
2087 2087 """utility function to pull obsolete markers from a remote
2088 2088
2089 2089     `gettransaction` is a function that returns the pull transaction, creating
2090 2090     one if necessary. We return the transaction to inform the calling code that
2091 2091     a new transaction has been created (when applicable).
2092 2092
2093 2093     Exists mostly to allow overriding for experimentation purposes"""
2094 2094 if b'obsmarkers' in pullop.stepsdone:
2095 2095 return
2096 2096 pullop.stepsdone.add(b'obsmarkers')
2097 2097 tr = None
2098 2098 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2099 2099 pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
2100 2100 remoteobs = listkeys(pullop.remote, b'obsolete')
2101 2101 if b'dump0' in remoteobs:
2102 2102 tr = pullop.gettransaction()
2103 2103 markers = []
2104 2104 for key in sorted(remoteobs, reverse=True):
2105 2105 if key.startswith(b'dump'):
2106 2106 data = util.b85decode(remoteobs[key])
2107 2107 version, newmarks = obsolete._readmarkers(data)
2108 2108 markers += newmarks
2109 2109 if markers:
2110 2110 pullop.repo.obsstore.add(tr, markers)
2111 2111 pullop.repo.invalidatevolatilesets()
2112 2112 return tr
2113 2113
2114 2114
2115 2115 def applynarrowacl(repo, kwargs):
2116 2116 """Apply narrow fetch access control.
2117 2117
2118 2118 This massages the named arguments for getbundle wire protocol commands
2119 2119 so requested data is filtered through access control rules.
2120 2120 """
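    # A sketch of the server-side configuration consumed here (section name
    # from _NARROWACL_SECTION; user and path values are illustrative):
    #
    #   [narrowacl]
    #   default.includes = *
    #   alice.includes = src, docs
    #   alice.excludes = src/secret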
2121 2121 ui = repo.ui
2122 2122 # TODO this assumes existence of HTTP and is a layering violation.
2123 2123 username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
2124 2124 user_includes = ui.configlist(
2125 2125 _NARROWACL_SECTION,
2126 2126 username + b'.includes',
2127 2127 ui.configlist(_NARROWACL_SECTION, b'default.includes'),
2128 2128 )
2129 2129 user_excludes = ui.configlist(
2130 2130 _NARROWACL_SECTION,
2131 2131 username + b'.excludes',
2132 2132 ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
2133 2133 )
2134 2134 if not user_includes:
2135 2135 raise error.Abort(
2136 2136 _(b"%s configuration for user %s is empty")
2137 2137 % (_NARROWACL_SECTION, username)
2138 2138 )
2139 2139
2140 2140 user_includes = [
2141 2141 b'path:.' if p == b'*' else b'path:' + p for p in user_includes
2142 2142 ]
2143 2143 user_excludes = [
2144 2144 b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
2145 2145 ]
2146 2146
2147 2147 req_includes = set(kwargs.get('includepats', []))
2148 2148 req_excludes = set(kwargs.get('excludepats', []))
2149 2149
2150 2150 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
2151 2151 req_includes, req_excludes, user_includes, user_excludes
2152 2152 )
2153 2153
2154 2154 if invalid_includes:
2155 2155 raise error.Abort(
2156 2156 _(b"The following includes are not accessible for %s: %s")
2157 2157 % (username, stringutil.pprint(invalid_includes))
2158 2158 )
2159 2159
2160 2160 new_args = {}
2161 2161 new_args.update(kwargs)
2162 2162 new_args['narrow'] = True
2163 2163 new_args['narrow_acl'] = True
2164 2164 new_args['includepats'] = req_includes
2165 2165 if req_excludes:
2166 2166 new_args['excludepats'] = req_excludes
2167 2167
2168 2168 return new_args
2169 2169
2170 2170
2171 2171 def _computeellipsis(repo, common, heads, known, match, depth=None):
2172 2172 """Compute the shape of a narrowed DAG.
2173 2173
2174 2174 Args:
2175 2175 repo: The repository we're transferring.
2176 2176 common: The roots of the DAG range we're transferring.
2177 2177 May be just [nullid], which means all ancestors of heads.
2178 2178 heads: The heads of the DAG range we're transferring.
2179 2179 match: The narrowmatcher that allows us to identify relevant changes.
2180 2180 depth: If not None, only consider nodes to be full nodes if they are at
2181 2181 most depth changesets away from one of heads.
2182 2182
2183 2183 Returns:
2184 2184 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
2185 2185
2186 2186 visitnodes: The list of nodes (either full or ellipsis) which
2187 2187 need to be sent to the client.
2188 2188 relevant_nodes: The set of changelog nodes which change a file inside
2189 2189 the narrowspec. The client needs these as non-ellipsis nodes.
2190 2190 ellipsisroots: A dict of {rev: parents} that is used in
2191 2191 narrowchangegroup to produce ellipsis nodes with the
2192 2192 correct parents.
2193 2193 """
2194 2194 cl = repo.changelog
2195 2195 mfl = repo.manifestlog
2196 2196
2197 2197 clrev = cl.rev
2198 2198
2199 2199 commonrevs = {clrev(n) for n in common} | {nullrev}
2200 2200 headsrevs = {clrev(n) for n in heads}
2201 2201
2202 2202 if depth:
2203 2203 revdepth = {h: 0 for h in headsrevs}
2204 2204
2205 2205 ellipsisheads = collections.defaultdict(set)
2206 2206 ellipsisroots = collections.defaultdict(set)
2207 2207
2208 2208 def addroot(head, curchange):
2209 2209 """Add a root to an ellipsis head, splitting heads with 3 roots."""
2210 2210 ellipsisroots[head].add(curchange)
2211 2211 # Recursively split ellipsis heads with 3 roots by finding the
2212 2212 # roots' youngest common descendant which is an elided merge commit.
2213 2213 # That descendant takes 2 of the 3 roots as its own, and becomes a
2214 2214 # root of the head.
2215 2215 while len(ellipsisroots[head]) > 2:
2216 2216 child, roots = splithead(head)
2217 2217 splitroots(head, child, roots)
2218 2218 head = child # Recurse in case we just added a 3rd root
2219 2219
2220 2220 def splitroots(head, child, roots):
2221 2221 ellipsisroots[head].difference_update(roots)
2222 2222 ellipsisroots[head].add(child)
2223 2223 ellipsisroots[child].update(roots)
2224 2224 ellipsisroots[child].discard(child)
2225 2225
2226 2226 def splithead(head):
2227 2227 r1, r2, r3 = sorted(ellipsisroots[head])
2228 2228 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
2229 2229 mid = repo.revs(
2230 2230 b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
2231 2231 )
2232 2232 for j in mid:
2233 2233 if j == nr2:
2234 2234 return nr2, (nr1, nr2)
2235 2235 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
2236 2236 return j, (nr1, nr2)
2237 2237 raise error.Abort(
2238 2238 _(
2239 2239 b'Failed to split up ellipsis node! head: %d, '
2240 2240 b'roots: %d %d %d'
2241 2241 )
2242 2242 % (head, r1, r2, r3)
2243 2243 )
2244 2244
2245 2245 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
2246 2246 visit = reversed(missing)
2247 2247 relevant_nodes = set()
2248 2248 visitnodes = [cl.node(m) for m in missing]
2249 2249 required = set(headsrevs) | known
2250 2250 for rev in visit:
2251 2251 clrev = cl.changelogrevision(rev)
2252 2252 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
2253 2253 if depth is not None:
2254 2254 curdepth = revdepth[rev]
2255 2255 for p in ps:
2256 2256 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
2257 2257 needed = False
2258 2258 shallow_enough = depth is None or revdepth[rev] <= depth
2259 2259 if shallow_enough:
2260 2260 curmf = mfl[clrev.manifest].read()
2261 2261 if ps:
2262 2262 # We choose to not trust the changed files list in
2263 2263 # changesets because it's not always correct. TODO: could
2264 2264 # we trust it for the non-merge case?
2265 2265 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
2266 2266 needed = bool(curmf.diff(p1mf, match))
2267 2267 if not needed and len(ps) > 1:
2268 2268 # For merge changes, the list of changed files is not
2269 2269 # helpful, since we need to emit the merge if a file
2270 2270 # in the narrow spec has changed on either side of the
2271 2271 # merge. As a result, we do a manifest diff to check.
2272 2272 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2273 2273 needed = bool(curmf.diff(p2mf, match))
2274 2274 else:
2275 2275 # For a root node, we need to include the node if any
2276 2276 # files in the node match the narrowspec.
2277 2277 needed = any(curmf.walk(match))
2278 2278
2279 2279 if needed:
2280 2280 for head in ellipsisheads[rev]:
2281 2281 addroot(head, rev)
2282 2282 for p in ps:
2283 2283 required.add(p)
2284 2284 relevant_nodes.add(cl.node(rev))
2285 2285 else:
2286 2286 if not ps:
2287 2287 ps = [nullrev]
2288 2288 if rev in required:
2289 2289 for head in ellipsisheads[rev]:
2290 2290 addroot(head, rev)
2291 2291 for p in ps:
2292 2292 ellipsisheads[p].add(rev)
2293 2293 else:
2294 2294 for p in ps:
2295 2295 ellipsisheads[p] |= ellipsisheads[rev]
2296 2296
2297 2297 # add common changesets as roots of their reachable ellipsis heads
2298 2298 for c in commonrevs:
2299 2299 for head in ellipsisheads[c]:
2300 2300 addroot(head, c)
2301 2301 return visitnodes, relevant_nodes, ellipsisroots
2302 2302
2303 2303
2304 2304 def caps20to10(repo, role):
2305 2305 """return a set with appropriate options to use bundle20 during getbundle"""
2306 2306 caps = {b'HG20'}
2307 2307 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2308 2308 caps.add(b'bundle2=' + urlreq.quote(capsblob))
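    # The result is a set like (abbreviated, illustrative blob):
    #   {b'HG20', b'bundle2=HG20%0Achangegroup%3D01%2C02'}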
2309 2309 return caps
2310 2310
2311 2311
2312 2312 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2313 2313 getbundle2partsorder = []
2314 2314
2315 2315 # Mapping between step name and function
2316 2316 #
2317 2317 # This exists to help extensions wrap steps if necessary
2318 2318 getbundle2partsmapping = {}
2319 2319
2320 2320
2321 2321 def getbundle2partsgenerator(stepname, idx=None):
2322 2322 """decorator for function generating bundle2 part for getbundle
2323 2323
2324 2324 The function is added to the step -> function mapping and appended to the
2325 2325 list of steps. Beware that decorated functions will be added in order
2326 2326 (this may matter).
2327 2327
2328 2328     You can only use this decorator for new steps; if you want to wrap a step
2329 2329     from an extension, modify the getbundle2partsmapping dictionary directly."""
2330 2330
2331 2331 def dec(func):
2332 2332 assert stepname not in getbundle2partsmapping
2333 2333 getbundle2partsmapping[stepname] = func
2334 2334 if idx is None:
2335 2335 getbundle2partsorder.append(stepname)
2336 2336 else:
2337 2337 getbundle2partsorder.insert(idx, stepname)
2338 2338 return func
2339 2339
2340 2340 return dec
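
# Sketch of how an extension could contribute its own bundle2 part
# (hypothetical part name and payload):
#
#   @getbundle2partsgenerator(b'myext:extradata')
#   def _getbundleextradatapart(bundler, repo, source, **kwargs):
#       bundler.newpart(b'myext:extradata', data=b'payload')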
2341 2341
2342 2342
2343 2343 def bundle2requested(bundlecaps):
2344 2344 if bundlecaps is not None:
2345 2345 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2346 2346 return False
2347 2347
2348 2348
2349 2349 def getbundlechunks(
2350 2350 repo,
2351 2351 source,
2352 2352 heads=None,
2353 2353 common=None,
2354 2354 bundlecaps=None,
2355 2355 remote_sidedata=None,
2356 2356 **kwargs
2357 2357 ):
2358 2358 """Return chunks constituting a bundle's raw data.
2359 2359
2360 2360 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2361 2361 passed.
2362 2362
2363 2363 Returns a 2-tuple of a dict with metadata about the generated bundle
2364 2364 and an iterator over raw chunks (of varying sizes).
2365 2365 """
2366 2366 kwargs = pycompat.byteskwargs(kwargs)
2367 2367 info = {}
2368 2368 usebundle2 = bundle2requested(bundlecaps)
2369 2369 # bundle10 case
2370 2370 if not usebundle2:
2371 2371 if bundlecaps and not kwargs.get(b'cg', True):
2372 2372 raise ValueError(
2373 2373 _(b'request for bundle10 must include changegroup')
2374 2374 )
2375 2375
2376 2376 if kwargs:
2377 2377 raise ValueError(
2378 2378 _(b'unsupported getbundle arguments: %s')
2379 2379 % b', '.join(sorted(kwargs.keys()))
2380 2380 )
2381 2381 outgoing = _computeoutgoing(repo, heads, common)
2382 2382 info[b'bundleversion'] = 1
2383 2383 return (
2384 2384 info,
2385 2385 changegroup.makestream(
2386 2386 repo,
2387 2387 outgoing,
2388 2388 b'01',
2389 2389 source,
2390 2390 bundlecaps=bundlecaps,
2391 2391 remote_sidedata=remote_sidedata,
2392 2392 ),
2393 2393 )
2394 2394
2395 2395 # bundle20 case
2396 2396 info[b'bundleversion'] = 2
2397 2397 b2caps = {}
2398 2398 for bcaps in bundlecaps:
2399 2399 if bcaps.startswith(b'bundle2='):
2400 2400 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2401 2401 b2caps.update(bundle2.decodecaps(blob))
2402 2402 bundler = bundle2.bundle20(repo.ui, b2caps)
2403 2403
2404 2404 kwargs[b'heads'] = heads
2405 2405 kwargs[b'common'] = common
2406 2406
2407 2407 for name in getbundle2partsorder:
2408 2408 func = getbundle2partsmapping[name]
2409 2409 func(
2410 2410 bundler,
2411 2411 repo,
2412 2412 source,
2413 2413 bundlecaps=bundlecaps,
2414 2414 b2caps=b2caps,
2415 2415 remote_sidedata=remote_sidedata,
2416 2416 **pycompat.strkwargs(kwargs)
2417 2417 )
2418 2418
2419 2419 info[b'prefercompressed'] = bundler.prefercompressed
2420 2420
2421 2421 return info, bundler.getchunks()
2422 2422
2423 2423
2424 2424 @getbundle2partsgenerator(b'stream')
2425 2425 def _getbundlestream2(bundler, repo, *args, **kwargs):
2426 2426 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2427 2427
2428 2428
2429 2429 @getbundle2partsgenerator(b'changegroup')
2430 2430 def _getbundlechangegrouppart(
2431 2431 bundler,
2432 2432 repo,
2433 2433 source,
2434 2434 bundlecaps=None,
2435 2435 b2caps=None,
2436 2436 heads=None,
2437 2437 common=None,
2438 2438 remote_sidedata=None,
2439 2439 **kwargs
2440 2440 ):
2441 2441 """add a changegroup part to the requested bundle"""
2442 2442 if not kwargs.get('cg', True) or not b2caps:
2443 2443 return
2444 2444
2445 2445 version = b'01'
2446 2446 cgversions = b2caps.get(b'changegroup')
2447 2447 if cgversions: # 3.1 and 3.2 ship with an empty value
2448 2448 cgversions = [
2449 2449 v
2450 2450 for v in cgversions
2451 2451 if v in changegroup.supportedoutgoingversions(repo)
2452 2452 ]
2453 2453 if not cgversions:
2454 2454 raise error.Abort(_(b'no common changegroup version'))
2455 2455 version = max(cgversions)
2456 2456
2457 2457 outgoing = _computeoutgoing(repo, heads, common)
2458 2458 if not outgoing.missing:
2459 2459 return
2460 2460
2461 2461 if kwargs.get('narrow', False):
2462 2462 include = sorted(filter(bool, kwargs.get('includepats', [])))
2463 2463 exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
2464 2464 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2465 2465 else:
2466 2466 matcher = None
2467 2467
2468 2468 cgstream = changegroup.makestream(
2469 2469 repo,
2470 2470 outgoing,
2471 2471 version,
2472 2472 source,
2473 2473 bundlecaps=bundlecaps,
2474 2474 matcher=matcher,
2475 2475 remote_sidedata=remote_sidedata,
2476 2476 )
2477 2477
2478 2478 part = bundler.newpart(b'changegroup', data=cgstream)
2479 2479 if cgversions:
2480 2480 part.addparam(b'version', version)
2481 2481
2482 2482 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2483 2483
2484 2484 if scmutil.istreemanifest(repo):
2485 2485 part.addparam(b'treemanifest', b'1')
2486 2486
2487 2487 if repository.REPO_FEATURE_SIDE_DATA in repo.features:
2488 2488 part.addparam(b'exp-sidedata', b'1')
2489 2489 sidedata = bundle2.format_remote_wanted_sidedata(repo)
2490 2490 part.addparam(b'exp-wanted-sidedata', sidedata)
2491 2491
2492 2492 if (
2493 2493 kwargs.get('narrow', False)
2494 2494 and kwargs.get('narrow_acl', False)
2495 2495 and (include or exclude)
2496 2496 ):
2497 2497 # this is mandatory because otherwise ACL clients won't work
2498 2498 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2499 2499 narrowspecpart.data = b'%s\0%s' % (
2500 2500 b'\n'.join(include),
2501 2501 b'\n'.join(exclude),
2502 2502 )
2503 2503
2504 2504
2505 2505 @getbundle2partsgenerator(b'bookmarks')
2506 2506 def _getbundlebookmarkpart(
2507 2507 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2508 2508 ):
2509 2509 """add a bookmark part to the requested bundle"""
2510 2510 if not kwargs.get('bookmarks', False):
2511 2511 return
2512 2512 if not b2caps or b'bookmarks' not in b2caps:
2513 2513 raise error.Abort(_(b'no common bookmarks exchange method'))
2514 2514 books = bookmod.listbinbookmarks(repo)
2515 2515 data = bookmod.binaryencode(repo, books)
2516 2516 if data:
2517 2517 bundler.newpart(b'bookmarks', data=data)
2518 2518
2519 2519
2520 2520 @getbundle2partsgenerator(b'listkeys')
2521 2521 def _getbundlelistkeysparts(
2522 2522 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2523 2523 ):
2524 2524 """add parts containing listkeys namespaces to the requested bundle"""
2525 2525 listkeys = kwargs.get('listkeys', ())
2526 2526 for namespace in listkeys:
2527 2527 part = bundler.newpart(b'listkeys')
2528 2528 part.addparam(b'namespace', namespace)
2529 2529 keys = repo.listkeys(namespace).items()
2530 2530 part.data = pushkey.encodekeys(keys)
2531 2531
2532 2532
2533 2533 @getbundle2partsgenerator(b'obsmarkers')
2534 2534 def _getbundleobsmarkerpart(
2535 2535 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2536 2536 ):
2537 2537 """add an obsolescence markers part to the requested bundle"""
2538 2538 if kwargs.get('obsmarkers', False):
2539 2539 if heads is None:
2540 2540 heads = repo.heads()
2541 2541 subset = [c.node() for c in repo.set(b'::%ln', heads)]
2542 2542 markers = repo.obsstore.relevantmarkers(subset)
2543 2543 markers = obsutil.sortedmarkers(markers)
2544 2544 bundle2.buildobsmarkerspart(bundler, markers)
2545 2545
2546 2546
2547 2547 @getbundle2partsgenerator(b'phases')
2548 2548 def _getbundlephasespart(
2549 2549 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2550 2550 ):
2551 2551 """add phase heads part to the requested bundle"""
2552 2552 if kwargs.get('phases', False):
2553 2553 if not b2caps or b'heads' not in b2caps.get(b'phases'):
2554 2554 raise error.Abort(_(b'no common phases exchange method'))
2555 2555 if heads is None:
2556 2556 heads = repo.heads()
2557 2557
2558 2558 headsbyphase = collections.defaultdict(set)
2559 2559 if repo.publishing():
2560 2560 headsbyphase[phases.public] = heads
2561 2561 else:
2562 2562 # find the appropriate heads to move
2563 2563
2564 2564 phase = repo._phasecache.phase
2565 2565 node = repo.changelog.node
2566 2566 rev = repo.changelog.rev
2567 2567 for h in heads:
2568 2568 headsbyphase[phase(repo, rev(h))].add(h)
2569 2569 seenphases = list(headsbyphase.keys())
2570 2570
2571 2571             # We do not handle anything but public and draft phases for now
2572 2572 if seenphases:
2573 2573 assert max(seenphases) <= phases.draft
2574 2574
2575 2575 # if client is pulling non-public changesets, we need to find
2576 2576 # intermediate public heads.
2577 2577 draftheads = headsbyphase.get(phases.draft, set())
2578 2578 if draftheads:
2579 2579 publicheads = headsbyphase.get(phases.public, set())
2580 2580
2581 2581 revset = b'heads(only(%ln, %ln) and public())'
2582 2582 extraheads = repo.revs(revset, draftheads, publicheads)
2583 2583 for r in extraheads:
2584 2584 headsbyphase[phases.public].add(node(r))
2585 2585
2586 2586 # transform data in a format used by the encoding function
2587 2587 phasemapping = {
2588 2588 phase: sorted(headsbyphase[phase]) for phase in phases.allphases
2589 2589 }
2590 2590
2591 2591 # generate the actual part
2592 2592 phasedata = phases.binaryencode(phasemapping)
2593 2593 bundler.newpart(b'phase-heads', data=phasedata)
2594 2594
2595 2595
2596 2596 @getbundle2partsgenerator(b'hgtagsfnodes')
2597 2597 def _getbundletagsfnodes(
2598 2598 bundler,
2599 2599 repo,
2600 2600 source,
2601 2601 bundlecaps=None,
2602 2602 b2caps=None,
2603 2603 heads=None,
2604 2604 common=None,
2605 2605 **kwargs
2606 2606 ):
2607 2607 """Transfer the .hgtags filenodes mapping.
2608 2608
2609 2609 Only values for heads in this bundle will be transferred.
2610 2610
2611 2611 The part data consists of pairs of 20 byte changeset node and .hgtags
2612 2612 filenodes raw values.
2613 2613 """
2614 2614 # Don't send unless:
2615 2615     # - changesets are being exchanged,
2616 2616 # - the client supports it.
2617 2617 if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
2618 2618 return
2619 2619
2620 2620 outgoing = _computeoutgoing(repo, heads, common)
2621 2621 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2622 2622
2623 2623
2624 2624 @getbundle2partsgenerator(b'cache:rev-branch-cache')
2625 2625 def _getbundlerevbranchcache(
2626 2626 bundler,
2627 2627 repo,
2628 2628 source,
2629 2629 bundlecaps=None,
2630 2630 b2caps=None,
2631 2631 heads=None,
2632 2632 common=None,
2633 2633 **kwargs
2634 2634 ):
2635 2635 """Transfer the rev-branch-cache mapping
2636 2636
2637 2637 The payload is a series of data related to each branch
2638 2638
2639 2639 1) branch name length
2640 2640 2) number of open heads
2641 2641 3) number of closed heads
2642 2642 4) open heads nodes
2643 2643 5) closed heads nodes
2644 2644 """
2645 2645 # Don't send unless:
2646 2646     # - changesets are being exchanged,
2647 2647 # - the client supports it.
2648 2648 # - narrow bundle isn't in play (not currently compatible).
2649 2649 if (
2650 2650 not kwargs.get('cg', True)
2651 2651 or not b2caps
2652 2652 or b'rev-branch-cache' not in b2caps
2653 2653 or kwargs.get('narrow', False)
2654 2654 or repo.ui.has_section(_NARROWACL_SECTION)
2655 2655 ):
2656 2656 return
2657 2657
2658 2658 outgoing = _computeoutgoing(repo, heads, common)
2659 2659 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2660 2660
2661 2661
2662 2662 def check_heads(repo, their_heads, context):
2663 2663 """check if the heads of a repo have been modified
2664 2664
2665 2665 Used by peer for unbundling.
2666 2666 """
2667 2667 heads = repo.heads()
2668 2668 heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()
2669 2669 if not (
2670 2670 their_heads == [b'force']
2671 2671 or their_heads == heads
2672 2672 or their_heads == [b'hashed', heads_hash]
2673 2673 ):
2674 2674 # someone else committed/pushed/unbundled while we
2675 2675 # were transferring data
2676 2676 raise error.PushRaced(
2677 2677 b'repository changed while %s - please try again' % context
2678 2678 )
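
# A client-side sketch of producing the race-check token compared above
# (illustrative; it mirrors the hashing performed in check_heads):
#
#   remoteheads = peer.heads()
#   token = [b'hashed', hashutil.sha1(b''.join(sorted(remoteheads))).digest()]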
2679 2679
2680 2680
2681 2681 def unbundle(repo, cg, heads, source, url):
2682 2682 """Apply a bundle to a repo.
2683 2683
2684 2684     This function makes sure the repo is locked during the application and has
2685 2685     a mechanism to check that no push race occurred between the creation of the
2686 2686     bundle and its application.
2687 2687
2688 2688     If the push was raced, a PushRaced exception is raised."""
2689 2689 r = 0
2690 2690 # need a transaction when processing a bundle2 stream
2691 2691 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2692 2692 lockandtr = [None, None, None]
2693 2693 recordout = None
2694 2694 # quick fix for output mismatch with bundle2 in 3.4
2695 2695 captureoutput = repo.ui.configbool(
2696 2696 b'experimental', b'bundle2-output-capture'
2697 2697 )
2698 2698 if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
2699 2699 captureoutput = True
2700 2700 try:
2701 2701         # note: outside bundle1, 'heads' is expected to be empty and this
2702 2702         # 'check_heads' call will be a no-op
2703 2703 check_heads(repo, heads, b'uploading changes')
2704 2704 # push can proceed
2705 2705 if not isinstance(cg, bundle2.unbundle20):
2706 2706 # legacy case: bundle1 (changegroup 01)
2707 2707 txnname = b"\n".join([source, urlutil.hidepassword(url)])
2708 2708 with repo.lock(), repo.transaction(txnname) as tr:
2709 2709 op = bundle2.applybundle(repo, cg, tr, source, url)
2710 2710 r = bundle2.combinechangegroupresults(op)
2711 2711 else:
2712 2712 r = None
2713 2713 try:
2714 2714
2715 2715 def gettransaction():
2716 2716 if not lockandtr[2]:
2717 2717 if not bookmod.bookmarksinstore(repo):
2718 2718 lockandtr[0] = repo.wlock()
2719 2719 lockandtr[1] = repo.lock()
2720 2720 lockandtr[2] = repo.transaction(source)
2721 2721 lockandtr[2].hookargs[b'source'] = source
2722 2722 lockandtr[2].hookargs[b'url'] = url
2723 2723 lockandtr[2].hookargs[b'bundle2'] = b'1'
2724 2724 return lockandtr[2]
2725 2725
2726 2726 # Do greedy locking by default until we're satisfied with lazy
2727 2727 # locking.
2728 2728 if not repo.ui.configbool(
2729 2729 b'experimental', b'bundle2lazylocking'
2730 2730 ):
2731 2731 gettransaction()
2732 2732
2733 2733 op = bundle2.bundleoperation(
2734 2734 repo,
2735 2735 gettransaction,
2736 2736 captureoutput=captureoutput,
2737 2737 source=b'push',
2738 2738 )
2739 2739 try:
2740 2740 op = bundle2.processbundle(repo, cg, op=op)
2741 2741 finally:
2742 2742 r = op.reply
2743 2743 if captureoutput and r is not None:
2744 2744 repo.ui.pushbuffer(error=True, subproc=True)
2745 2745
2746 2746 def recordout(output):
2747 2747 r.newpart(b'output', data=output, mandatory=False)
2748 2748
2749 2749 if lockandtr[2] is not None:
2750 2750 lockandtr[2].close()
2751 2751 except BaseException as exc:
2752 2752 exc.duringunbundle2 = True
2753 2753 if captureoutput and r is not None:
2754 2754 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2755 2755
2756 2756 def recordout(output):
2757 2757 part = bundle2.bundlepart(
2758 2758 b'output', data=output, mandatory=False
2759 2759 )
2760 2760 parts.append(part)
2761 2761
2762 2762 raise
2763 2763 finally:
2764 2764 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2765 2765 if recordout is not None:
2766 2766 recordout(repo.ui.popbuffer())
2767 2767 return r
2768 2768
2769 2769
2770 2770 def _maybeapplyclonebundle(pullop):
2771 2771 """Apply a clone bundle from a remote, if possible."""
2772 2772
2773 2773 repo = pullop.repo
2774 2774 remote = pullop.remote
2775 2775
2776 2776 if not repo.ui.configbool(b'ui', b'clonebundles'):
2777 2777 return
2778 2778
2779 2779 # Only run if local repo is empty.
2780 2780 if len(repo):
2781 2781 return
2782 2782
2783 2783 if pullop.heads:
2784 2784 return
2785 2785
2786 2786 if not remote.capable(b'clonebundles'):
2787 2787 return
2788 2788
2789 2789 with remote.commandexecutor() as e:
2790 2790 res = e.callcommand(b'clonebundles', {}).result()
2791 2791
2792 2792 # If we call the wire protocol command, that's good enough to record the
2793 2793 # attempt.
2794 2794 pullop.clonebundleattempted = True
2795 2795
2796 2796 entries = bundlecaches.parseclonebundlesmanifest(repo, res)
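    # The manifest is a newline-separated list of entries, each a URL
    # optionally followed by key=value attributes, e.g. (illustrative):
    #
    #   https://example.com/full.hg BUNDLESPEC=zstd-v2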
2797 2797 if not entries:
2798 2798 repo.ui.note(
2799 2799 _(
2800 2800 b'no clone bundles available on remote; '
2801 2801 b'falling back to regular clone\n'
2802 2802 )
2803 2803 )
2804 2804 return
2805 2805
2806 2806 entries = bundlecaches.filterclonebundleentries(
2807 2807 repo, entries, streamclonerequested=pullop.streamclonerequested
2808 2808 )
2809 2809
2810 2810 if not entries:
2811 2811 # There is a thundering herd concern here. However, if a server
2812 2812 # operator doesn't advertise bundles appropriate for its clients,
2813 2813 # they deserve what's coming. Furthermore, from a client's
2814 2814 # perspective, no automatic fallback would mean not being able to
2815 2815 # clone!
2816 2816 repo.ui.warn(
2817 2817 _(
2818 2818 b'no compatible clone bundles available on server; '
2819 2819 b'falling back to regular clone\n'
2820 2820 )
2821 2821 )
2822 2822 repo.ui.warn(
2823 2823 _(b'(you may want to report this to the server operator)\n')
2824 2824 )
2825 2825 return
2826 2826
2827 2827 entries = bundlecaches.sortclonebundleentries(repo.ui, entries)
2828 2828
2829 2829 url = entries[0][b'URL']
2830 2830 repo.ui.status(_(b'applying clone bundle from %s\n') % url)
2831 2831 if trypullbundlefromurl(repo.ui, repo, url):
2832 2832 repo.ui.status(_(b'finished applying clone bundle\n'))
2833 2833 # Bundle failed.
2834 2834 #
2835 2835 # We abort by default to avoid the thundering herd of
2836 2836 # clients flooding a server that was expecting expensive
2837 2837 # clone load to be offloaded.
2838 2838 elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
2839 2839 repo.ui.warn(_(b'falling back to normal clone\n'))
2840 2840 else:
2841 2841 raise error.Abort(
2842 2842 _(b'error applying bundle'),
2843 2843 hint=_(
2844 2844 b'if this error persists, consider contacting '
2845 2845 b'the server operator or disable clone '
2846 2846 b'bundles via '
2847 2847 b'"--config ui.clonebundles=false"'
2848 2848 ),
2849 2849 )
2850 2850
2851 2851
2852 2852 def trypullbundlefromurl(ui, repo, url):
2853 2853 """Attempt to apply a bundle from a URL."""
2854 2854 with repo.lock(), repo.transaction(b'bundleurl') as tr:
2855 2855 try:
2856 2856 fh = urlmod.open(ui, url)
2857 2857 cg = readbundle(ui, fh, b'stream')
2858 2858
2859 2859 if isinstance(cg, streamclone.streamcloneapplier):
2860 2860 cg.apply(repo)
2861 2861 else:
2862 2862 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
2863 2863 return True
2864 2864 except urlerr.httperror as e:
2865 2865 ui.warn(
2866 2866 _(b'HTTP error fetching bundle: %s\n')
2867 2867 % stringutil.forcebytestr(e)
2868 2868 )
2869 2869 except urlerr.urlerror as e:
2870 2870 ui.warn(
2871 2871 _(b'error fetching bundle: %s\n')
2872 2872 % stringutil.forcebytestr(e.reason)
2873 2873 )
2874 2874
2875 2875 return False