bundlespec: fix the generation of bundlespec for `cg.version`...
marmoute - r50229:6d15a897 default
@@ -1,2836 +1,2853 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import collections
10 10 import weakref
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 nullrev,
16 16 )
17 17 from . import (
18 18 bookmarks as bookmod,
19 19 bundle2,
20 20 bundlecaches,
21 21 changegroup,
22 22 discovery,
23 23 error,
24 24 lock as lockmod,
25 25 logexchange,
26 26 narrowspec,
27 27 obsolete,
28 28 obsutil,
29 29 phases,
30 30 pushkey,
31 31 pycompat,
32 32 requirements,
33 33 scmutil,
34 34 streamclone,
35 35 url as urlmod,
36 36 util,
37 37 wireprototypes,
38 38 )
39 39 from .utils import (
40 40 hashutil,
41 41 stringutil,
42 42 urlutil,
43 43 )
44 44 from .interfaces import repository
45 45
46 46 urlerr = util.urlerr
47 47 urlreq = util.urlreq
48 48
49 49 _NARROWACL_SECTION = b'narrowacl'
50 50
51 51
52 52 def readbundle(ui, fh, fname, vfs=None):
53 53 header = changegroup.readexactly(fh, 4)
54 54
55 55 alg = None
56 56 if not fname:
57 57 fname = b"stream"
58 58 if not header.startswith(b'HG') and header.startswith(b'\0'):
59 59 fh = changegroup.headerlessfixup(fh, header)
60 60 header = b"HG10"
61 61 alg = b'UN'
62 62 elif vfs:
63 63 fname = vfs.join(fname)
64 64
65 65 magic, version = header[0:2], header[2:4]
66 66
67 67 if magic != b'HG':
68 68 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
69 69 if version == b'10':
70 70 if alg is None:
71 71 alg = changegroup.readexactly(fh, 2)
72 72 return changegroup.cg1unpacker(fh, alg)
73 73 elif version.startswith(b'2'):
74 74 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
75 75 elif version == b'S1':
76 76 return streamclone.streamcloneapplier(fh)
77 77 else:
78 78 raise error.Abort(
79 79 _(b'%s: unknown bundle version %s') % (fname, version)
80 80 )
81 81
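
The dispatch above keys entirely off the first four bytes of the file. A minimal standalone sketch of the header-to-format mapping (the function name and return strings are illustrative, not part of the changeset):

    def classify_bundle_header(header):
        # header: the first 4 bytes of the bundle file, e.g. b'HG20'
        magic, version = header[0:2], header[2:4]
        if magic != b'HG':
            # headerless changegroups start with b'\0' and get wrapped
            # as an uncompressed HG10 bundle by headerlessfixup()
            return 'headerless changegroup or not a Mercurial bundle'
        if version == b'10':
            return 'bundle1 (a 2-byte compression code follows)'
        if version.startswith(b'2'):
            return 'bundle2'
        if version == b'S1':
            return 'stream clone bundle'
        return 'unknown bundle version'

    assert classify_bundle_header(b'HG20') == 'bundle2'
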
82 82
83 def _format_params(params):
84 parts = []
85 for key, value in sorted(params.items()):
86 value = urlreq.quote(value)
87 parts.append(b"%s=%s" % (key, value))
88 return b';'.join(parts)
89
90
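
The new helper gives the spec a deterministic parameter suffix: keys are sorted and values percent-quoted before joining with ';'. A rough stdlib equivalent, assuming urlreq.quote behaves like urllib.parse.quote (illustrative only):

    from urllib.parse import quote

    def format_params(params):
        parts = []
        for key, value in sorted(params.items()):
            # quote() returns str even for bytes input, so re-encode
            parts.append(b"%s=%s" % (key, quote(value).encode('ascii')))
        return b';'.join(parts)

    assert format_params({b'cg.version': b'03'}) == b'cg.version=03'
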
83 91 def getbundlespec(ui, fh):
84 92 """Infer the bundlespec from a bundle file handle.
85 93
86 94 The input file handle is seeked and the original seek position is not
87 95 restored.
88 96 """
89 97
90 98 def speccompression(alg):
91 99 try:
92 100 return util.compengines.forbundletype(alg).bundletype()[0]
93 101 except KeyError:
94 102 return None
95 103
104 params = {}
105
96 106 b = readbundle(ui, fh, None)
97 107 if isinstance(b, changegroup.cg1unpacker):
98 108 alg = b._type
99 109 if alg == b'_truncatedBZ':
100 110 alg = b'BZ'
101 111 comp = speccompression(alg)
102 112 if not comp:
103 113 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
104 114 return b'%s-v1' % comp
105 115 elif isinstance(b, bundle2.unbundle20):
106 116 if b'Compression' in b.params:
107 117 comp = speccompression(b.params[b'Compression'])
108 118 if not comp:
109 119 raise error.Abort(
110 120 _(b'unknown compression algorithm: %s') % comp
111 121 )
112 122 else:
113 123 comp = b'none'
114 124
115 125 version = None
116 126 for part in b.iterparts():
117 127 if part.type == b'changegroup':
118 version = part.params[b'version']
119 if version in (b'01', b'02'):
128 cgversion = part.params[b'version']
129 if cgversion in (b'01', b'02'):
120 130 version = b'v2'
131 elif cgversion in (b'03',):
132 version = b'v2'
133 params[b'cg.version'] = cgversion
121 134 else:
122 135 raise error.Abort(
123 136 _(
124 137 b'changegroup version %s does not have '
125 138 b'a known bundlespec'
126 139 )
127 140 % cgversion,
128 141 hint=_(b'try upgrading your Mercurial client'),
129 142 )
130 143 elif part.type == b'stream2' and version is None:
131 144 # A stream2 part is required to be part of a v2 bundle
132 145 requirements = urlreq.unquote(part.params[b'requirements'])
133 146 splitted = requirements.split()
134 147 params = bundle2._formatrequirementsparams(splitted)
135 148 return b'none-v2;stream=v2;%s' % params
136 149
137 150 if not version:
138 151 raise error.Abort(
139 152 _(b'could not identify changegroup version in bundle')
140 153 )
141
142 return b'%s-%s' % (comp, version)
154 spec = b'%s-%s' % (comp, version)
155 if params:
156 spec += b';'
157 spec += _format_params(params)
158 return spec
159
143 160 elif isinstance(b, streamclone.streamcloneapplier):
144 161 requirements = streamclone.readbundle1header(fh)[2]
145 162 formatted = bundle2._formatrequirementsparams(requirements)
146 163 return b'none-packed1;%s' % formatted
147 164 else:
148 165 raise error.Abort(_(b'unknown bundle type: %s') % b)
149 166
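
This hunk is the substance of the fix: previously a bundle2 file carrying a version-03 changegroup fell through to the 'does not have a known bundlespec' abort, since only '01' and '02' were recognized. Version '03' now also maps to 'v2', with the changegroup version recorded as an explicit parameter. Expected shapes of the inferred spec (compression names are illustrative):

    # bundle2, cg 01/02, zstd-compressed -> b'zstd-v2'
    # bundle2, cg 03, zstd-compressed    -> b'zstd-v2;cg.version=03'
    # bundle1, bzip2-compressed          -> b'bzip2-v1'
    # bundle2 with a stream2 part        -> b'none-v2;stream=v2;<requirements>'
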
150 167
151 168 def _computeoutgoing(repo, heads, common):
152 169 """Computes which revs are outgoing given a set of common
153 170 and a set of heads.
154 171
155 172 This is a separate function so extensions can have access to
156 173 the logic.
157 174
158 175 Returns a discovery.outgoing object.
159 176 """
160 177 cl = repo.changelog
161 178 if common:
162 179 hasnode = cl.hasnode
163 180 common = [n for n in common if hasnode(n)]
164 181 else:
165 182 common = [repo.nullid]
166 183 if not heads:
167 184 heads = cl.heads()
168 185 return discovery.outgoing(repo, common, heads)
169 186
170 187
171 188 def _checkpublish(pushop):
172 189 repo = pushop.repo
173 190 ui = repo.ui
174 191 behavior = ui.config(b'experimental', b'auto-publish')
175 192 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
176 193 return
177 194 remotephases = listkeys(pushop.remote, b'phases')
178 195 if not remotephases.get(b'publishing', False):
179 196 return
180 197
181 198 if pushop.revs is None:
182 199 published = repo.filtered(b'served').revs(b'not public()')
183 200 else:
184 201 published = repo.revs(b'::%ln - public()', pushop.revs)
185 202 # we want to use pushop.revs in the revset even if they themselves are
186 203 # secret, but we don't want to have anything that the server won't see
187 204 # in the result of this expression
188 205 published &= repo.filtered(b'served')
189 206 if published:
190 207 if behavior == b'warn':
191 208 ui.warn(
192 209 _(b'%i changesets about to be published\n') % len(published)
193 210 )
194 211 elif behavior == b'confirm':
195 212 if ui.promptchoice(
196 213 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
197 214 % len(published)
198 215 ):
199 216 raise error.CanceledError(_(b'user quit'))
200 217 elif behavior == b'abort':
201 218 msg = _(b'push would publish %i changesets') % len(published)
202 219 hint = _(
203 220 b"use --publish or adjust 'experimental.auto-publish'"
204 221 b" config"
205 222 )
206 223 raise error.Abort(msg, hint=hint)
207 224
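
The whole check hangs off a single setting (plus the --publish bypass tested above). In an hgrc it would look like this, with the three recognized values taken from the branches above; any other value disables the check:

    [experimental]
    auto-publish = confirm    # or: warn, abort
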
208 225
209 226 def _forcebundle1(op):
210 227 """return true if a pull/push must use bundle1
211 228
212 229 This function is used to allow testing of the older bundle version"""
213 230 ui = op.repo.ui
214 231 # The goal of this config is to allow developers to choose the bundle
215 232 # version used during exchange. This is especially handy during tests.
216 233 # Value is a list of bundle versions to pick from; the highest version
217 234 # should be used.
218 235 #
219 236 # developer config: devel.legacy.exchange
220 237 exchange = ui.configlist(b'devel', b'legacy.exchange')
221 238 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
222 239 return forcebundle1 or not op.remote.capable(b'bundle2')
223 240
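
For tests, this lets a developer pin the exchange format. Note from the expression above that listing 'bundle2' disables the forcing even if 'bundle1' is also present:

    [devel]
    legacy.exchange = bundle1    # force bundle1 with bundle2-capable peers
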
224 241
225 242 class pushoperation:
226 243 """An object that represents a single push operation
227 244
228 245 Its purpose is to carry push-related state and very common operations.
229 246
230 247 A new pushoperation should be created at the beginning of each push and
231 248 discarded afterward.
232 249 """
233 250
234 251 def __init__(
235 252 self,
236 253 repo,
237 254 remote,
238 255 force=False,
239 256 revs=None,
240 257 newbranch=False,
241 258 bookmarks=(),
242 259 publish=False,
243 260 pushvars=None,
244 261 ):
245 262 # repo we push from
246 263 self.repo = repo
247 264 self.ui = repo.ui
248 265 # repo we push to
249 266 self.remote = remote
250 267 # force option provided
251 268 self.force = force
252 269 # revs to be pushed (None is "all")
253 270 self.revs = revs
254 271 # bookmarks explicitly pushed
255 272 self.bookmarks = bookmarks
256 273 # allow push of new branch
257 274 self.newbranch = newbranch
258 275 # steps already performed
259 276 # (used to check what steps have been already performed through bundle2)
260 277 self.stepsdone = set()
261 278 # Integer version of the changegroup push result
262 279 # - None means nothing to push
263 280 # - 0 means HTTP error
264 281 # - 1 means we pushed and remote head count is unchanged *or*
265 282 # we have outgoing changesets but refused to push
266 283 # - other values as described by addchangegroup()
267 284 self.cgresult = None
268 285 # Boolean value for the bookmark push
269 286 self.bkresult = None
270 287 # discover.outgoing object (contains common and outgoing data)
271 288 self.outgoing = None
272 289 # all remote topological heads before the push
273 290 self.remoteheads = None
274 291 # Details of the remote branch pre and post push
275 292 #
276 293 # mapping: {'branch': ([remoteheads],
277 294 # [newheads],
278 295 # [unsyncedheads],
279 296 # [discardedheads])}
280 297 # - branch: the branch name
281 298 # - remoteheads: the list of remote heads known locally
282 299 # None if the branch is new
283 300 # - newheads: the new remote heads (known locally) with outgoing pushed
284 301 # - unsyncedheads: the list of remote heads unknown locally.
285 302 # - discardedheads: the list of remote heads made obsolete by the push
286 303 self.pushbranchmap = None
287 304 # testable as a boolean indicating if any nodes are missing locally.
288 305 self.incoming = None
289 306 # summary of the remote phase situation
290 307 self.remotephases = None
291 308 # phase changes that must be pushed alongside the changesets
292 309 self.outdatedphases = None
293 310 # phase changes that must be pushed if the changeset push fails
294 311 self.fallbackoutdatedphases = None
295 312 # outgoing obsmarkers
296 313 self.outobsmarkers = set()
297 314 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
298 315 self.outbookmarks = []
299 316 # transaction manager
300 317 self.trmanager = None
301 318 # map { pushkey partid -> callback handling failure}
302 319 # used to handle exception from mandatory pushkey part failure
303 320 self.pkfailcb = {}
304 321 # an iterable of pushvars or None
305 322 self.pushvars = pushvars
306 323 # publish pushed changesets
307 324 self.publish = publish
308 325
309 326 @util.propertycache
310 327 def futureheads(self):
311 328 """future remote heads if the changeset push succeeds"""
312 329 return self.outgoing.ancestorsof
313 330
314 331 @util.propertycache
315 332 def fallbackheads(self):
316 333 """future remote heads if the changeset push fails"""
317 334 if self.revs is None:
318 335 # no target to push, all common heads are relevant
319 336 return self.outgoing.commonheads
320 337 unfi = self.repo.unfiltered()
321 338 # I want cheads = heads(::ancestorsof and ::commonheads)
322 339 # (ancestorsof is revs with secret changeset filtered out)
323 340 #
324 341 # This can be expressed as:
325 342 # cheads = ( (ancestorsof and ::commonheads)
326 343 # + (commonheads and ::ancestorsof))
327 344 # )
328 345 #
329 346 # while trying to push we already computed the following:
330 347 # common = (::commonheads)
331 348 # missing = ((commonheads::ancestorsof) - commonheads)
332 349 #
333 350 # We can pick:
334 351 # * ancestorsof part of common (::commonheads)
335 352 common = self.outgoing.common
336 353 rev = self.repo.changelog.index.rev
337 354 cheads = [node for node in self.revs if rev(node) in common]
338 355 # and
339 356 # * commonheads parents on missing
340 357 revset = unfi.set(
341 358 b'%ln and parents(roots(%ln))',
342 359 self.outgoing.commonheads,
343 360 self.outgoing.missing,
344 361 )
345 362 cheads.extend(c.node() for c in revset)
346 363 return cheads
347 364
348 365 @property
349 366 def commonheads(self):
350 367 """set of all common heads after changeset bundle push"""
351 368 if self.cgresult:
352 369 return self.futureheads
353 370 else:
354 371 return self.fallbackheads
355 372
356 373
357 374 # mapping of message used when pushing bookmark
358 375 bookmsgmap = {
359 376 b'update': (
360 377 _(b"updating bookmark %s\n"),
361 378 _(b'updating bookmark %s failed\n'),
362 379 ),
363 380 b'export': (
364 381 _(b"exporting bookmark %s\n"),
365 382 _(b'exporting bookmark %s failed\n'),
366 383 ),
367 384 b'delete': (
368 385 _(b"deleting remote bookmark %s\n"),
369 386 _(b'deleting remote bookmark %s failed\n'),
370 387 ),
371 388 }
372 389
373 390
374 391 def push(
375 392 repo,
376 393 remote,
377 394 force=False,
378 395 revs=None,
379 396 newbranch=False,
380 397 bookmarks=(),
381 398 publish=False,
382 399 opargs=None,
383 400 ):
384 401 """Push outgoing changesets (limited by revs) from a local
385 402 repository to remote. Return an integer:
386 403 - None means nothing to push
387 404 - 0 means HTTP error
388 405 - 1 means we pushed and remote head count is unchanged *or*
389 406 we have outgoing changesets but refused to push
390 407 - other values as described by addchangegroup()
391 408 """
392 409 if opargs is None:
393 410 opargs = {}
394 411 pushop = pushoperation(
395 412 repo,
396 413 remote,
397 414 force,
398 415 revs,
399 416 newbranch,
400 417 bookmarks,
401 418 publish,
402 419 **pycompat.strkwargs(opargs)
403 420 )
404 421 if pushop.remote.local():
405 422 missing = (
406 423 set(pushop.repo.requirements) - pushop.remote.local().supported
407 424 )
408 425 if missing:
409 426 msg = _(
410 427 b"required features are not"
411 428 b" supported in the destination:"
412 429 b" %s"
413 430 ) % (b', '.join(sorted(missing)))
414 431 raise error.Abort(msg)
415 432
416 433 if not pushop.remote.canpush():
417 434 raise error.Abort(_(b"destination does not support push"))
418 435
419 436 if not pushop.remote.capable(b'unbundle'):
420 437 raise error.Abort(
421 438 _(
422 439 b'cannot push: destination does not support the '
423 440 b'unbundle wire protocol command'
424 441 )
425 442 )
426 443 for category in sorted(bundle2.read_remote_wanted_sidedata(pushop.remote)):
427 444 # Check that a computer is registered for that category for at least
428 445 # one revlog kind.
429 446 for kind, computers in repo._sidedata_computers.items():
430 447 if computers.get(category):
431 448 break
432 449 else:
433 450 raise error.Abort(
434 451 _(
435 452 b'cannot push: required sidedata category not supported'
436 453 b" by this client: '%s'"
437 454 )
438 455 % pycompat.bytestr(category)
439 456 )
440 457 # get lock as we might write phase data
441 458 wlock = lock = None
442 459 try:
443 460 # bundle2 push may receive a reply bundle touching bookmarks
444 461 # requiring the wlock. Take it now to ensure proper ordering.
445 462 maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
446 463 if (
447 464 (not _forcebundle1(pushop))
448 465 and maypushback
449 466 and not bookmod.bookmarksinstore(repo)
450 467 ):
451 468 wlock = pushop.repo.wlock()
452 469 lock = pushop.repo.lock()
453 470 pushop.trmanager = transactionmanager(
454 471 pushop.repo, b'push-response', pushop.remote.url()
455 472 )
456 473 except error.LockUnavailable as err:
457 474 # source repo cannot be locked.
458 475 # We do not abort the push, but just disable the local phase
459 476 # synchronisation.
460 477 msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
461 478 err
462 479 )
463 480 pushop.ui.debug(msg)
464 481
465 482 with wlock or util.nullcontextmanager():
466 483 with lock or util.nullcontextmanager():
467 484 with pushop.trmanager or util.nullcontextmanager():
468 485 pushop.repo.checkpush(pushop)
469 486 _checkpublish(pushop)
470 487 _pushdiscovery(pushop)
471 488 if not pushop.force:
472 489 _checksubrepostate(pushop)
473 490 if not _forcebundle1(pushop):
474 491 _pushbundle2(pushop)
475 492 _pushchangeset(pushop)
476 493 _pushsyncphase(pushop)
477 494 _pushobsolete(pushop)
478 495 _pushbookmark(pushop)
479 496
480 497 if repo.ui.configbool(b'experimental', b'remotenames'):
481 498 logexchange.pullremotenames(repo, remote)
482 499
483 500 return pushop
484 501
485 502
486 503 # list of steps to perform discovery before push
487 504 pushdiscoveryorder = []
488 505
489 506 # Mapping between step name and function
490 507 #
491 508 # This exists to help extensions wrap steps if necessary
492 509 pushdiscoverymapping = {}
493 510
494 511
495 512 def pushdiscovery(stepname):
496 513 """decorator for function performing discovery before push
497 514
498 515 The function is added to the step -> function mapping and appended to the
499 516 list of steps. Beware that decorated functions will be added in order (this
500 517 may matter).
501 518
502 519 You can only use this decorator for a new step; if you want to wrap a step
503 520 from an extension, change the pushdiscoverymapping dictionary directly."""
504 521
505 522 def dec(func):
506 523 assert stepname not in pushdiscoverymapping
507 524 pushdiscoverymapping[stepname] = func
508 525 pushdiscoveryorder.append(stepname)
509 526 return func
510 527
511 528 return dec
512 529
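
As the docstring notes, extensions add discovery steps through this decorator. A hypothetical registration (step name and body invented for illustration):

    @pushdiscovery(b'my-extra-check')
    def _pushdiscoverymycheck(pushop):
        # runs after the previously registered steps, in registration order
        pushop.ui.debug(b'running extra discovery\n')
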
513 530
514 531 def _pushdiscovery(pushop):
515 532 """Run all discovery steps"""
516 533 for stepname in pushdiscoveryorder:
517 534 step = pushdiscoverymapping[stepname]
518 535 step(pushop)
519 536
520 537
521 538 def _checksubrepostate(pushop):
522 539 """Ensure all outgoing referenced subrepo revisions are present locally"""
523 540
524 541 repo = pushop.repo
525 542
526 543 # If the repository does not use subrepos, skip the expensive
527 544 # manifest checks.
528 545 if not len(repo.file(b'.hgsub')) or not len(repo.file(b'.hgsubstate')):
529 546 return
530 547
531 548 for n in pushop.outgoing.missing:
532 549 ctx = repo[n]
533 550
534 551 if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
535 552 for subpath in sorted(ctx.substate):
536 553 sub = ctx.sub(subpath)
537 554 sub.verify(onpush=True)
538 555
539 556
540 557 @pushdiscovery(b'changeset')
541 558 def _pushdiscoverychangeset(pushop):
542 559 """discover the changesets that need to be pushed"""
543 560 fci = discovery.findcommonincoming
544 561 if pushop.revs:
545 562 commoninc = fci(
546 563 pushop.repo,
547 564 pushop.remote,
548 565 force=pushop.force,
549 566 ancestorsof=pushop.revs,
550 567 )
551 568 else:
552 569 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
553 570 common, inc, remoteheads = commoninc
554 571 fco = discovery.findcommonoutgoing
555 572 outgoing = fco(
556 573 pushop.repo,
557 574 pushop.remote,
558 575 onlyheads=pushop.revs,
559 576 commoninc=commoninc,
560 577 force=pushop.force,
561 578 )
562 579 pushop.outgoing = outgoing
563 580 pushop.remoteheads = remoteheads
564 581 pushop.incoming = inc
565 582
566 583
567 584 @pushdiscovery(b'phase')
568 585 def _pushdiscoveryphase(pushop):
569 586 """discover the phases that need to be pushed
570 587
571 588 (computed for both the success and failure cases of the changeset push)"""
572 589 outgoing = pushop.outgoing
573 590 unfi = pushop.repo.unfiltered()
574 591 remotephases = listkeys(pushop.remote, b'phases')
575 592
576 593 if (
577 594 pushop.ui.configbool(b'ui', b'_usedassubrepo')
578 595 and remotephases # server supports phases
579 596 and not pushop.outgoing.missing # no changesets to be pushed
580 597 and remotephases.get(b'publishing', False)
581 598 ):
582 599 # When:
583 600 # - this is a subrepo push
584 601 # - and the remote supports phases
585 602 # - and no changesets are to be pushed
586 603 # - and the remote is publishing
587 604 # We may be in the issue 3781 case!
588 605 # We drop the phase synchronisation that is usually done as
589 606 # a courtesy; it might publish changesets that are possibly
590 607 # locally draft on the remote.
591 608 pushop.outdatedphases = []
592 609 pushop.fallbackoutdatedphases = []
593 610 return
594 611
595 612 pushop.remotephases = phases.remotephasessummary(
596 613 pushop.repo, pushop.fallbackheads, remotephases
597 614 )
598 615 droots = pushop.remotephases.draftroots
599 616
600 617 extracond = b''
601 618 if not pushop.remotephases.publishing:
602 619 extracond = b' and public()'
603 620 revset = b'heads((%%ln::%%ln) %s)' % extracond
604 621 # Get the list of all revs draft on the remote but public here.
605 622 # XXX Beware that the revset breaks if droots is not strictly
606 623 # XXX roots; we may want to ensure it is, but that is costly
607 624 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
608 625 if not pushop.remotephases.publishing and pushop.publish:
609 626 future = list(
610 627 unfi.set(
611 628 b'%ln and (not public() or %ln::)', pushop.futureheads, droots
612 629 )
613 630 )
614 631 elif not outgoing.missing:
615 632 future = fallback
616 633 else:
617 634 # add changesets we are going to push as draft
618 635 #
619 636 # should not be necessary for a publishing server, but because of an
620 637 # issue fixed in xxxxx we have to do it anyway.
621 638 fdroots = list(
622 639 unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
623 640 )
624 641 fdroots = [f.node() for f in fdroots]
625 642 future = list(unfi.set(revset, fdroots, pushop.futureheads))
626 643 pushop.outdatedphases = future
627 644 pushop.fallbackoutdatedphases = fallback
628 645
629 646
630 647 @pushdiscovery(b'obsmarker')
631 648 def _pushdiscoveryobsmarkers(pushop):
632 649 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
633 650 return
634 651
635 652 if not pushop.repo.obsstore:
636 653 return
637 654
638 655 if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
639 656 return
640 657
641 658 repo = pushop.repo
642 659 # very naive computation that can be quite expensive on big repos.
643 660 # However, evolution is currently slow on them anyway.
644 661 nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
645 662 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
646 663
647 664
648 665 @pushdiscovery(b'bookmarks')
649 666 def _pushdiscoverybookmarks(pushop):
650 667 ui = pushop.ui
651 668 repo = pushop.repo.unfiltered()
652 669 remote = pushop.remote
653 670 ui.debug(b"checking for updated bookmarks\n")
654 671 ancestors = ()
655 672 if pushop.revs:
656 673 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
657 674 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
658 675
659 676 remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
660 677
661 678 explicit = {
662 679 repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
663 680 }
664 681
665 682 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
666 683 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
667 684
668 685
669 686 def _processcompared(pushop, pushed, explicit, remotebms, comp):
670 687 """decide which bookmarks to push to the remote repo
671 688
672 689 Exists to help extensions alter this behavior.
673 690 """
674 691 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
675 692
676 693 repo = pushop.repo
677 694
678 695 for b, scid, dcid in advsrc:
679 696 if b in explicit:
680 697 explicit.remove(b)
681 698 if not pushed or repo[scid].rev() in pushed:
682 699 pushop.outbookmarks.append((b, dcid, scid))
683 700 # search for added bookmarks
684 701 for b, scid, dcid in addsrc:
685 702 if b in explicit:
686 703 explicit.remove(b)
687 704 if bookmod.isdivergent(b):
688 705 pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b)
689 706 pushop.bkresult = 2
690 707 else:
691 708 pushop.outbookmarks.append((b, b'', scid))
692 709 # search for overwritten bookmarks
693 710 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
694 711 if b in explicit:
695 712 explicit.remove(b)
696 713 pushop.outbookmarks.append((b, dcid, scid))
697 714 # search for bookmarks to delete
698 715 for b, scid, dcid in adddst:
699 716 if b in explicit:
700 717 explicit.remove(b)
701 718 # treat as "deleted locally"
702 719 pushop.outbookmarks.append((b, dcid, b''))
703 720 # identical bookmarks shouldn't get reported
704 721 for b, scid, dcid in same:
705 722 if b in explicit:
706 723 explicit.remove(b)
707 724
708 725 if explicit:
709 726 explicit = sorted(explicit)
710 727 # we should probably list all of them
711 728 pushop.ui.warn(
712 729 _(
713 730 b'bookmark %s does not exist on the local '
714 731 b'or remote repository!\n'
715 732 )
716 733 % explicit[0]
717 734 )
718 735 pushop.bkresult = 2
719 736
720 737 pushop.outbookmarks.sort()
721 738
722 739
723 740 def _pushcheckoutgoing(pushop):
724 741 outgoing = pushop.outgoing
725 742 unfi = pushop.repo.unfiltered()
726 743 if not outgoing.missing:
727 744 # nothing to push
728 745 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
729 746 return False
730 747 # something to push
731 748 if not pushop.force:
732 749 # if repo.obsstore == False --> no obsolete
733 750 # then, save the iteration
734 751 if unfi.obsstore:
735 752 # these messages are defined here for the 80-char line limit reason
736 753 mso = _(b"push includes obsolete changeset: %s!")
737 754 mspd = _(b"push includes phase-divergent changeset: %s!")
738 755 mscd = _(b"push includes content-divergent changeset: %s!")
739 756 mst = {
740 757 b"orphan": _(b"push includes orphan changeset: %s!"),
741 758 b"phase-divergent": mspd,
742 759 b"content-divergent": mscd,
743 760 }
744 761 # If we are pushing and there is at least one
745 762 # obsolete or unstable changeset in missing, at
746 763 # least one of the missing heads will be obsolete or
747 764 # unstable. So checking heads only is ok.
748 765 for node in outgoing.ancestorsof:
749 766 ctx = unfi[node]
750 767 if ctx.obsolete():
751 768 raise error.Abort(mso % ctx)
752 769 elif ctx.isunstable():
753 770 # TODO print more than one instability in the abort
754 771 # message
755 772 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
756 773
757 774 discovery.checkheads(pushop)
758 775 return True
759 776
760 777
761 778 # List of names of steps to perform for an outgoing bundle2, order matters.
762 779 b2partsgenorder = []
763 780
764 781 # Mapping between step name and function
765 782 #
766 783 # This exists to help extensions wrap steps if necessary
767 784 b2partsgenmapping = {}
768 785
769 786
770 787 def b2partsgenerator(stepname, idx=None):
771 788 """decorator for function generating bundle2 part
772 789
773 790 The function is added to the step -> function mapping and appended to the
774 791 list of steps. Beware that decorated functions will be added in order
775 792 (this may matter).
776 793
777 794 You can only use this decorator for new steps; if you want to wrap a step
778 795 from an extension, change the b2partsgenmapping dictionary directly."""
779 796
780 797 def dec(func):
781 798 assert stepname not in b2partsgenmapping
782 799 b2partsgenmapping[stepname] = func
783 800 if idx is None:
784 801 b2partsgenorder.append(stepname)
785 802 else:
786 803 b2partsgenorder.insert(idx, stepname)
787 804 return func
788 805
789 806 return dec
790 807
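
The optional idx argument is what distinguishes this decorator from pushdiscovery(): a generator can be inserted at a chosen position instead of appended. A hypothetical use of both forms (names invented; a generator may return a reply handler, which _pushbundle2() collects via callable(ret)):

    @b2partsgenerator(b'my-part')
    def _pushb2mypart(pushop, bundler):
        # appended at the end of b2partsgenorder
        return None

    @b2partsgenerator(b'my-early-part', idx=0)
    def _pushb2myearlypart(pushop, bundler):
        # inserted first, before previously registered generators
        return None
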
791 808
792 809 def _pushb2ctxcheckheads(pushop, bundler):
793 810 """Generate race condition checking parts
794 811
795 812 Exists as an independent function to aid extensions
796 813 """
797 814 # * with 'force', do not check for push races,
798 815 # * if we don't push anything, there is nothing to check.
799 816 if not pushop.force and pushop.outgoing.ancestorsof:
800 817 allowunrelated = b'related' in bundler.capabilities.get(
801 818 b'checkheads', ()
802 819 )
803 820 emptyremote = pushop.pushbranchmap is None
804 821 if not allowunrelated or emptyremote:
805 822 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
806 823 else:
807 824 affected = set()
808 825 for branch, heads in pushop.pushbranchmap.items():
809 826 remoteheads, newheads, unsyncedheads, discardedheads = heads
810 827 if remoteheads is not None:
811 828 remote = set(remoteheads)
812 829 affected |= set(discardedheads) & remote
813 830 affected |= remote - set(newheads)
814 831 if affected:
815 832 data = iter(sorted(affected))
816 833 bundler.newpart(b'check:updated-heads', data=data)
817 834
818 835
819 836 def _pushing(pushop):
820 837 """return True if we are pushing anything"""
821 838 return bool(
822 839 pushop.outgoing.missing
823 840 or pushop.outdatedphases
824 841 or pushop.outobsmarkers
825 842 or pushop.outbookmarks
826 843 )
827 844
828 845
829 846 @b2partsgenerator(b'check-bookmarks')
830 847 def _pushb2checkbookmarks(pushop, bundler):
831 848 """insert bookmark move checking"""
832 849 if not _pushing(pushop) or pushop.force:
833 850 return
834 851 b2caps = bundle2.bundle2caps(pushop.remote)
835 852 hasbookmarkcheck = b'bookmarks' in b2caps
836 853 if not (pushop.outbookmarks and hasbookmarkcheck):
837 854 return
838 855 data = []
839 856 for book, old, new in pushop.outbookmarks:
840 857 data.append((book, old))
841 858 checkdata = bookmod.binaryencode(pushop.repo, data)
842 859 bundler.newpart(b'check:bookmarks', data=checkdata)
843 860
844 861
845 862 @b2partsgenerator(b'check-phases')
846 863 def _pushb2checkphases(pushop, bundler):
847 864 """insert phase move checking"""
848 865 if not _pushing(pushop) or pushop.force:
849 866 return
850 867 b2caps = bundle2.bundle2caps(pushop.remote)
851 868 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
852 869 if pushop.remotephases is not None and hasphaseheads:
853 870 # check that the remote phase has not changed
854 871 checks = {p: [] for p in phases.allphases}
855 872 checks[phases.public].extend(pushop.remotephases.publicheads)
856 873 checks[phases.draft].extend(pushop.remotephases.draftroots)
857 874 if any(checks.values()):
858 875 for phase in checks:
859 876 checks[phase].sort()
860 877 checkdata = phases.binaryencode(checks)
861 878 bundler.newpart(b'check:phases', data=checkdata)
862 879
863 880
864 881 @b2partsgenerator(b'changeset')
865 882 def _pushb2ctx(pushop, bundler):
866 883 """handle changegroup push through bundle2
867 884
868 885 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
869 886 """
870 887 if b'changesets' in pushop.stepsdone:
871 888 return
872 889 pushop.stepsdone.add(b'changesets')
873 890 # Send known heads to the server for race detection.
874 891 if not _pushcheckoutgoing(pushop):
875 892 return
876 893 pushop.repo.prepushoutgoinghooks(pushop)
877 894
878 895 _pushb2ctxcheckheads(pushop, bundler)
879 896
880 897 b2caps = bundle2.bundle2caps(pushop.remote)
881 898 version = b'01'
882 899 cgversions = b2caps.get(b'changegroup')
883 900 if cgversions: # 3.1 and 3.2 ship with an empty value
884 901 cgversions = [
885 902 v
886 903 for v in cgversions
887 904 if v in changegroup.supportedoutgoingversions(pushop.repo)
888 905 ]
889 906 if not cgversions:
890 907 raise error.Abort(_(b'no common changegroup version'))
891 908 version = max(cgversions)
892 909
893 910 remote_sidedata = bundle2.read_remote_wanted_sidedata(pushop.remote)
894 911 cgstream = changegroup.makestream(
895 912 pushop.repo,
896 913 pushop.outgoing,
897 914 version,
898 915 b'push',
899 916 bundlecaps=b2caps,
900 917 remote_sidedata=remote_sidedata,
901 918 )
902 919 cgpart = bundler.newpart(b'changegroup', data=cgstream)
903 920 if cgversions:
904 921 cgpart.addparam(b'version', version)
905 922 if scmutil.istreemanifest(pushop.repo):
906 923 cgpart.addparam(b'treemanifest', b'1')
907 924 if repository.REPO_FEATURE_SIDE_DATA in pushop.repo.features:
908 925 cgpart.addparam(b'exp-sidedata', b'1')
909 926
910 927 def handlereply(op):
911 928 """extract addchangegroup returns from server reply"""
912 929 cgreplies = op.records.getreplies(cgpart.id)
913 930 assert len(cgreplies[b'changegroup']) == 1
914 931 pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
915 932
916 933 return handlereply
917 934
918 935
919 936 @b2partsgenerator(b'phase')
920 937 def _pushb2phases(pushop, bundler):
921 938 """handle phase push through bundle2"""
922 939 if b'phases' in pushop.stepsdone:
923 940 return
924 941 b2caps = bundle2.bundle2caps(pushop.remote)
925 942 ui = pushop.repo.ui
926 943
927 944 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
928 945 haspushkey = b'pushkey' in b2caps
929 946 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
930 947
931 948 if hasphaseheads and not legacyphase:
932 949 return _pushb2phaseheads(pushop, bundler)
933 950 elif haspushkey:
934 951 return _pushb2phasespushkey(pushop, bundler)
935 952
936 953
937 954 def _pushb2phaseheads(pushop, bundler):
938 955 """push phase information through a bundle2 - binary part"""
939 956 pushop.stepsdone.add(b'phases')
940 957 if pushop.outdatedphases:
941 958 updates = {p: [] for p in phases.allphases}
942 959 updates[0].extend(h.node() for h in pushop.outdatedphases)
943 960 phasedata = phases.binaryencode(updates)
944 961 bundler.newpart(b'phase-heads', data=phasedata)
945 962
946 963
947 964 def _pushb2phasespushkey(pushop, bundler):
948 965 """push phase information through a bundle2 - pushkey part"""
949 966 pushop.stepsdone.add(b'phases')
950 967 part2node = []
951 968
952 969 def handlefailure(pushop, exc):
953 970 targetid = int(exc.partid)
954 971 for partid, node in part2node:
955 972 if partid == targetid:
956 973 raise error.Abort(_(b'updating %s to public failed') % node)
957 974
958 975 enc = pushkey.encode
959 976 for newremotehead in pushop.outdatedphases:
960 977 part = bundler.newpart(b'pushkey')
961 978 part.addparam(b'namespace', enc(b'phases'))
962 979 part.addparam(b'key', enc(newremotehead.hex()))
963 980 part.addparam(b'old', enc(b'%d' % phases.draft))
964 981 part.addparam(b'new', enc(b'%d' % phases.public))
965 982 part2node.append((part.id, newremotehead))
966 983 pushop.pkfailcb[part.id] = handlefailure
967 984
968 985 def handlereply(op):
969 986 for partid, node in part2node:
970 987 partrep = op.records.getreplies(partid)
971 988 results = partrep[b'pushkey']
972 989 assert len(results) <= 1
973 990 msg = None
974 991 if not results:
975 992 msg = _(b'server ignored update of %s to public!\n') % node
976 993 elif not int(results[0][b'return']):
977 994 msg = _(b'updating %s to public failed!\n') % node
978 995 if msg is not None:
979 996 pushop.ui.warn(msg)
980 997
981 998 return handlereply
982 999
983 1000
984 1001 @b2partsgenerator(b'obsmarkers')
985 1002 def _pushb2obsmarkers(pushop, bundler):
986 1003 if b'obsmarkers' in pushop.stepsdone:
987 1004 return
988 1005 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
989 1006 if obsolete.commonversion(remoteversions) is None:
990 1007 return
991 1008 pushop.stepsdone.add(b'obsmarkers')
992 1009 if pushop.outobsmarkers:
993 1010 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
994 1011 bundle2.buildobsmarkerspart(bundler, markers)
995 1012
996 1013
997 1014 @b2partsgenerator(b'bookmarks')
998 1015 def _pushb2bookmarks(pushop, bundler):
999 1016 """handle bookmark push through bundle2"""
1000 1017 if b'bookmarks' in pushop.stepsdone:
1001 1018 return
1002 1019 b2caps = bundle2.bundle2caps(pushop.remote)
1003 1020
1004 1021 legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
1005 1022 legacybooks = b'bookmarks' in legacy
1006 1023
1007 1024 if not legacybooks and b'bookmarks' in b2caps:
1008 1025 return _pushb2bookmarkspart(pushop, bundler)
1009 1026 elif b'pushkey' in b2caps:
1010 1027 return _pushb2bookmarkspushkey(pushop, bundler)
1011 1028
1012 1029
1013 1030 def _bmaction(old, new):
1014 1031 """small utility for bookmark pushing"""
1015 1032 if not old:
1016 1033 return b'export'
1017 1034 elif not new:
1018 1035 return b'delete'
1019 1036 return b'update'
1020 1037
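
Spelled out, the mapping implemented by _bmaction:

    # old empty, new set   -> b'export'  (bookmark is new on the remote)
    # old set,   new empty -> b'delete'  (bookmark removed)
    # both set             -> b'update'  (bookmark moved)
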
1021 1038
1022 1039 def _abortonsecretctx(pushop, node, b):
1023 1040 """abort if a given bookmark points to a secret changeset"""
1024 1041 if node and pushop.repo[node].phase() == phases.secret:
1025 1042 raise error.Abort(
1026 1043 _(b'cannot push bookmark %s as it points to a secret changeset') % b
1027 1044 )
1028 1045
1029 1046
1030 1047 def _pushb2bookmarkspart(pushop, bundler):
1031 1048 pushop.stepsdone.add(b'bookmarks')
1032 1049 if not pushop.outbookmarks:
1033 1050 return
1034 1051
1035 1052 allactions = []
1036 1053 data = []
1037 1054 for book, old, new in pushop.outbookmarks:
1038 1055 _abortonsecretctx(pushop, new, book)
1039 1056 data.append((book, new))
1040 1057 allactions.append((book, _bmaction(old, new)))
1041 1058 checkdata = bookmod.binaryencode(pushop.repo, data)
1042 1059 bundler.newpart(b'bookmarks', data=checkdata)
1043 1060
1044 1061 def handlereply(op):
1045 1062 ui = pushop.ui
1046 1063 # if success
1047 1064 for book, action in allactions:
1048 1065 ui.status(bookmsgmap[action][0] % book)
1049 1066
1050 1067 return handlereply
1051 1068
1052 1069
1053 1070 def _pushb2bookmarkspushkey(pushop, bundler):
1054 1071 pushop.stepsdone.add(b'bookmarks')
1055 1072 part2book = []
1056 1073 enc = pushkey.encode
1057 1074
1058 1075 def handlefailure(pushop, exc):
1059 1076 targetid = int(exc.partid)
1060 1077 for partid, book, action in part2book:
1061 1078 if partid == targetid:
1062 1079 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1063 1080 # we should not be called for parts we did not generate
1064 1081 assert False
1065 1082
1066 1083 for book, old, new in pushop.outbookmarks:
1067 1084 _abortonsecretctx(pushop, new, book)
1068 1085 part = bundler.newpart(b'pushkey')
1069 1086 part.addparam(b'namespace', enc(b'bookmarks'))
1070 1087 part.addparam(b'key', enc(book))
1071 1088 part.addparam(b'old', enc(hex(old)))
1072 1089 part.addparam(b'new', enc(hex(new)))
1073 1090 action = b'update'
1074 1091 if not old:
1075 1092 action = b'export'
1076 1093 elif not new:
1077 1094 action = b'delete'
1078 1095 part2book.append((part.id, book, action))
1079 1096 pushop.pkfailcb[part.id] = handlefailure
1080 1097
1081 1098 def handlereply(op):
1082 1099 ui = pushop.ui
1083 1100 for partid, book, action in part2book:
1084 1101 partrep = op.records.getreplies(partid)
1085 1102 results = partrep[b'pushkey']
1086 1103 assert len(results) <= 1
1087 1104 if not results:
1088 1105 pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
1089 1106 else:
1090 1107 ret = int(results[0][b'return'])
1091 1108 if ret:
1092 1109 ui.status(bookmsgmap[action][0] % book)
1093 1110 else:
1094 1111 ui.warn(bookmsgmap[action][1] % book)
1095 1112 if pushop.bkresult is not None:
1096 1113 pushop.bkresult = 1
1097 1114
1098 1115 return handlereply
1099 1116
1100 1117
1101 1118 @b2partsgenerator(b'pushvars', idx=0)
1102 1119 def _getbundlesendvars(pushop, bundler):
1103 1120 '''send shellvars via bundle2'''
1104 1121 pushvars = pushop.pushvars
1105 1122 if pushvars:
1106 1123 shellvars = {}
1107 1124 for raw in pushvars:
1108 1125 if b'=' not in raw:
1109 1126 msg = (
1110 1127 b"unable to parse variable '%s', should follow "
1111 1128 b"'KEY=VALUE' or 'KEY=' format"
1112 1129 )
1113 1130 raise error.Abort(msg % raw)
1114 1131 k, v = raw.split(b'=', 1)
1115 1132 shellvars[k] = v
1116 1133
1117 1134 part = bundler.newpart(b'pushvars')
1118 1135
1119 1136 for key, value in shellvars.items():
1120 1137 part.addparam(key, value, mandatory=False)
1121 1138
1122 1139
1123 1140 def _pushbundle2(pushop):
1124 1141 """push data to the remote using bundle2
1125 1142
1126 1143 The only currently supported type of data is changegroup but this will
1127 1144 evolve in the future."""
1128 1145 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1129 1146 pushback = pushop.trmanager and pushop.ui.configbool(
1130 1147 b'experimental', b'bundle2.pushback'
1131 1148 )
1132 1149
1133 1150 # create reply capability
1134 1151 capsblob = bundle2.encodecaps(
1135 1152 bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
1136 1153 )
1137 1154 bundler.newpart(b'replycaps', data=capsblob)
1138 1155 replyhandlers = []
1139 1156 for partgenname in b2partsgenorder:
1140 1157 partgen = b2partsgenmapping[partgenname]
1141 1158 ret = partgen(pushop, bundler)
1142 1159 if callable(ret):
1143 1160 replyhandlers.append(ret)
1144 1161 # do not push if nothing to push
1145 1162 if bundler.nbparts <= 1:
1146 1163 return
1147 1164 stream = util.chunkbuffer(bundler.getchunks())
1148 1165 try:
1149 1166 try:
1150 1167 with pushop.remote.commandexecutor() as e:
1151 1168 reply = e.callcommand(
1152 1169 b'unbundle',
1153 1170 {
1154 1171 b'bundle': stream,
1155 1172 b'heads': [b'force'],
1156 1173 b'url': pushop.remote.url(),
1157 1174 },
1158 1175 ).result()
1159 1176 except error.BundleValueError as exc:
1160 1177 raise error.RemoteError(_(b'missing support for %s') % exc)
1161 1178 try:
1162 1179 trgetter = None
1163 1180 if pushback:
1164 1181 trgetter = pushop.trmanager.transaction
1165 1182 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1166 1183 except error.BundleValueError as exc:
1167 1184 raise error.RemoteError(_(b'missing support for %s') % exc)
1168 1185 except bundle2.AbortFromPart as exc:
1169 1186 pushop.ui.error(_(b'remote: %s\n') % exc)
1170 1187 if exc.hint is not None:
1171 1188 pushop.ui.error(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1172 1189 raise error.RemoteError(_(b'push failed on remote'))
1173 1190 except error.PushkeyFailed as exc:
1174 1191 partid = int(exc.partid)
1175 1192 if partid not in pushop.pkfailcb:
1176 1193 raise
1177 1194 pushop.pkfailcb[partid](pushop, exc)
1178 1195 for rephand in replyhandlers:
1179 1196 rephand(op)
1180 1197
1181 1198
1182 1199 def _pushchangeset(pushop):
1183 1200 """Make the actual push of changeset bundle to remote repo"""
1184 1201 if b'changesets' in pushop.stepsdone:
1185 1202 return
1186 1203 pushop.stepsdone.add(b'changesets')
1187 1204 if not _pushcheckoutgoing(pushop):
1188 1205 return
1189 1206
1190 1207 # Should have verified this in push().
1191 1208 assert pushop.remote.capable(b'unbundle')
1192 1209
1193 1210 pushop.repo.prepushoutgoinghooks(pushop)
1194 1211 outgoing = pushop.outgoing
1195 1212 # TODO: get bundlecaps from remote
1196 1213 bundlecaps = None
1197 1214 # create a changegroup from local
1198 1215 if pushop.revs is None and not (
1199 1216 outgoing.excluded or pushop.repo.changelog.filteredrevs
1200 1217 ):
1201 1218 # push everything,
1202 1219 # use the fast path, no race possible on push
1203 1220 cg = changegroup.makechangegroup(
1204 1221 pushop.repo,
1205 1222 outgoing,
1206 1223 b'01',
1207 1224 b'push',
1208 1225 fastpath=True,
1209 1226 bundlecaps=bundlecaps,
1210 1227 )
1211 1228 else:
1212 1229 cg = changegroup.makechangegroup(
1213 1230 pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
1214 1231 )
1215 1232
1216 1233 # apply changegroup to remote
1217 1234 # local repo finds heads on server, finds out what
1218 1235 # revs it must push. once revs transferred, if server
1219 1236 # finds it has different heads (someone else won
1220 1237 # commit/push race), server aborts.
1221 1238 if pushop.force:
1222 1239 remoteheads = [b'force']
1223 1240 else:
1224 1241 remoteheads = pushop.remoteheads
1225 1242 # ssh: return remote's addchangegroup()
1226 1243 # http: return remote's addchangegroup() or 0 for error
1227 1244 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1228 1245
1229 1246
1230 1247 def _pushsyncphase(pushop):
1231 1248 """synchronise phase information locally and remotely"""
1232 1249 cheads = pushop.commonheads
1233 1250 # even when we don't push, exchanging phase data is useful
1234 1251 remotephases = listkeys(pushop.remote, b'phases')
1235 1252 if (
1236 1253 pushop.ui.configbool(b'ui', b'_usedassubrepo')
1237 1254 and remotephases # server supports phases
1238 1255 and pushop.cgresult is None # nothing was pushed
1239 1256 and remotephases.get(b'publishing', False)
1240 1257 ):
1241 1258 # When:
1242 1259 # - this is a subrepo push
1243 1260 # - and the remote supports phases
1244 1261 # - and no changeset was pushed
1245 1262 # - and remote is publishing
1246 1263 # We may be in issue 3871 case!
1247 1264 # We drop the phase synchronisation that is usually done as
1248 1265 # a courtesy; it might publish changesets that are possibly
1249 1266 # locally draft on the remote.
1250 1267 remotephases = {b'publishing': b'True'}
1251 1268 if not remotephases: # old server, or public-only reply from non-publishing
1252 1269 _localphasemove(pushop, cheads)
1253 1270 # don't push any phase data as there is nothing to push
1254 1271 else:
1255 1272 ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
1256 1273 pheads, droots = ana
1257 1274 ### Apply remote phase on local
1258 1275 if remotephases.get(b'publishing', False):
1259 1276 _localphasemove(pushop, cheads)
1260 1277 else: # publish = False
1261 1278 _localphasemove(pushop, pheads)
1262 1279 _localphasemove(pushop, cheads, phases.draft)
1263 1280 ### Apply local phase on remote
1264 1281
1265 1282 if pushop.cgresult:
1266 1283 if b'phases' in pushop.stepsdone:
1267 1284 # phases already pushed through bundle2
1268 1285 return
1269 1286 outdated = pushop.outdatedphases
1270 1287 else:
1271 1288 outdated = pushop.fallbackoutdatedphases
1272 1289
1273 1290 pushop.stepsdone.add(b'phases')
1274 1291
1275 1292 # filter heads already turned public by the push
1276 1293 outdated = [c for c in outdated if c.node() not in pheads]
1277 1294 # fallback to independent pushkey command
1278 1295 for newremotehead in outdated:
1279 1296 with pushop.remote.commandexecutor() as e:
1280 1297 r = e.callcommand(
1281 1298 b'pushkey',
1282 1299 {
1283 1300 b'namespace': b'phases',
1284 1301 b'key': newremotehead.hex(),
1285 1302 b'old': b'%d' % phases.draft,
1286 1303 b'new': b'%d' % phases.public,
1287 1304 },
1288 1305 ).result()
1289 1306
1290 1307 if not r:
1291 1308 pushop.ui.warn(
1292 1309 _(b'updating %s to public failed!\n') % newremotehead
1293 1310 )
1294 1311
1295 1312
1296 1313 def _localphasemove(pushop, nodes, phase=phases.public):
1297 1314 """move <nodes> to <phase> in the local source repo"""
1298 1315 if pushop.trmanager:
1299 1316 phases.advanceboundary(
1300 1317 pushop.repo, pushop.trmanager.transaction(), phase, nodes
1301 1318 )
1302 1319 else:
1303 1320 # repo is not locked, do not change any phases!
1304 1321 # Inform the user that phases should have been moved when
1305 1322 # applicable.
1306 1323 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1307 1324 phasestr = phases.phasenames[phase]
1308 1325 if actualmoves:
1309 1326 pushop.ui.status(
1310 1327 _(
1311 1328 b'cannot lock source repo, skipping '
1312 1329 b'local %s phase update\n'
1313 1330 )
1314 1331 % phasestr
1315 1332 )
1316 1333
1317 1334
1318 1335 def _pushobsolete(pushop):
1319 1336 """utility function to push obsolete markers to a remote"""
1320 1337 if b'obsmarkers' in pushop.stepsdone:
1321 1338 return
1322 1339 repo = pushop.repo
1323 1340 remote = pushop.remote
1324 1341 pushop.stepsdone.add(b'obsmarkers')
1325 1342 if pushop.outobsmarkers:
1326 1343 pushop.ui.debug(b'try to push obsolete markers to remote\n')
1327 1344 rslts = []
1328 1345 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1329 1346 remotedata = obsolete._pushkeyescape(markers)
1330 1347 for key in sorted(remotedata, reverse=True):
1331 1348 # reverse sort to ensure we end with dump0
1332 1349 data = remotedata[key]
1333 1350 rslts.append(remote.pushkey(b'obsolete', key, b'', data))
1334 1351 if [r for r in rslts if not r]:
1335 1352 msg = _(b'failed to push some obsolete markers!\n')
1336 1353 repo.ui.warn(msg)
1337 1354
1338 1355
1339 1356 def _pushbookmark(pushop):
1340 1357 """Update bookmark position on remote"""
1341 1358 if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
1342 1359 return
1343 1360 pushop.stepsdone.add(b'bookmarks')
1344 1361 ui = pushop.ui
1345 1362 remote = pushop.remote
1346 1363
1347 1364 for b, old, new in pushop.outbookmarks:
1348 1365 action = b'update'
1349 1366 if not old:
1350 1367 action = b'export'
1351 1368 elif not new:
1352 1369 action = b'delete'
1353 1370
1354 1371 with remote.commandexecutor() as e:
1355 1372 r = e.callcommand(
1356 1373 b'pushkey',
1357 1374 {
1358 1375 b'namespace': b'bookmarks',
1359 1376 b'key': b,
1360 1377 b'old': hex(old),
1361 1378 b'new': hex(new),
1362 1379 },
1363 1380 ).result()
1364 1381
1365 1382 if r:
1366 1383 ui.status(bookmsgmap[action][0] % b)
1367 1384 else:
1368 1385 ui.warn(bookmsgmap[action][1] % b)
1369 1386 # discovery can have set the value from an invalid entry
1370 1387 if pushop.bkresult is not None:
1371 1388 pushop.bkresult = 1
1372 1389
1373 1390
1374 1391 class pulloperation:
1375 1392 """An object that represents a single pull operation
1376 1393
1377 1394 Its purpose is to carry pull-related state and very common operations.
1378 1395
1379 1396 A new one should be created at the beginning of each pull and discarded
1380 1397 afterward.
1381 1398 """
1382 1399
1383 1400 def __init__(
1384 1401 self,
1385 1402 repo,
1386 1403 remote,
1387 1404 heads=None,
1388 1405 force=False,
1389 1406 bookmarks=(),
1390 1407 remotebookmarks=None,
1391 1408 streamclonerequested=None,
1392 1409 includepats=None,
1393 1410 excludepats=None,
1394 1411 depth=None,
1395 1412 path=None,
1396 1413 ):
1397 1414 # repo we pull into
1398 1415 self.repo = repo
1399 1416 # repo we pull from
1400 1417 self.remote = remote
1401 1418 # path object used to build this remote
1402 1419 #
1403 1420 # Ideally, the remote peer would carry that directly.
1404 1421 self.remote_path = path
1405 1422 # revisions we try to pull (None is "all")
1406 1423 self.heads = heads
1407 1424 # bookmarks pulled explicitly
1408 1425 self.explicitbookmarks = [
1409 1426 repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
1410 1427 ]
1411 1428 # do we force pull?
1412 1429 self.force = force
1413 1430 # whether a streaming clone was requested
1414 1431 self.streamclonerequested = streamclonerequested
1415 1432 # transaction manager
1416 1433 self.trmanager = None
1417 1434 # set of common changesets between local and remote before the pull
1418 1435 self.common = None
1419 1436 # set of pulled heads
1420 1437 self.rheads = None
1421 1438 # list of missing changesets to fetch remotely
1422 1439 self.fetch = None
1423 1440 # remote bookmarks data
1424 1441 self.remotebookmarks = remotebookmarks
1425 1442 # result of changegroup pulling (used as return code by pull)
1426 1443 self.cgresult = None
1427 1444 # list of steps already done
1428 1445 self.stepsdone = set()
1429 1446 # Whether we attempted a clone from pre-generated bundles.
1430 1447 self.clonebundleattempted = False
1431 1448 # Set of file patterns to include.
1432 1449 self.includepats = includepats
1433 1450 # Set of file patterns to exclude.
1434 1451 self.excludepats = excludepats
1435 1452 # Number of ancestor changesets to pull from each pulled head.
1436 1453 self.depth = depth
1437 1454
1438 1455 @util.propertycache
1439 1456 def pulledsubset(self):
1440 1457 """heads of the set of changesets targeted by the pull"""
1441 1458 # compute target subset
1442 1459 if self.heads is None:
1443 1460 # We pulled everything possible
1444 1461 # sync on everything common
1445 1462 c = set(self.common)
1446 1463 ret = list(self.common)
1447 1464 for n in self.rheads:
1448 1465 if n not in c:
1449 1466 ret.append(n)
1450 1467 return ret
1451 1468 else:
1452 1469 # We pulled a specific subset
1453 1470 # sync on this subset
1454 1471 return self.heads
1455 1472
1456 1473 @util.propertycache
1457 1474 def canusebundle2(self):
1458 1475 return not _forcebundle1(self)
1459 1476
1460 1477 @util.propertycache
1461 1478 def remotebundle2caps(self):
1462 1479 return bundle2.bundle2caps(self.remote)
1463 1480
1464 1481 def gettransaction(self):
1465 1482 # deprecated; talk to trmanager directly
1466 1483 return self.trmanager.transaction()
1467 1484
1468 1485
1469 1486 class transactionmanager(util.transactional):
1470 1487 """An object to manage the life cycle of a transaction
1471 1488
1472 1489 It creates the transaction on demand and calls the appropriate hooks when
1473 1490 closing the transaction."""
1474 1491
1475 1492 def __init__(self, repo, source, url):
1476 1493 self.repo = repo
1477 1494 self.source = source
1478 1495 self.url = url
1479 1496 self._tr = None
1480 1497
1481 1498 def transaction(self):
1482 1499 """Return an open transaction object, constructing if necessary"""
1483 1500 if not self._tr:
1484 1501 trname = b'%s\n%s' % (self.source, urlutil.hidepassword(self.url))
1485 1502 self._tr = self.repo.transaction(trname)
1486 1503 self._tr.hookargs[b'source'] = self.source
1487 1504 self._tr.hookargs[b'url'] = self.url
1488 1505 return self._tr
1489 1506
1490 1507 def close(self):
1491 1508 """close transaction if created"""
1492 1509 if self._tr is not None:
1493 1510 self._tr.close()
1494 1511
1495 1512 def release(self):
1496 1513 """release transaction if created"""
1497 1514 if self._tr is not None:
1498 1515 self._tr.release()
1499 1516
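
Call sites in this file drive the manager as a context (the 'with ... trmanager' blocks in push() and pull()). A minimal life-cycle sketch, assuming repo is a local repository and the URL is illustrative:

    trmanager = transactionmanager(repo, b'pull', b'https://example.com/repo')
    tr = trmanager.transaction()  # created lazily, named b'pull\n<url>'
    # ... apply incoming data under tr ...
    trmanager.close()             # commits; hooks see 'source' and 'url' hookargs
    # error paths call release() instead, rolling back if still open
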
1500 1517
1501 1518 def listkeys(remote, namespace):
1502 1519 with remote.commandexecutor() as e:
1503 1520 return e.callcommand(b'listkeys', {b'namespace': namespace}).result()
1504 1521
1505 1522
1506 1523 def _fullpullbundle2(repo, pullop):
1507 1524 # The server may send a partial reply, i.e. when inlining
1508 1525 # pre-computed bundles. In that case, update the common
1509 1526 # set based on the results and pull another bundle.
1510 1527 #
1511 1528 # There are two indicators that the process is finished:
1512 1529 # - no changeset has been added, or
1513 1530 # - all remote heads are known locally.
1514 1531 # The head check must use the unfiltered view as obsolescence
1515 1532 # markers can hide heads.
1516 1533 unfi = repo.unfiltered()
1517 1534 unficl = unfi.changelog
1518 1535
1519 1536 def headsofdiff(h1, h2):
1520 1537 """Returns heads(h1 % h2)"""
1521 1538 res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
1522 1539 return {ctx.node() for ctx in res}
1523 1540
1524 1541 def headsofunion(h1, h2):
1525 1542 """Returns heads((h1 + h2) - null)"""
1526 1543 res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
1527 1544 return {ctx.node() for ctx in res}
1528 1545
1529 1546 while True:
1530 1547 old_heads = unficl.heads()
1531 1548 clstart = len(unficl)
1532 1549 _pullbundle2(pullop)
1533 1550 if requirements.NARROW_REQUIREMENT in repo.requirements:
1534 1551 # XXX narrow clones filter the heads on the server side during
1535 1552 # XXX getbundle and result in partial replies as well.
1536 1553 # XXX Disable pull bundles in this case as band aid to avoid
1537 1554 # XXX extra round trips.
1538 1555 break
1539 1556 if clstart == len(unficl):
1540 1557 break
1541 1558 if all(unficl.hasnode(n) for n in pullop.rheads):
1542 1559 break
1543 1560 new_heads = headsofdiff(unficl.heads(), old_heads)
1544 1561 pullop.common = headsofunion(new_heads, pullop.common)
1545 1562 pullop.rheads = set(pullop.rheads) - pullop.common
1546 1563
1547 1564
1548 1565 def add_confirm_callback(repo, pullop):
1549 1566 """adds a finalize callback to the transaction which can be used to show
1550 1567 stats to the user and confirm the pull before committing the transaction"""
1551 1568
1552 1569 tr = pullop.trmanager.transaction()
1553 1570 scmutil.registersummarycallback(
1554 1571 repo, tr, txnname=b'pull', as_validator=True
1555 1572 )
1556 1573 reporef = weakref.ref(repo.unfiltered())
1557 1574
1558 1575 def prompt(tr):
1559 1576 repo = reporef()
1560 1577 cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No')
1561 1578 if repo.ui.promptchoice(cm):
1562 1579 raise error.Abort(b"user aborted")
1563 1580
1564 1581 tr.addvalidator(b'900-pull-prompt', prompt)
1565 1582
1566 1583
1567 1584 def pull(
1568 1585 repo,
1569 1586 remote,
1570 1587 path=None,
1571 1588 heads=None,
1572 1589 force=False,
1573 1590 bookmarks=(),
1574 1591 opargs=None,
1575 1592 streamclonerequested=None,
1576 1593 includepats=None,
1577 1594 excludepats=None,
1578 1595 depth=None,
1579 1596 confirm=None,
1580 1597 ):
1581 1598 """Fetch repository data from a remote.
1582 1599
1583 1600 This is the main function used to retrieve data from a remote repository.
1584 1601
1585 1602 ``repo`` is the local repository to clone into.
1586 1603 ``remote`` is a peer instance.
1587 1604 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1588 1605 default) means to pull everything from the remote.
1589 1606 ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
1590 1607 default, all remote bookmarks are pulled.
1591 1608 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1592 1609 initialization.
1593 1610 ``streamclonerequested`` is a boolean indicating whether a "streaming
1594 1611 clone" is requested. A "streaming clone" is essentially a raw file copy
1595 1612 of revlogs from the server. This only works when the local repository is
1596 1613 empty. The default value of ``None`` means to respect the server
1597 1614 configuration for preferring stream clones.
1598 1615 ``includepats`` and ``excludepats`` define explicit file patterns to
1599 1616 include and exclude in storage, respectively. If not defined, narrow
1600 1617 patterns from the repo instance are used, if available.
1601 1618 ``depth`` is an integer indicating the DAG depth of history we're
1602 1619 interested in. If defined, for each revision specified in ``heads``, we
1603 1620 will fetch up to this many of its ancestors and data associated with them.
1604 1621 ``confirm`` is a boolean indicating whether the pull should be confirmed
1605 1622 before committing the transaction. This overrides HGPLAIN.
1606 1623
1607 1624 Returns the ``pulloperation`` created for this pull.
1608 1625 """
1609 1626 if opargs is None:
1610 1627 opargs = {}
1611 1628
1612 1629 # We allow the narrow patterns to be passed in explicitly to provide more
1613 1630 # flexibility for API consumers.
1614 1631 if includepats or excludepats:
1615 1632 includepats = includepats or set()
1616 1633 excludepats = excludepats or set()
1617 1634 else:
1618 1635 includepats, excludepats = repo.narrowpats
1619 1636
1620 1637 narrowspec.validatepatterns(includepats)
1621 1638 narrowspec.validatepatterns(excludepats)
1622 1639
1623 1640 pullop = pulloperation(
1624 1641 repo,
1625 1642 remote,
1626 1643 path=path,
1627 1644 heads=heads,
1628 1645 force=force,
1629 1646 bookmarks=bookmarks,
1630 1647 streamclonerequested=streamclonerequested,
1631 1648 includepats=includepats,
1632 1649 excludepats=excludepats,
1633 1650 depth=depth,
1634 1651 **pycompat.strkwargs(opargs)
1635 1652 )
1636 1653
1637 1654 peerlocal = pullop.remote.local()
1638 1655 if peerlocal:
1639 1656 missing = set(peerlocal.requirements) - pullop.repo.supported
1640 1657 if missing:
1641 1658 msg = _(
1642 1659 b"required features are not"
1643 1660 b" supported in the destination:"
1644 1661 b" %s"
1645 1662 ) % (b', '.join(sorted(missing)))
1646 1663 raise error.Abort(msg)
1647 1664
1648 1665 for category in repo._wanted_sidedata:
1649 1666 # Check that a computer is registered for that category for at least
1650 1667 # one revlog kind.
1651 1668 for kind, computers in repo._sidedata_computers.items():
1652 1669 if computers.get(category):
1653 1670 break
1654 1671 else:
1655 1672 # This should never happen since repos are supposed to be able to
1656 1673 # generate the sidedata they require.
1657 1674 raise error.ProgrammingError(
1658 1675 _(
1659 1676 b'sidedata category requested by local side without local'
1660 1677 b"support: '%s'"
1661 1678 )
1662 1679 % pycompat.bytestr(category)
1663 1680 )
1664 1681
1665 1682 pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
1666 1683 wlock = util.nullcontextmanager()
1667 1684 if not bookmod.bookmarksinstore(repo):
1668 1685 wlock = repo.wlock()
1669 1686 with wlock, repo.lock(), pullop.trmanager:
1670 1687 if confirm or (
1671 1688 repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain()
1672 1689 ):
1673 1690 add_confirm_callback(repo, pullop)
1674 1691
1675 1692 # This should ideally be in _pullbundle2(). However, it needs to run
1676 1693 # before discovery to avoid extra work.
1677 1694 _maybeapplyclonebundle(pullop)
1678 1695 streamclone.maybeperformlegacystreamclone(pullop)
1679 1696 _pulldiscovery(pullop)
1680 1697 if pullop.canusebundle2:
1681 1698 _fullpullbundle2(repo, pullop)
1682 1699 _pullchangeset(pullop)
1683 1700 _pullphase(pullop)
1684 1701 _pullbookmarks(pullop)
1685 1702 _pullobsolete(pullop)
1686 1703
1687 1704 # storing remotenames
1688 1705 if repo.ui.configbool(b'experimental', b'remotenames'):
1689 1706 logexchange.pullremotenames(repo, remote)
1690 1707
1691 1708 return pullop
1692 1709
1693 1710
1694 1711 # list of steps to perform discovery before pull
1695 1712 pulldiscoveryorder = []
1696 1713
1697 1714 # Mapping between step name and function
1698 1715 #
1699 1716 # This exists to help extensions wrap steps if necessary
1700 1717 pulldiscoverymapping = {}
1701 1718
1702 1719
1703 1720 def pulldiscovery(stepname):
1704 1721 """decorator for function performing discovery before pull
1705 1722
1706 1723 The function is added to the step -> function mapping and appended to the
1707 1724     list of steps. Beware that decorated functions will be added in order (this
1708 1725     may matter).
1709 1726
1710 1727     You can only use this decorator for a new step; if you want to wrap a step
1711 1728     from an extension, change the pulldiscoverymapping dictionary directly."""
1712 1729
1713 1730 def dec(func):
1714 1731 assert stepname not in pulldiscoverymapping
1715 1732 pulldiscoverymapping[stepname] = func
1716 1733 pulldiscoveryorder.append(stepname)
1717 1734 return func
1718 1735
1719 1736 return dec
1720 1737
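# Registration sketch (hypothetical step name): an extension adding its own
# discovery step would do something like
#
#     @pulldiscovery(b'my-extra-step')
#     def _discover_extra(pullop):
#         pullop.repo.ui.debug(b'running extra discovery\n')
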
1721 1738
1722 1739 def _pulldiscovery(pullop):
1723 1740 """Run all discovery steps"""
1724 1741 for stepname in pulldiscoveryorder:
1725 1742 step = pulldiscoverymapping[stepname]
1726 1743 step(pullop)
1727 1744
1728 1745
1729 1746 @pulldiscovery(b'b1:bookmarks')
1730 1747 def _pullbookmarkbundle1(pullop):
1731 1748 """fetch bookmark data in bundle1 case
1732 1749
1733 1750 If not using bundle2, we have to fetch bookmarks before changeset
1734 1751 discovery to reduce the chance and impact of race conditions."""
1735 1752 if pullop.remotebookmarks is not None:
1736 1753 return
1737 1754 if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
1738 1755         # all known bundle2 servers now support listkeys, but let's be nice with
1739 1756         # new implementations.
1740 1757 return
1741 1758 books = listkeys(pullop.remote, b'bookmarks')
1742 1759 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1743 1760
1744 1761
1745 1762 @pulldiscovery(b'changegroup')
1746 1763 def _pulldiscoverychangegroup(pullop):
1747 1764 """discovery phase for the pull
1748 1765
1749 1766     Currently handles changeset discovery only; will change to handle all
1750 1767     discovery at some point."""
1751 1768 tmp = discovery.findcommonincoming(
1752 1769 pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
1753 1770 )
1754 1771 common, fetch, rheads = tmp
1755 1772 has_node = pullop.repo.unfiltered().changelog.index.has_node
1756 1773 if fetch and rheads:
1757 1774         # If a remote head is filtered locally, put it back in common.
1758 1775         #
1759 1776         # This is a hackish solution to catch most "common but locally
1760 1777         # hidden" situations. We do not perform discovery on the unfiltered
1761 1778         # repository because it ends up doing a pathological amount of round
1762 1779         # trips for a huge amount of changesets we do not care about.
1763 1780         #
1764 1781         # If a set of such "common but filtered" changesets exists on the server
1765 1782         # but does not include a remote head, we won't be able to detect it.
1766 1783 scommon = set(common)
1767 1784 for n in rheads:
1768 1785 if has_node(n):
1769 1786 if n not in scommon:
1770 1787 common.append(n)
1771 1788 if set(rheads).issubset(set(common)):
1772 1789 fetch = []
1773 1790 pullop.common = common
1774 1791 pullop.fetch = fetch
1775 1792 pullop.rheads = rheads
1776 1793
1777 1794
1778 1795 def _pullbundle2(pullop):
1779 1796 """pull data using bundle2
1780 1797
1781 1798     For now, the only supported data is the changegroup."""
1782 1799 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1783 1800
1784 1801 # make ui easier to access
1785 1802 ui = pullop.repo.ui
1786 1803
1787 1804 # At the moment we don't do stream clones over bundle2. If that is
1788 1805 # implemented then here's where the check for that will go.
1789 1806 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1790 1807
1791 1808 # declare pull perimeters
1792 1809 kwargs[b'common'] = pullop.common
1793 1810 kwargs[b'heads'] = pullop.heads or pullop.rheads
1794 1811
1795 1812     # check whether the server supports narrow, then add includepats and excludepats
1796 1813 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1797 1814 if servernarrow and pullop.includepats:
1798 1815 kwargs[b'includepats'] = pullop.includepats
1799 1816 if servernarrow and pullop.excludepats:
1800 1817 kwargs[b'excludepats'] = pullop.excludepats
1801 1818
1802 1819 if streaming:
1803 1820 kwargs[b'cg'] = False
1804 1821 kwargs[b'stream'] = True
1805 1822 pullop.stepsdone.add(b'changegroup')
1806 1823 pullop.stepsdone.add(b'phases')
1807 1824
1808 1825 else:
1809 1826 # pulling changegroup
1810 1827 pullop.stepsdone.add(b'changegroup')
1811 1828
1812 1829 kwargs[b'cg'] = pullop.fetch
1813 1830
1814 1831 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1815 1832 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1816 1833 if not legacyphase and hasbinaryphase:
1817 1834 kwargs[b'phases'] = True
1818 1835 pullop.stepsdone.add(b'phases')
1819 1836
1820 1837 if b'listkeys' in pullop.remotebundle2caps:
1821 1838 if b'phases' not in pullop.stepsdone:
1822 1839 kwargs[b'listkeys'] = [b'phases']
1823 1840
1824 1841 bookmarksrequested = False
1825 1842 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1826 1843 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1827 1844
1828 1845 if pullop.remotebookmarks is not None:
1829 1846 pullop.stepsdone.add(b'request-bookmarks')
1830 1847
1831 1848 if (
1832 1849 b'request-bookmarks' not in pullop.stepsdone
1833 1850 and pullop.remotebookmarks is None
1834 1851 and not legacybookmark
1835 1852 and hasbinarybook
1836 1853 ):
1837 1854 kwargs[b'bookmarks'] = True
1838 1855 bookmarksrequested = True
1839 1856
1840 1857 if b'listkeys' in pullop.remotebundle2caps:
1841 1858 if b'request-bookmarks' not in pullop.stepsdone:
1842 1859             # make sure to always include bookmark data when migrating
1843 1860 # `hg incoming --bundle` to using this function.
1844 1861 pullop.stepsdone.add(b'request-bookmarks')
1845 1862 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1846 1863
1847 1864 # If this is a full pull / clone and the server supports the clone bundles
1848 1865 # feature, tell the server whether we attempted a clone bundle. The
1849 1866 # presence of this flag indicates the client supports clone bundles. This
1850 1867 # will enable the server to treat clients that support clone bundles
1851 1868 # differently from those that don't.
1852 1869 if (
1853 1870 pullop.remote.capable(b'clonebundles')
1854 1871 and pullop.heads is None
1855 1872 and list(pullop.common) == [pullop.repo.nullid]
1856 1873 ):
1857 1874 kwargs[b'cbattempted'] = pullop.clonebundleattempted
1858 1875
1859 1876 if streaming:
1860 1877 pullop.repo.ui.status(_(b'streaming all changes\n'))
1861 1878 elif not pullop.fetch:
1862 1879 pullop.repo.ui.status(_(b"no changes found\n"))
1863 1880 pullop.cgresult = 0
1864 1881 else:
1865 1882 if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
1866 1883 pullop.repo.ui.status(_(b"requesting all changes\n"))
1867 1884 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1868 1885 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1869 1886 if obsolete.commonversion(remoteversions) is not None:
1870 1887 kwargs[b'obsmarkers'] = True
1871 1888 pullop.stepsdone.add(b'obsmarkers')
1872 1889 _pullbundle2extraprepare(pullop, kwargs)
1873 1890
1874 1891 remote_sidedata = bundle2.read_remote_wanted_sidedata(pullop.remote)
1875 1892 if remote_sidedata:
1876 1893 kwargs[b'remote_sidedata'] = remote_sidedata
1877 1894
1878 1895 with pullop.remote.commandexecutor() as e:
1879 1896 args = dict(kwargs)
1880 1897 args[b'source'] = b'pull'
1881 1898 bundle = e.callcommand(b'getbundle', args).result()
1882 1899
1883 1900 try:
1884 1901 op = bundle2.bundleoperation(
1885 1902 pullop.repo, pullop.gettransaction, source=b'pull'
1886 1903 )
1887 1904 op.modes[b'bookmarks'] = b'records'
1888 1905 bundle2.processbundle(pullop.repo, bundle, op=op)
1889 1906 except bundle2.AbortFromPart as exc:
1890 1907 pullop.repo.ui.error(_(b'remote: abort: %s\n') % exc)
1891 1908 raise error.RemoteError(_(b'pull failed on remote'), hint=exc.hint)
1892 1909 except error.BundleValueError as exc:
1893 1910 raise error.RemoteError(_(b'missing support for %s') % exc)
1894 1911
1895 1912 if pullop.fetch:
1896 1913 pullop.cgresult = bundle2.combinechangegroupresults(op)
1897 1914
1898 1915 # processing phases change
1899 1916 for namespace, value in op.records[b'listkeys']:
1900 1917 if namespace == b'phases':
1901 1918 _pullapplyphases(pullop, value)
1902 1919
1903 1920 # processing bookmark update
1904 1921 if bookmarksrequested:
1905 1922 books = {}
1906 1923 for record in op.records[b'bookmarks']:
1907 1924 books[record[b'bookmark']] = record[b"node"]
1908 1925 pullop.remotebookmarks = books
1909 1926 else:
1910 1927 for namespace, value in op.records[b'listkeys']:
1911 1928 if namespace == b'bookmarks':
1912 1929 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1913 1930
1914 1931 # bookmark data were either already there or pulled in the bundle
1915 1932 if pullop.remotebookmarks is not None:
1916 1933 _pullbookmarks(pullop)
1917 1934
1918 1935
1919 1936 def _pullbundle2extraprepare(pullop, kwargs):
1920 1937 """hook function so that extensions can extend the getbundle call"""
1921 1938
1922 1939
1923 1940 def _pullchangeset(pullop):
1924 1941 """pull changeset from unbundle into the local repo"""
1925 1942     # We delay opening the transaction as late as possible so we
1926 1943     # don't open a transaction for nothing and don't break future useful
1927 1944     # rollback calls
1928 1945 if b'changegroup' in pullop.stepsdone:
1929 1946 return
1930 1947 pullop.stepsdone.add(b'changegroup')
1931 1948 if not pullop.fetch:
1932 1949 pullop.repo.ui.status(_(b"no changes found\n"))
1933 1950 pullop.cgresult = 0
1934 1951 return
1935 1952 tr = pullop.gettransaction()
1936 1953 if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
1937 1954 pullop.repo.ui.status(_(b"requesting all changes\n"))
1938 1955 elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
1939 1956 # issue1320, avoid a race if remote changed after discovery
1940 1957 pullop.heads = pullop.rheads
1941 1958
1942 1959 if pullop.remote.capable(b'getbundle'):
1943 1960 # TODO: get bundlecaps from remote
1944 1961 cg = pullop.remote.getbundle(
1945 1962 b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
1946 1963 )
1947 1964 elif pullop.heads is None:
1948 1965 with pullop.remote.commandexecutor() as e:
1949 1966 cg = e.callcommand(
1950 1967 b'changegroup',
1951 1968 {
1952 1969 b'nodes': pullop.fetch,
1953 1970 b'source': b'pull',
1954 1971 },
1955 1972 ).result()
1956 1973
1957 1974 elif not pullop.remote.capable(b'changegroupsubset'):
1958 1975 raise error.Abort(
1959 1976 _(
1960 1977 b"partial pull cannot be done because "
1961 1978 b"other repository doesn't support "
1962 1979 b"changegroupsubset."
1963 1980 )
1964 1981 )
1965 1982 else:
1966 1983 with pullop.remote.commandexecutor() as e:
1967 1984 cg = e.callcommand(
1968 1985 b'changegroupsubset',
1969 1986 {
1970 1987 b'bases': pullop.fetch,
1971 1988 b'heads': pullop.heads,
1972 1989 b'source': b'pull',
1973 1990 },
1974 1991 ).result()
1975 1992
1976 1993 bundleop = bundle2.applybundle(
1977 1994 pullop.repo, cg, tr, b'pull', pullop.remote.url()
1978 1995 )
1979 1996 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1980 1997
1981 1998
1982 1999 def _pullphase(pullop):
1983 2000 # Get remote phases data from remote
1984 2001 if b'phases' in pullop.stepsdone:
1985 2002 return
1986 2003 remotephases = listkeys(pullop.remote, b'phases')
1987 2004 _pullapplyphases(pullop, remotephases)
1988 2005
1989 2006
1990 2007 def _pullapplyphases(pullop, remotephases):
1991 2008 """apply phase movement from observed remote state"""
1992 2009 if b'phases' in pullop.stepsdone:
1993 2010 return
1994 2011 pullop.stepsdone.add(b'phases')
1995 2012 publishing = bool(remotephases.get(b'publishing', False))
1996 2013 if remotephases and not publishing:
1997 2014 # remote is new and non-publishing
1998 2015 pheads, _dr = phases.analyzeremotephases(
1999 2016 pullop.repo, pullop.pulledsubset, remotephases
2000 2017 )
2001 2018 dheads = pullop.pulledsubset
2002 2019 else:
2003 2020         # Remote is old or publishing; all common changesets
2004 2021         # should be seen as public
2005 2022 pheads = pullop.pulledsubset
2006 2023 dheads = []
2007 2024 unfi = pullop.repo.unfiltered()
2008 2025 phase = unfi._phasecache.phase
2009 2026 rev = unfi.changelog.index.get_rev
2010 2027 public = phases.public
2011 2028 draft = phases.draft
2012 2029
2013 2030 # exclude changesets already public locally and update the others
2014 2031 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
2015 2032 if pheads:
2016 2033 tr = pullop.gettransaction()
2017 2034 phases.advanceboundary(pullop.repo, tr, public, pheads)
2018 2035
2019 2036 # exclude changesets already draft locally and update the others
2020 2037 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
2021 2038 if dheads:
2022 2039 tr = pullop.gettransaction()
2023 2040 phases.advanceboundary(pullop.repo, tr, draft, dheads)
2024 2041
2025 2042
2026 2043 def _pullbookmarks(pullop):
2027 2044 """process the remote bookmark information to update the local one"""
2028 2045 if b'bookmarks' in pullop.stepsdone:
2029 2046 return
2030 2047 pullop.stepsdone.add(b'bookmarks')
2031 2048 repo = pullop.repo
2032 2049 remotebookmarks = pullop.remotebookmarks
2033 2050 bookmarks_mode = None
2034 2051 if pullop.remote_path is not None:
2035 2052 bookmarks_mode = pullop.remote_path.bookmarks_mode
2036 2053 bookmod.updatefromremote(
2037 2054 repo.ui,
2038 2055 repo,
2039 2056 remotebookmarks,
2040 2057 pullop.remote.url(),
2041 2058 pullop.gettransaction,
2042 2059 explicit=pullop.explicitbookmarks,
2043 2060 mode=bookmarks_mode,
2044 2061 )
2045 2062
2046 2063
2047 2064 def _pullobsolete(pullop):
2048 2065 """utility function to pull obsolete markers from a remote
2049 2066
2050 2067     The `gettransaction` argument is a function that returns the pull
2051 2068     transaction, creating one if necessary. We return the transaction to inform
2052 2069     the calling code that a new transaction has been created (when applicable).
2053 2070
2054 2071     Exists mostly to allow overriding for experimentation purposes."""
2055 2072 if b'obsmarkers' in pullop.stepsdone:
2056 2073 return
2057 2074 pullop.stepsdone.add(b'obsmarkers')
2058 2075 tr = None
2059 2076 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2060 2077 pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
2061 2078 remoteobs = listkeys(pullop.remote, b'obsolete')
2062 2079 if b'dump0' in remoteobs:
2063 2080 tr = pullop.gettransaction()
2064 2081 markers = []
2065 2082 for key in sorted(remoteobs, reverse=True):
2066 2083 if key.startswith(b'dump'):
2067 2084 data = util.b85decode(remoteobs[key])
2068 2085 version, newmarks = obsolete._readmarkers(data)
2069 2086 markers += newmarks
2070 2087 if markers:
2071 2088 pullop.repo.obsstore.add(tr, markers)
2072 2089 pullop.repo.invalidatevolatilesets()
2073 2090 return tr
2074 2091
2075 2092
2076 2093 def applynarrowacl(repo, kwargs):
2077 2094 """Apply narrow fetch access control.
2078 2095
2079 2096 This massages the named arguments for getbundle wire protocol commands
2080 2097 so requested data is filtered through access control rules.
2081 2098 """
2082 2099 ui = repo.ui
2083 2100 # TODO this assumes existence of HTTP and is a layering violation.
2084 2101 username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
2085 2102 user_includes = ui.configlist(
2086 2103 _NARROWACL_SECTION,
2087 2104 username + b'.includes',
2088 2105 ui.configlist(_NARROWACL_SECTION, b'default.includes'),
2089 2106 )
2090 2107 user_excludes = ui.configlist(
2091 2108 _NARROWACL_SECTION,
2092 2109 username + b'.excludes',
2093 2110 ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
2094 2111 )
2095 2112 if not user_includes:
2096 2113 raise error.Abort(
2097 2114 _(b"%s configuration for user %s is empty")
2098 2115 % (_NARROWACL_SECTION, username)
2099 2116 )
2100 2117
2101 2118 user_includes = [
2102 2119 b'path:.' if p == b'*' else b'path:' + p for p in user_includes
2103 2120 ]
2104 2121 user_excludes = [
2105 2122 b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
2106 2123 ]
2107 2124
2108 2125 req_includes = set(kwargs.get('includepats', []))
2109 2126 req_excludes = set(kwargs.get('excludepats', []))
2110 2127
2111 2128 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
2112 2129 req_includes, req_excludes, user_includes, user_excludes
2113 2130 )
2114 2131
2115 2132 if invalid_includes:
2116 2133 raise error.Abort(
2117 2134 _(b"The following includes are not accessible for %s: %s")
2118 2135 % (username, stringutil.pprint(invalid_includes))
2119 2136 )
2120 2137
2121 2138 new_args = {}
2122 2139 new_args.update(kwargs)
2123 2140 new_args['narrow'] = True
2124 2141 new_args['narrow_acl'] = True
2125 2142 new_args['includepats'] = req_includes
2126 2143 if req_excludes:
2127 2144 new_args['excludepats'] = req_excludes
2128 2145
2129 2146 return new_args
2130 2147
2131 2148
2132 2149 def _computeellipsis(repo, common, heads, known, match, depth=None):
2133 2150 """Compute the shape of a narrowed DAG.
2134 2151
2135 2152 Args:
2136 2153 repo: The repository we're transferring.
2137 2154 common: The roots of the DAG range we're transferring.
2138 2155 May be just [nullid], which means all ancestors of heads.
2139 2156 heads: The heads of the DAG range we're transferring.
2140 2157 match: The narrowmatcher that allows us to identify relevant changes.
2141 2158 depth: If not None, only consider nodes to be full nodes if they are at
2142 2159 most depth changesets away from one of heads.
2143 2160
2144 2161 Returns:
2145 2162 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
2146 2163
2147 2164 visitnodes: The list of nodes (either full or ellipsis) which
2148 2165 need to be sent to the client.
2149 2166 relevant_nodes: The set of changelog nodes which change a file inside
2150 2167 the narrowspec. The client needs these as non-ellipsis nodes.
2151 2168 ellipsisroots: A dict of {rev: parents} that is used in
2152 2169 narrowchangegroup to produce ellipsis nodes with the
2153 2170 correct parents.
2154 2171 """
2155 2172 cl = repo.changelog
2156 2173 mfl = repo.manifestlog
2157 2174
2158 2175 clrev = cl.rev
2159 2176
2160 2177 commonrevs = {clrev(n) for n in common} | {nullrev}
2161 2178 headsrevs = {clrev(n) for n in heads}
2162 2179
2163 2180 if depth:
2164 2181 revdepth = {h: 0 for h in headsrevs}
2165 2182
2166 2183 ellipsisheads = collections.defaultdict(set)
2167 2184 ellipsisroots = collections.defaultdict(set)
2168 2185
2169 2186 def addroot(head, curchange):
2170 2187 """Add a root to an ellipsis head, splitting heads with 3 roots."""
2171 2188 ellipsisroots[head].add(curchange)
2172 2189 # Recursively split ellipsis heads with 3 roots by finding the
2173 2190 # roots' youngest common descendant which is an elided merge commit.
2174 2191 # That descendant takes 2 of the 3 roots as its own, and becomes a
2175 2192 # root of the head.
2176 2193 while len(ellipsisroots[head]) > 2:
2177 2194 child, roots = splithead(head)
2178 2195 splitroots(head, child, roots)
2179 2196 head = child # Recurse in case we just added a 3rd root
2180 2197
2181 2198 def splitroots(head, child, roots):
2182 2199 ellipsisroots[head].difference_update(roots)
2183 2200 ellipsisroots[head].add(child)
2184 2201 ellipsisroots[child].update(roots)
2185 2202 ellipsisroots[child].discard(child)
2186 2203
2187 2204 def splithead(head):
2188 2205 r1, r2, r3 = sorted(ellipsisroots[head])
2189 2206 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
2190 2207 mid = repo.revs(
2191 2208 b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
2192 2209 )
2193 2210 for j in mid:
2194 2211 if j == nr2:
2195 2212 return nr2, (nr1, nr2)
2196 2213 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
2197 2214 return j, (nr1, nr2)
2198 2215 raise error.Abort(
2199 2216 _(
2200 2217 b'Failed to split up ellipsis node! head: %d, '
2201 2218 b'roots: %d %d %d'
2202 2219 )
2203 2220 % (head, r1, r2, r3)
2204 2221 )
2205 2222
2206 2223 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
2207 2224 visit = reversed(missing)
2208 2225 relevant_nodes = set()
2209 2226 visitnodes = [cl.node(m) for m in missing]
2210 2227 required = set(headsrevs) | known
2211 2228 for rev in visit:
2212 2229 clrev = cl.changelogrevision(rev)
2213 2230 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
2214 2231 if depth is not None:
2215 2232 curdepth = revdepth[rev]
2216 2233 for p in ps:
2217 2234 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
2218 2235 needed = False
2219 2236 shallow_enough = depth is None or revdepth[rev] <= depth
2220 2237 if shallow_enough:
2221 2238 curmf = mfl[clrev.manifest].read()
2222 2239 if ps:
2223 2240 # We choose to not trust the changed files list in
2224 2241 # changesets because it's not always correct. TODO: could
2225 2242 # we trust it for the non-merge case?
2226 2243 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
2227 2244 needed = bool(curmf.diff(p1mf, match))
2228 2245 if not needed and len(ps) > 1:
2229 2246 # For merge changes, the list of changed files is not
2230 2247 # helpful, since we need to emit the merge if a file
2231 2248 # in the narrow spec has changed on either side of the
2232 2249 # merge. As a result, we do a manifest diff to check.
2233 2250 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2234 2251 needed = bool(curmf.diff(p2mf, match))
2235 2252 else:
2236 2253 # For a root node, we need to include the node if any
2237 2254 # files in the node match the narrowspec.
2238 2255 needed = any(curmf.walk(match))
2239 2256
2240 2257 if needed:
2241 2258 for head in ellipsisheads[rev]:
2242 2259 addroot(head, rev)
2243 2260 for p in ps:
2244 2261 required.add(p)
2245 2262 relevant_nodes.add(cl.node(rev))
2246 2263 else:
2247 2264 if not ps:
2248 2265 ps = [nullrev]
2249 2266 if rev in required:
2250 2267 for head in ellipsisheads[rev]:
2251 2268 addroot(head, rev)
2252 2269 for p in ps:
2253 2270 ellipsisheads[p].add(rev)
2254 2271 else:
2255 2272 for p in ps:
2256 2273 ellipsisheads[p] |= ellipsisheads[rev]
2257 2274
2258 2275 # add common changesets as roots of their reachable ellipsis heads
2259 2276 for c in commonrevs:
2260 2277 for head in ellipsisheads[c]:
2261 2278 addroot(head, c)
2262 2279 return visitnodes, relevant_nodes, ellipsisroots
2263 2280
2264 2281
2265 2282 def caps20to10(repo, role):
2266 2283 """return a set with appropriate options to use bundle20 during getbundle"""
2267 2284 caps = {b'HG20'}
2268 2285 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2269 2286 caps.add(b'bundle2=' + urlreq.quote(capsblob))
2270 2287 return caps
2271 2288
2272 2289
2273 2290 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2274 2291 getbundle2partsorder = []
2275 2292
2276 2293 # Mapping between step name and function
2277 2294 #
2278 2295 # This exists to help extensions wrap steps if necessary
2279 2296 getbundle2partsmapping = {}
2280 2297
2281 2298
2282 2299 def getbundle2partsgenerator(stepname, idx=None):
2283 2300 """decorator for function generating bundle2 part for getbundle
2284 2301
2285 2302 The function is added to the step -> function mapping and appended to the
2286 2303 list of steps. Beware that decorated functions will be added in order
2287 2304 (this may matter).
2288 2305
2289 2306     You can only use this decorator for new steps; if you want to wrap a step
2290 2307     from an extension, change the getbundle2partsmapping dictionary directly."""
2291 2308
2292 2309 def dec(func):
2293 2310 assert stepname not in getbundle2partsmapping
2294 2311 getbundle2partsmapping[stepname] = func
2295 2312 if idx is None:
2296 2313 getbundle2partsorder.append(stepname)
2297 2314 else:
2298 2315 getbundle2partsorder.insert(idx, stepname)
2299 2316 return func
2300 2317
2301 2318 return dec
2302 2319
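# Registration sketch (hypothetical part name): an extension contributing a
# part to getbundle responses would look roughly like
#
#     @getbundle2partsgenerator(b'my-part')
#     def _getbundlemypart(bundler, repo, source, **kwargs):
#         bundler.newpart(b'my-part', data=b'payload', mandatory=False)
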
2303 2320
2304 2321 def bundle2requested(bundlecaps):
2305 2322 if bundlecaps is not None:
2306 2323 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2307 2324 return False
2308 2325
2309 2326
2310 2327 def getbundlechunks(
2311 2328 repo,
2312 2329 source,
2313 2330 heads=None,
2314 2331 common=None,
2315 2332 bundlecaps=None,
2316 2333 remote_sidedata=None,
2317 2334 **kwargs
2318 2335 ):
2319 2336 """Return chunks constituting a bundle's raw data.
2320 2337
2321 2338 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2322 2339 passed.
2323 2340
2324 2341 Returns a 2-tuple of a dict with metadata about the generated bundle
2325 2342 and an iterator over raw chunks (of varying sizes).
2326 2343 """
2327 2344 kwargs = pycompat.byteskwargs(kwargs)
2328 2345 info = {}
2329 2346 usebundle2 = bundle2requested(bundlecaps)
2330 2347 # bundle10 case
2331 2348 if not usebundle2:
2332 2349 if bundlecaps and not kwargs.get(b'cg', True):
2333 2350 raise ValueError(
2334 2351 _(b'request for bundle10 must include changegroup')
2335 2352 )
2336 2353
2337 2354 if kwargs:
2338 2355 raise ValueError(
2339 2356 _(b'unsupported getbundle arguments: %s')
2340 2357 % b', '.join(sorted(kwargs.keys()))
2341 2358 )
2342 2359 outgoing = _computeoutgoing(repo, heads, common)
2343 2360 info[b'bundleversion'] = 1
2344 2361 return (
2345 2362 info,
2346 2363 changegroup.makestream(
2347 2364 repo,
2348 2365 outgoing,
2349 2366 b'01',
2350 2367 source,
2351 2368 bundlecaps=bundlecaps,
2352 2369 remote_sidedata=remote_sidedata,
2353 2370 ),
2354 2371 )
2355 2372
2356 2373 # bundle20 case
2357 2374 info[b'bundleversion'] = 2
2358 2375 b2caps = {}
2359 2376 for bcaps in bundlecaps:
2360 2377 if bcaps.startswith(b'bundle2='):
2361 2378 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2362 2379 b2caps.update(bundle2.decodecaps(blob))
2363 2380 bundler = bundle2.bundle20(repo.ui, b2caps)
2364 2381
2365 2382 kwargs[b'heads'] = heads
2366 2383 kwargs[b'common'] = common
2367 2384
2368 2385 for name in getbundle2partsorder:
2369 2386 func = getbundle2partsmapping[name]
2370 2387 func(
2371 2388 bundler,
2372 2389 repo,
2373 2390 source,
2374 2391 bundlecaps=bundlecaps,
2375 2392 b2caps=b2caps,
2376 2393 remote_sidedata=remote_sidedata,
2377 2394 **pycompat.strkwargs(kwargs)
2378 2395 )
2379 2396
2380 2397 info[b'prefercompressed'] = bundler.prefercompressed
2381 2398
2382 2399 return info, bundler.getchunks()
2383 2400
2384 2401
2385 2402 @getbundle2partsgenerator(b'stream2')
2386 2403 def _getbundlestream2(bundler, repo, *args, **kwargs):
2387 2404 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2388 2405
2389 2406
2390 2407 @getbundle2partsgenerator(b'changegroup')
2391 2408 def _getbundlechangegrouppart(
2392 2409 bundler,
2393 2410 repo,
2394 2411 source,
2395 2412 bundlecaps=None,
2396 2413 b2caps=None,
2397 2414 heads=None,
2398 2415 common=None,
2399 2416 remote_sidedata=None,
2400 2417 **kwargs
2401 2418 ):
2402 2419 """add a changegroup part to the requested bundle"""
2403 2420 if not kwargs.get('cg', True) or not b2caps:
2404 2421 return
2405 2422
2406 2423 version = b'01'
2407 2424 cgversions = b2caps.get(b'changegroup')
2408 2425 if cgversions: # 3.1 and 3.2 ship with an empty value
2409 2426 cgversions = [
2410 2427 v
2411 2428 for v in cgversions
2412 2429 if v in changegroup.supportedoutgoingversions(repo)
2413 2430 ]
2414 2431 if not cgversions:
2415 2432 raise error.Abort(_(b'no common changegroup version'))
2416 2433 version = max(cgversions)
2417 2434
2418 2435 outgoing = _computeoutgoing(repo, heads, common)
2419 2436 if not outgoing.missing:
2420 2437 return
2421 2438
2422 2439 if kwargs.get('narrow', False):
2423 2440 include = sorted(filter(bool, kwargs.get('includepats', [])))
2424 2441 exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
2425 2442 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2426 2443 else:
2427 2444 matcher = None
2428 2445
2429 2446 cgstream = changegroup.makestream(
2430 2447 repo,
2431 2448 outgoing,
2432 2449 version,
2433 2450 source,
2434 2451 bundlecaps=bundlecaps,
2435 2452 matcher=matcher,
2436 2453 remote_sidedata=remote_sidedata,
2437 2454 )
2438 2455
2439 2456 part = bundler.newpart(b'changegroup', data=cgstream)
2440 2457 if cgversions:
2441 2458 part.addparam(b'version', version)
2442 2459
2443 2460 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2444 2461
2445 2462 if scmutil.istreemanifest(repo):
2446 2463 part.addparam(b'treemanifest', b'1')
2447 2464
2448 2465 if repository.REPO_FEATURE_SIDE_DATA in repo.features:
2449 2466 part.addparam(b'exp-sidedata', b'1')
2450 2467 sidedata = bundle2.format_remote_wanted_sidedata(repo)
2451 2468 part.addparam(b'exp-wanted-sidedata', sidedata)
2452 2469
2453 2470 if (
2454 2471 kwargs.get('narrow', False)
2455 2472 and kwargs.get('narrow_acl', False)
2456 2473 and (include or exclude)
2457 2474 ):
2458 2475 # this is mandatory because otherwise ACL clients won't work
2459 2476 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2460 2477 narrowspecpart.data = b'%s\0%s' % (
2461 2478 b'\n'.join(include),
2462 2479 b'\n'.join(exclude),
2463 2480 )
2464 2481
2465 2482
2466 2483 @getbundle2partsgenerator(b'bookmarks')
2467 2484 def _getbundlebookmarkpart(
2468 2485 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2469 2486 ):
2470 2487 """add a bookmark part to the requested bundle"""
2471 2488 if not kwargs.get('bookmarks', False):
2472 2489 return
2473 2490 if not b2caps or b'bookmarks' not in b2caps:
2474 2491 raise error.Abort(_(b'no common bookmarks exchange method'))
2475 2492 books = bookmod.listbinbookmarks(repo)
2476 2493 data = bookmod.binaryencode(repo, books)
2477 2494 if data:
2478 2495 bundler.newpart(b'bookmarks', data=data)
2479 2496
2480 2497
2481 2498 @getbundle2partsgenerator(b'listkeys')
2482 2499 def _getbundlelistkeysparts(
2483 2500 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2484 2501 ):
2485 2502 """add parts containing listkeys namespaces to the requested bundle"""
2486 2503 listkeys = kwargs.get('listkeys', ())
2487 2504 for namespace in listkeys:
2488 2505 part = bundler.newpart(b'listkeys')
2489 2506 part.addparam(b'namespace', namespace)
2490 2507 keys = repo.listkeys(namespace).items()
2491 2508 part.data = pushkey.encodekeys(keys)
2492 2509
2493 2510
2494 2511 @getbundle2partsgenerator(b'obsmarkers')
2495 2512 def _getbundleobsmarkerpart(
2496 2513 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2497 2514 ):
2498 2515 """add an obsolescence markers part to the requested bundle"""
2499 2516 if kwargs.get('obsmarkers', False):
2500 2517 if heads is None:
2501 2518 heads = repo.heads()
2502 2519 subset = [c.node() for c in repo.set(b'::%ln', heads)]
2503 2520 markers = repo.obsstore.relevantmarkers(subset)
2504 2521 markers = obsutil.sortedmarkers(markers)
2505 2522 bundle2.buildobsmarkerspart(bundler, markers)
2506 2523
2507 2524
2508 2525 @getbundle2partsgenerator(b'phases')
2509 2526 def _getbundlephasespart(
2510 2527 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2511 2528 ):
2512 2529 """add phase heads part to the requested bundle"""
2513 2530 if kwargs.get('phases', False):
2514 2531 if not b2caps or b'heads' not in b2caps.get(b'phases'):
2515 2532 raise error.Abort(_(b'no common phases exchange method'))
2516 2533 if heads is None:
2517 2534 heads = repo.heads()
2518 2535
2519 2536 headsbyphase = collections.defaultdict(set)
2520 2537 if repo.publishing():
2521 2538 headsbyphase[phases.public] = heads
2522 2539 else:
2523 2540 # find the appropriate heads to move
2524 2541
2525 2542 phase = repo._phasecache.phase
2526 2543 node = repo.changelog.node
2527 2544 rev = repo.changelog.rev
2528 2545 for h in heads:
2529 2546 headsbyphase[phase(repo, rev(h))].add(h)
2530 2547 seenphases = list(headsbyphase.keys())
2531 2548
2532 2549             # We do not handle anything but public and draft phases for now.
2533 2550 if seenphases:
2534 2551 assert max(seenphases) <= phases.draft
2535 2552
2536 2553 # if client is pulling non-public changesets, we need to find
2537 2554 # intermediate public heads.
2538 2555 draftheads = headsbyphase.get(phases.draft, set())
2539 2556 if draftheads:
2540 2557 publicheads = headsbyphase.get(phases.public, set())
2541 2558
2542 2559 revset = b'heads(only(%ln, %ln) and public())'
2543 2560 extraheads = repo.revs(revset, draftheads, publicheads)
2544 2561 for r in extraheads:
2545 2562 headsbyphase[phases.public].add(node(r))
2546 2563
2547 2564 # transform data in a format used by the encoding function
2548 2565 phasemapping = {
2549 2566 phase: sorted(headsbyphase[phase]) for phase in phases.allphases
2550 2567 }
2551 2568
2552 2569 # generate the actual part
2553 2570 phasedata = phases.binaryencode(phasemapping)
2554 2571 bundler.newpart(b'phase-heads', data=phasedata)
2555 2572
2556 2573
2557 2574 @getbundle2partsgenerator(b'hgtagsfnodes')
2558 2575 def _getbundletagsfnodes(
2559 2576 bundler,
2560 2577 repo,
2561 2578 source,
2562 2579 bundlecaps=None,
2563 2580 b2caps=None,
2564 2581 heads=None,
2565 2582 common=None,
2566 2583 **kwargs
2567 2584 ):
2568 2585 """Transfer the .hgtags filenodes mapping.
2569 2586
2570 2587 Only values for heads in this bundle will be transferred.
2571 2588
2572 2589 The part data consists of pairs of 20 byte changeset node and .hgtags
2573 2590 filenodes raw values.
2574 2591 """
2575 2592 # Don't send unless:
2576 2593     # - changesets are being exchanged,
2577 2594 # - the client supports it.
2578 2595 if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
2579 2596 return
2580 2597
2581 2598 outgoing = _computeoutgoing(repo, heads, common)
2582 2599 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2583 2600
2584 2601
2585 2602 @getbundle2partsgenerator(b'cache:rev-branch-cache')
2586 2603 def _getbundlerevbranchcache(
2587 2604 bundler,
2588 2605 repo,
2589 2606 source,
2590 2607 bundlecaps=None,
2591 2608 b2caps=None,
2592 2609 heads=None,
2593 2610 common=None,
2594 2611 **kwargs
2595 2612 ):
2596 2613 """Transfer the rev-branch-cache mapping
2597 2614
2598 2615 The payload is a series of data related to each branch
2599 2616
2600 2617 1) branch name length
2601 2618 2) number of open heads
2602 2619 3) number of closed heads
2603 2620 4) open heads nodes
2604 2621 5) closed heads nodes
2605 2622 """
2606 2623 # Don't send unless:
2607 2624     # - changesets are being exchanged,
2608 2625     # - the client supports it,
2609 2626     # - narrow bundle isn't in play (not currently compatible).
2610 2627 if (
2611 2628 not kwargs.get('cg', True)
2612 2629 or not b2caps
2613 2630 or b'rev-branch-cache' not in b2caps
2614 2631 or kwargs.get('narrow', False)
2615 2632 or repo.ui.has_section(_NARROWACL_SECTION)
2616 2633 ):
2617 2634 return
2618 2635
2619 2636 outgoing = _computeoutgoing(repo, heads, common)
2620 2637 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2621 2638
2622 2639
2623 2640 def check_heads(repo, their_heads, context):
2624 2641 """check if the heads of a repo have been modified
2625 2642
2626 2643 Used by peer for unbundling.
2627 2644 """
2628 2645 heads = repo.heads()
2629 2646 heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()
2630 2647 if not (
2631 2648 their_heads == [b'force']
2632 2649 or their_heads == heads
2633 2650 or their_heads == [b'hashed', heads_hash]
2634 2651 ):
2635 2652 # someone else committed/pushed/unbundled while we
2636 2653 # were transferring data
2637 2654 raise error.PushRaced(
2638 2655 b'repository changed while %s - please try again' % context
2639 2656 )
2640 2657
2641 2658
2642 2659 def unbundle(repo, cg, heads, source, url):
2643 2660 """Apply a bundle to a repo.
2644 2661
2645 2662     This function makes sure the repo is locked during the application and has
2646 2663     a mechanism to check that no push race occurred between the creation of the
2647 2664     bundle and its application.
2648 2665
2649 2666     If the push was raced, a PushRaced exception is raised."""
2650 2667 r = 0
2651 2668 # need a transaction when processing a bundle2 stream
2652 2669 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2653 2670 lockandtr = [None, None, None]
2654 2671 recordout = None
2655 2672 # quick fix for output mismatch with bundle2 in 3.4
2656 2673 captureoutput = repo.ui.configbool(
2657 2674 b'experimental', b'bundle2-output-capture'
2658 2675 )
2659 2676 if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
2660 2677 captureoutput = True
2661 2678 try:
2662 2679 # note: outside bundle1, 'heads' is expected to be empty and this
2663 2680         # 'check_heads' call will be a no-op
2664 2681 check_heads(repo, heads, b'uploading changes')
2665 2682 # push can proceed
2666 2683 if not isinstance(cg, bundle2.unbundle20):
2667 2684 # legacy case: bundle1 (changegroup 01)
2668 2685 txnname = b"\n".join([source, urlutil.hidepassword(url)])
2669 2686 with repo.lock(), repo.transaction(txnname) as tr:
2670 2687 op = bundle2.applybundle(repo, cg, tr, source, url)
2671 2688 r = bundle2.combinechangegroupresults(op)
2672 2689 else:
2673 2690 r = None
2674 2691 try:
2675 2692
2676 2693 def gettransaction():
2677 2694 if not lockandtr[2]:
2678 2695 if not bookmod.bookmarksinstore(repo):
2679 2696 lockandtr[0] = repo.wlock()
2680 2697 lockandtr[1] = repo.lock()
2681 2698 lockandtr[2] = repo.transaction(source)
2682 2699 lockandtr[2].hookargs[b'source'] = source
2683 2700 lockandtr[2].hookargs[b'url'] = url
2684 2701 lockandtr[2].hookargs[b'bundle2'] = b'1'
2685 2702 return lockandtr[2]
2686 2703
2687 2704 # Do greedy locking by default until we're satisfied with lazy
2688 2705 # locking.
2689 2706 if not repo.ui.configbool(
2690 2707 b'experimental', b'bundle2lazylocking'
2691 2708 ):
2692 2709 gettransaction()
2693 2710
2694 2711 op = bundle2.bundleoperation(
2695 2712 repo,
2696 2713 gettransaction,
2697 2714 captureoutput=captureoutput,
2698 2715 source=b'push',
2699 2716 )
2700 2717 try:
2701 2718 op = bundle2.processbundle(repo, cg, op=op)
2702 2719 finally:
2703 2720 r = op.reply
2704 2721 if captureoutput and r is not None:
2705 2722 repo.ui.pushbuffer(error=True, subproc=True)
2706 2723
2707 2724 def recordout(output):
2708 2725 r.newpart(b'output', data=output, mandatory=False)
2709 2726
2710 2727 if lockandtr[2] is not None:
2711 2728 lockandtr[2].close()
2712 2729 except BaseException as exc:
2713 2730 exc.duringunbundle2 = True
2714 2731 if captureoutput and r is not None:
2715 2732 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2716 2733
2717 2734 def recordout(output):
2718 2735 part = bundle2.bundlepart(
2719 2736 b'output', data=output, mandatory=False
2720 2737 )
2721 2738 parts.append(part)
2722 2739
2723 2740 raise
2724 2741 finally:
2725 2742 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2726 2743 if recordout is not None:
2727 2744 recordout(repo.ui.popbuffer())
2728 2745 return r
2729 2746
2730 2747
2731 2748 def _maybeapplyclonebundle(pullop):
2732 2749 """Apply a clone bundle from a remote, if possible."""
2733 2750
2734 2751 repo = pullop.repo
2735 2752 remote = pullop.remote
2736 2753
2737 2754 if not repo.ui.configbool(b'ui', b'clonebundles'):
2738 2755 return
2739 2756
2740 2757 # Only run if local repo is empty.
2741 2758 if len(repo):
2742 2759 return
2743 2760
2744 2761 if pullop.heads:
2745 2762 return
2746 2763
2747 2764 if not remote.capable(b'clonebundles'):
2748 2765 return
2749 2766
2750 2767 with remote.commandexecutor() as e:
2751 2768 res = e.callcommand(b'clonebundles', {}).result()
2752 2769
2753 2770 # If we call the wire protocol command, that's good enough to record the
2754 2771 # attempt.
2755 2772 pullop.clonebundleattempted = True
2756 2773
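    # Manifest sketch (illustrative URL): the server response is a list of
    # lines of the form "URL key=value ...", with attributes such as
    # BUNDLESPEC and REQUIRESNI, e.g.
    #
    #     https://example.com/full.zst.hg BUNDLESPEC=zstd-v2 REQUIRESNI=true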
2757 2774 entries = bundlecaches.parseclonebundlesmanifest(repo, res)
2758 2775 if not entries:
2759 2776 repo.ui.note(
2760 2777 _(
2761 2778 b'no clone bundles available on remote; '
2762 2779 b'falling back to regular clone\n'
2763 2780 )
2764 2781 )
2765 2782 return
2766 2783
2767 2784 entries = bundlecaches.filterclonebundleentries(
2768 2785 repo, entries, streamclonerequested=pullop.streamclonerequested
2769 2786 )
2770 2787
2771 2788 if not entries:
2772 2789 # There is a thundering herd concern here. However, if a server
2773 2790 # operator doesn't advertise bundles appropriate for its clients,
2774 2791 # they deserve what's coming. Furthermore, from a client's
2775 2792 # perspective, no automatic fallback would mean not being able to
2776 2793 # clone!
2777 2794 repo.ui.warn(
2778 2795 _(
2779 2796 b'no compatible clone bundles available on server; '
2780 2797 b'falling back to regular clone\n'
2781 2798 )
2782 2799 )
2783 2800 repo.ui.warn(
2784 2801 _(b'(you may want to report this to the server operator)\n')
2785 2802 )
2786 2803 return
2787 2804
2788 2805 entries = bundlecaches.sortclonebundleentries(repo.ui, entries)
2789 2806
2790 2807 url = entries[0][b'URL']
2791 2808 repo.ui.status(_(b'applying clone bundle from %s\n') % url)
2792 2809 if trypullbundlefromurl(repo.ui, repo, url):
2793 2810 repo.ui.status(_(b'finished applying clone bundle\n'))
2794 2811 # Bundle failed.
2795 2812 #
2796 2813 # We abort by default to avoid the thundering herd of
2797 2814 # clients flooding a server that was expecting expensive
2798 2815 # clone load to be offloaded.
2799 2816 elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
2800 2817 repo.ui.warn(_(b'falling back to normal clone\n'))
2801 2818 else:
2802 2819 raise error.Abort(
2803 2820 _(b'error applying bundle'),
2804 2821 hint=_(
2805 2822 b'if this error persists, consider contacting '
2806 2823 b'the server operator or disable clone '
2807 2824 b'bundles via '
2808 2825 b'"--config ui.clonebundles=false"'
2809 2826 ),
2810 2827 )
2811 2828
2812 2829
2813 2830 def trypullbundlefromurl(ui, repo, url):
2814 2831 """Attempt to apply a bundle from a URL."""
2815 2832 with repo.lock(), repo.transaction(b'bundleurl') as tr:
2816 2833 try:
2817 2834 fh = urlmod.open(ui, url)
2818 2835 cg = readbundle(ui, fh, b'stream')
2819 2836
2820 2837 if isinstance(cg, streamclone.streamcloneapplier):
2821 2838 cg.apply(repo)
2822 2839 else:
2823 2840 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
2824 2841 return True
2825 2842 except urlerr.httperror as e:
2826 2843 ui.warn(
2827 2844 _(b'HTTP error fetching bundle: %s\n')
2828 2845 % stringutil.forcebytestr(e)
2829 2846 )
2830 2847 except urlerr.urlerror as e:
2831 2848 ui.warn(
2832 2849 _(b'error fetching bundle: %s\n')
2833 2850 % stringutil.forcebytestr(e.reason)
2834 2851 )
2835 2852
2836 2853 return False
@@ -1,270 +1,268
1 1 bundle w/o type option
2 2
3 3 $ hg init t1
4 4 $ hg init t2
5 5 $ cd t1
6 6 $ echo blablablablabla > file.txt
7 7 $ hg ci -Ama
8 8 adding file.txt
9 9 $ hg log | grep summary
10 10 summary: a
11 11 $ hg bundle ../b1 ../t2
12 12 searching for changes
13 13 1 changesets found
14 14
15 15 $ cd ../t2
16 16 $ hg unbundle ../b1
17 17 adding changesets
18 18 adding manifests
19 19 adding file changes
20 20 added 1 changesets with 1 changes to 1 files
21 21 new changesets c35a0f9217e6 (1 drafts)
22 22 (run 'hg update' to get a working copy)
23 23 $ hg up
24 24 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
25 25 $ hg log | grep summary
26 26 summary: a
27 27 $ cd ..
28 28
29 29 Unknown compression type is rejected
30 30
31 31 $ hg init t3
32 32 $ cd t3
33 33 $ hg -q unbundle ../b1
34 34 $ hg bundle -a -t unknown out.hg
35 35 abort: unknown is not a recognized bundle specification
36 36 (see 'hg help bundlespec' for supported values for --type)
37 37 [10]
38 38
39 39 $ hg bundle -a -t unknown-v2 out.hg
40 40 abort: unknown compression is not supported
41 41 (see 'hg help bundlespec' for supported values for --type)
42 42 [10]
43 43
44 44 $ cd ..
45 45
46 46 test bundle types
47 47
48 48 $ testbundle() {
49 49 > echo % test bundle type $1
50 50 > hg init t$1
51 51 > cd t1
52 52 > hg bundle -t $1 ../b$1 ../t$1
53 53 > f -q -B6 -D ../b$1; echo
54 54 > cd ../t$1
55 55 > hg debugbundle ../b$1
56 56 > hg debugbundle --spec ../b$1
57 57 > echo
58 58 > cd ..
59 59 > }
60 60
61 61 $ for t in "None" "bzip2" "gzip" "none-v2" "v2" "v1" "gzip-v1"; do
62 62 > testbundle $t
63 63 > done
64 64 % test bundle type None
65 65 searching for changes
66 66 1 changesets found
67 67 HG20\x00\x00 (esc)
68 68 Stream params: {}
69 69 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
70 70 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
71 71 cache:rev-branch-cache -- {} (mandatory: False)
72 72 none-v2
73 73
74 74 % test bundle type bzip2
75 75 searching for changes
76 76 1 changesets found
77 77 HG20\x00\x00 (esc)
78 78 Stream params: {Compression: BZ}
79 79 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
80 80 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
81 81 cache:rev-branch-cache -- {} (mandatory: False)
82 82 bzip2-v2
83 83
84 84 % test bundle type gzip
85 85 searching for changes
86 86 1 changesets found
87 87 HG20\x00\x00 (esc)
88 88 Stream params: {Compression: GZ}
89 89 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
90 90 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
91 91 cache:rev-branch-cache -- {} (mandatory: False)
92 92 gzip-v2
93 93
94 94 % test bundle type none-v2
95 95 searching for changes
96 96 1 changesets found
97 97 HG20\x00\x00 (esc)
98 98 Stream params: {}
99 99 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
100 100 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
101 101 cache:rev-branch-cache -- {} (mandatory: False)
102 102 none-v2
103 103
104 104 % test bundle type v2
105 105 searching for changes
106 106 1 changesets found
107 107 HG20\x00\x00 (esc)
108 108 Stream params: {Compression: BZ}
109 109 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
110 110 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
111 111 cache:rev-branch-cache -- {} (mandatory: False)
112 112 bzip2-v2
113 113
114 114 % test bundle type v1
115 115 searching for changes
116 116 1 changesets found
117 117 HG10BZ
118 118 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
119 119 bzip2-v1
120 120
121 121 % test bundle type gzip-v1
122 122 searching for changes
123 123 1 changesets found
124 124 HG10GZ
125 125 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
126 126 gzip-v1
127 127
128 128
129 129 Compression level can be adjusted for bundle2 bundles
130 130
131 131 $ hg init test-complevel
132 132 $ cd test-complevel
133 133
134 134 $ cat > file0 << EOF
135 135 > this is a file
136 136 > with some text
137 137 > and some more text
138 138 > and other content
139 139 > EOF
140 140 $ cat > file1 << EOF
141 141 > this is another file
142 142 > with some other content
143 143 > and repeated, repeated, repeated, repeated content
144 144 > EOF
145 145 $ hg -q commit -A -m initial
146 146
147 147 $ hg bundle -a -t gzip-v2 gzip-v2.hg
148 148 1 changesets found
149 149 $ f --size gzip-v2.hg
150 150 gzip-v2.hg: size=468
151 151
152 152 $ hg --config experimental.bundlecomplevel=1 bundle -a -t gzip-v2 gzip-v2-level1.hg
153 153 1 changesets found
154 154 $ f --size gzip-v2-level1.hg
155 155 gzip-v2-level1.hg: size=475
156 156
157 157 $ hg --config experimental.bundlecomplevel.gzip=1 --config experimental.bundlelevel=9 bundle -a -t gzip-v2 gzip-v2-level1.hg
158 158 1 changesets found
159 159 $ f --size gzip-v2-level1.hg
160 160 gzip-v2-level1.hg: size=475
161 161
162 162 $ cd ..
163 163
164 164 #if zstd
165 165
166 166 $ for t in "zstd" "zstd-v2"; do
167 167 > testbundle $t
168 168 > done
169 169 % test bundle type zstd
170 170 searching for changes
171 171 1 changesets found
172 172 HG20\x00\x00 (esc)
173 173 Stream params: {Compression: ZS}
174 174 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
175 175 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
176 176 cache:rev-branch-cache -- {} (mandatory: False)
177 177 zstd-v2
178 178
179 179 % test bundle type zstd-v2
180 180 searching for changes
181 181 1 changesets found
182 182 HG20\x00\x00 (esc)
183 183 Stream params: {Compression: ZS}
184 184 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
185 185 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
186 186 cache:rev-branch-cache -- {} (mandatory: False)
187 187 zstd-v2
188 188
189 189
190 190 Explicit request for zstd on non-generaldelta repos
191 191
192 192 $ hg --config format.usegeneraldelta=false init nogd
193 193 $ hg -q -R nogd pull t1
194 194 $ hg -R nogd bundle -a -t zstd nogd-zstd
195 195 1 changesets found
196 196
197 197 zstd-v1 always fails
198 198
199 199 $ hg -R tzstd bundle -a -t zstd-v1 zstd-v1
200 200 abort: compression engine zstd is not supported on v1 bundles
201 201 (see 'hg help bundlespec' for supported values for --type)
202 202 [10]
203 203
204 204 zstd supports threading
205 205
206 206 $ hg init test-compthreads
207 207 $ cd test-compthreads
208 208 $ hg debugbuilddag +3
209 209 $ hg --config experimental.bundlecompthreads=1 bundle -a -t zstd-v2 zstd-v2-threaded.hg
210 210 3 changesets found
211 211 $ cd ..
212 212
213 213 #else
214 214
215 215 zstd is a valid engine but isn't available
216 216
217 217 $ hg -R t1 bundle -a -t zstd irrelevant.hg
218 218 abort: compression engine zstd could not be loaded
219 219 [255]
220 220
221 221 #endif
222 222
223 223 test garbage file
224 224
225 225 $ echo garbage > bgarbage
226 226 $ hg init tgarbage
227 227 $ cd tgarbage
228 228 $ hg pull ../bgarbage
229 229 pulling from ../bgarbage
230 230 abort: ../bgarbage: not a Mercurial bundle
231 231 [255]
232 232 $ cd ..
233 233
234 234 test invalid bundle type
235 235
236 236 $ cd t1
237 237 $ hg bundle -a -t garbage ../bgarbage
238 238 abort: garbage is not a recognized bundle specification
239 239 (see 'hg help bundlespec' for supported values for --type)
240 240 [10]
241 241 $ cd ..
242 242
243 243 Test controlling the changegroup version
244 244
245 245 $ hg -R t1 bundle --config experimental.changegroup3=yes -a -t v2 ./v2-cg-default.hg
246 246 1 changesets found
247 247 $ hg debugbundle ./v2-cg-default.hg --part-type changegroup
248 248 Stream params: {Compression: BZ}
249 249 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
250 250 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
251 251 $ hg debugbundle ./v2-cg-default.hg --spec
252 252 bzip2-v2
253 253 $ hg -R t1 bundle --config experimental.changegroup3=yes -a -t 'v2;cg.version=02' ./v2-cg-02.hg
254 254 1 changesets found
255 255 $ hg debugbundle ./v2-cg-02.hg --part-type changegroup
256 256 Stream params: {Compression: BZ}
257 257 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
258 258 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
259 259 $ hg debugbundle ./v2-cg-02.hg --spec
260 260 bzip2-v2
261 261 $ hg -R t1 bundle --config experimental.changegroup3=yes -a -t 'v2;cg.version=03' ./v2-cg-03.hg
262 262 1 changesets found
263 263 $ hg debugbundle ./v2-cg-03.hg --part-type changegroup
264 264 Stream params: {Compression: BZ}
265 265 changegroup -- {nbchanges: 1, version: 03} (mandatory: True)
266 266 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
267 267 $ hg debugbundle ./v2-cg-03.hg --spec
268 abort: changegroup version 03 does not have a known bundlespec (known-bad-output !)
269 (try upgrading your Mercurial client) (known-bad-output !)
270 [255]
268 bzip2-v2;cg.version=03