bundle2: print "error:abort" message to stderr instead of stdout...
Martin von Zweigbergk
r47592:db9e33be default
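The change routes the remote's abort message through ui.error() instead of ui.status(). A minimal sketch of the behavioral difference, assuming Mercurial's usual ui semantics (status writes to stdout, error writes to stderr); illustrative only, not part of the diff:

from mercurial import ui as uimod

u = uimod.ui.load()
u.status(b'remote: normal output\n')       # written to stdout
u.error(b'remote: error:abort payload\n')  # written to stderr

After this commit, tools that parse the stdout of `hg push` no longer see the remote abort message interleaved with regular output.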
@@ -1,2756 +1,2756 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import weakref
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullid,
17 17 nullrev,
18 18 )
19 19 from . import (
20 20 bookmarks as bookmod,
21 21 bundle2,
22 22 bundlecaches,
23 23 changegroup,
24 24 discovery,
25 25 error,
26 26 exchangev2,
27 27 lock as lockmod,
28 28 logexchange,
29 29 narrowspec,
30 30 obsolete,
31 31 obsutil,
32 32 phases,
33 33 pushkey,
34 34 pycompat,
35 35 requirements,
36 36 scmutil,
37 37 streamclone,
38 38 url as urlmod,
39 39 util,
40 40 wireprototypes,
41 41 )
42 42 from .utils import (
43 43 hashutil,
44 44 stringutil,
45 45 )
46 46
47 47 urlerr = util.urlerr
48 48 urlreq = util.urlreq
49 49
50 50 _NARROWACL_SECTION = b'narrowacl'
51 51
52 52
53 53 def readbundle(ui, fh, fname, vfs=None):
54 54 header = changegroup.readexactly(fh, 4)
55 55
56 56 alg = None
57 57 if not fname:
58 58 fname = b"stream"
59 59 if not header.startswith(b'HG') and header.startswith(b'\0'):
60 60 fh = changegroup.headerlessfixup(fh, header)
61 61 header = b"HG10"
62 62 alg = b'UN'
63 63 elif vfs:
64 64 fname = vfs.join(fname)
65 65
66 66 magic, version = header[0:2], header[2:4]
67 67
68 68 if magic != b'HG':
69 69 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
70 70 if version == b'10':
71 71 if alg is None:
72 72 alg = changegroup.readexactly(fh, 2)
73 73 return changegroup.cg1unpacker(fh, alg)
74 74 elif version.startswith(b'2'):
75 75 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
76 76 elif version == b'S1':
77 77 return streamclone.streamcloneapplier(fh)
78 78 else:
79 79 raise error.Abort(
80 80 _(b'%s: unknown bundle version %s') % (fname, version)
81 81 )
82 82
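readbundle() dispatches on a 4-byte header: the 2-byte magic b'HG' plus a 2-byte version. A hedged sketch of the same dispatch in isolation (header values are illustrative):

# mirrors the dispatch in readbundle() above
header = b'HG20'                      # e.g. b'HG10', b'HG20', b'HGS1'
magic, version = header[0:2], header[2:4]
assert magic == b'HG'
if version == b'10':
    kind = 'changegroup-v1'           # followed by a 2-byte compression code such as b'UN'
elif version.startswith(b'2'):
    kind = 'bundle2'
elif version == b'S1':
    kind = 'streamclone'
else:
    kind = 'unknown'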
83 83
84 84 def getbundlespec(ui, fh):
85 85 """Infer the bundlespec from a bundle file handle.
86 86
87 87 The input file handle is seeked and the original seek position is not
88 88 restored.
89 89 """
90 90
91 91 def speccompression(alg):
92 92 try:
93 93 return util.compengines.forbundletype(alg).bundletype()[0]
94 94 except KeyError:
95 95 return None
96 96
97 97 b = readbundle(ui, fh, None)
98 98 if isinstance(b, changegroup.cg1unpacker):
99 99 alg = b._type
100 100 if alg == b'_truncatedBZ':
101 101 alg = b'BZ'
102 102 comp = speccompression(alg)
103 103 if not comp:
104 104 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
105 105 return b'%s-v1' % comp
106 106 elif isinstance(b, bundle2.unbundle20):
107 107 if b'Compression' in b.params:
108 108 comp = speccompression(b.params[b'Compression'])
109 109 if not comp:
110 110 raise error.Abort(
111 111 _(b'unknown compression algorithm: %s') % comp
112 112 )
113 113 else:
114 114 comp = b'none'
115 115
116 116 version = None
117 117 for part in b.iterparts():
118 118 if part.type == b'changegroup':
119 119 version = part.params[b'version']
120 120 if version in (b'01', b'02'):
121 121 version = b'v2'
122 122 else:
123 123 raise error.Abort(
124 124 _(
125 125 b'changegroup version %s does not have '
126 126 b'a known bundlespec'
127 127 )
128 128 % version,
129 129 hint=_(b'try upgrading your Mercurial client'),
130 130 )
131 131 elif part.type == b'stream2' and version is None:
132 132 # A stream2 part is required to be part of a v2 bundle
133 133 requirements = urlreq.unquote(part.params[b'requirements'])
134 134 splitted = requirements.split()
135 135 params = bundle2._formatrequirementsparams(splitted)
136 136 return b'none-v2;stream=v2;%s' % params
137 137
138 138 if not version:
139 139 raise error.Abort(
140 140 _(b'could not identify changegroup version in bundle')
141 141 )
142 142
143 143 return b'%s-%s' % (comp, version)
144 144 elif isinstance(b, streamclone.streamcloneapplier):
145 145 requirements = streamclone.readbundle1header(fh)[2]
146 146 formatted = bundle2._formatrequirementsparams(requirements)
147 147 return b'none-packed1;%s' % formatted
148 148 else:
149 149 raise error.Abort(_(b'unknown bundle type: %s') % b)
150 150
151 151
152 152 def _computeoutgoing(repo, heads, common):
153 153 """Computes which revs are outgoing given a set of common
154 154 and a set of heads.
155 155
156 156 This is a separate function so extensions can have access to
157 157 the logic.
158 158
159 159 Returns a discovery.outgoing object.
160 160 """
161 161 cl = repo.changelog
162 162 if common:
163 163 hasnode = cl.hasnode
164 164 common = [n for n in common if hasnode(n)]
165 165 else:
166 166 common = [nullid]
167 167 if not heads:
168 168 heads = cl.heads()
169 169 return discovery.outgoing(repo, common, heads)
170 170
171 171
172 172 def _checkpublish(pushop):
173 173 repo = pushop.repo
174 174 ui = repo.ui
175 175 behavior = ui.config(b'experimental', b'auto-publish')
176 176 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
177 177 return
178 178 remotephases = listkeys(pushop.remote, b'phases')
179 179 if not remotephases.get(b'publishing', False):
180 180 return
181 181
182 182 if pushop.revs is None:
183 183 published = repo.filtered(b'served').revs(b'not public()')
184 184 else:
185 185 published = repo.revs(b'::%ln - public()', pushop.revs)
186 186 if published:
187 187 if behavior == b'warn':
188 188 ui.warn(
189 189 _(b'%i changesets about to be published\n') % len(published)
190 190 )
191 191 elif behavior == b'confirm':
192 192 if ui.promptchoice(
193 193 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
194 194 % len(published)
195 195 ):
196 196 raise error.CanceledError(_(b'user quit'))
197 197 elif behavior == b'abort':
198 198 msg = _(b'push would publish %i changesets') % len(published)
199 199 hint = _(
200 200 b"use --publish or adjust 'experimental.auto-publish'"
201 201 b" config"
202 202 )
203 203 raise error.Abort(msg, hint=hint)
204 204
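_checkpublish() is driven by the experimental.auto-publish config checked above. For example, a hedged one-off invocation requesting the abort behavior for a single push:

hg push --config experimental.auto-publish=abort

This aborts with "push would publish N changesets" when the remote is publishing and the outgoing changesets are not yet public.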
205 205
206 206 def _forcebundle1(op):
207 207 """return true if a pull/push must use bundle1
208 208
209 209 This function is used to allow testing of the older bundle version"""
210 210 ui = op.repo.ui
211 211 # The goal of this config is to allow developers to choose the bundle
212 212 # version used during exchange. This is especially handy during tests.
213 213 # Value is a list of bundle versions to pick from; the highest version
214 214 # should be used.
215 215 #
216 216 # developer config: devel.legacy.exchange
217 217 exchange = ui.configlist(b'devel', b'legacy.exchange')
218 218 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
219 219 return forcebundle1 or not op.remote.capable(b'bundle2')
220 220
221 221
222 222 class pushoperation(object):
223 223 """A object that represent a single push operation
224 224
225 225 Its purpose is to carry push related state and very common operations.
226 226
227 227 A new pushoperation should be created at the beginning of each push and
228 228 discarded afterward.
229 229 """
230 230
231 231 def __init__(
232 232 self,
233 233 repo,
234 234 remote,
235 235 force=False,
236 236 revs=None,
237 237 newbranch=False,
238 238 bookmarks=(),
239 239 publish=False,
240 240 pushvars=None,
241 241 ):
242 242 # repo we push from
243 243 self.repo = repo
244 244 self.ui = repo.ui
245 245 # repo we push to
246 246 self.remote = remote
247 247 # force option provided
248 248 self.force = force
249 249 # revs to be pushed (None is "all")
250 250 self.revs = revs
251 251 # bookmarks explicitly pushed
252 252 self.bookmarks = bookmarks
253 253 # allow push of new branch
254 254 self.newbranch = newbranch
255 255 # steps already performed
256 256 # (used to check what steps have already been performed through bundle2)
257 257 self.stepsdone = set()
258 258 # Integer version of the changegroup push result
259 259 # - None means nothing to push
260 260 # - 0 means HTTP error
261 261 # - 1 means we pushed and remote head count is unchanged *or*
262 262 # we have outgoing changesets but refused to push
263 263 # - other values as described by addchangegroup()
264 264 self.cgresult = None
265 265 # Boolean value for the bookmark push
266 266 self.bkresult = None
267 267 # discover.outgoing object (contains common and outgoing data)
268 268 self.outgoing = None
269 269 # all remote topological heads before the push
270 270 self.remoteheads = None
271 271 # Details of the remote branch pre and post push
272 272 #
273 273 # mapping: {'branch': ([remoteheads],
274 274 # [newheads],
275 275 # [unsyncedheads],
276 276 # [discardedheads])}
277 277 # - branch: the branch name
278 278 # - remoteheads: the list of remote heads known locally
279 279 # None if the branch is new
280 280 # - newheads: the new remote heads (known locally) with outgoing pushed
281 281 # - unsyncedheads: the list of remote heads unknown locally.
282 282 # - discardedheads: the list of remote heads made obsolete by the push
283 283 self.pushbranchmap = None
284 284 # testable as a boolean indicating if any nodes are missing locally.
285 285 self.incoming = None
286 286 # summary of the remote phase situation
287 287 self.remotephases = None
288 288 # phase changes that must be pushed alongside the changesets
289 289 self.outdatedphases = None
290 290 # phase changes that must be pushed if the changeset push fails
291 291 self.fallbackoutdatedphases = None
292 292 # outgoing obsmarkers
293 293 self.outobsmarkers = set()
294 294 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
295 295 self.outbookmarks = []
296 296 # transaction manager
297 297 self.trmanager = None
298 298 # map { pushkey partid -> callback handling failure}
299 299 # used to handle exception from mandatory pushkey part failure
300 300 self.pkfailcb = {}
301 301 # an iterable of pushvars or None
302 302 self.pushvars = pushvars
303 303 # publish pushed changesets
304 304 self.publish = publish
305 305
306 306 @util.propertycache
307 307 def futureheads(self):
308 308 """future remote heads if the changeset push succeeds"""
309 309 return self.outgoing.ancestorsof
310 310
311 311 @util.propertycache
312 312 def fallbackheads(self):
313 313 """future remote heads if the changeset push fails"""
314 314 if self.revs is None:
315 315 # no target to push, all common heads are relevant
316 316 return self.outgoing.commonheads
317 317 unfi = self.repo.unfiltered()
318 318 # I want cheads = heads(::ancestorsof and ::commonheads)
319 319 # (ancestorsof is revs with secret changeset filtered out)
320 320 #
321 321 # This can be expressed as:
322 322 # cheads = ( (ancestorsof and ::commonheads)
323 323 # + (commonheads and ::ancestorsof)
324 324 # )
325 325 #
326 326 # while trying to push we already computed the following:
327 327 # common = (::commonheads)
328 328 # missing = ((commonheads::ancestorsof) - commonheads)
329 329 #
330 330 # We can pick:
331 331 # * ancestorsof part of common (::commonheads)
332 332 common = self.outgoing.common
333 333 rev = self.repo.changelog.index.rev
334 334 cheads = [node for node in self.revs if rev(node) in common]
335 335 # and
336 336 # * commonheads parents on missing
337 337 revset = unfi.set(
338 338 b'%ln and parents(roots(%ln))',
339 339 self.outgoing.commonheads,
340 340 self.outgoing.missing,
341 341 )
342 342 cheads.extend(c.node() for c in revset)
343 343 return cheads
344 344
345 345 @property
346 346 def commonheads(self):
347 347 """set of all common heads after changeset bundle push"""
348 348 if self.cgresult:
349 349 return self.futureheads
350 350 else:
351 351 return self.fallbackheads
352 352
353 353
354 354 # mapping of messages used when pushing bookmarks
355 355 bookmsgmap = {
356 356 b'update': (
357 357 _(b"updating bookmark %s\n"),
358 358 _(b'updating bookmark %s failed\n'),
359 359 ),
360 360 b'export': (
361 361 _(b"exporting bookmark %s\n"),
362 362 _(b'exporting bookmark %s failed\n'),
363 363 ),
364 364 b'delete': (
365 365 _(b"deleting remote bookmark %s\n"),
366 366 _(b'deleting remote bookmark %s failed\n'),
367 367 ),
368 368 }
369 369
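bookmsgmap pairs a success template (index 0) and a failure template (index 1) per bookmark action; callers index it as in this illustrative snippet:

action, book, ok = b'update', b'@', False
msg = bookmsgmap[action][0 if ok else 1] % book
# msg -> b'updating bookmark @ failed\n' (modulo translation via _())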
370 370
371 371 def push(
372 372 repo,
373 373 remote,
374 374 force=False,
375 375 revs=None,
376 376 newbranch=False,
377 377 bookmarks=(),
378 378 publish=False,
379 379 opargs=None,
380 380 ):
381 381 """Push outgoing changesets (limited by revs) from a local
382 382 repository to remote. Return an integer:
383 383 - None means nothing to push
384 384 - 0 means HTTP error
385 385 - 1 means we pushed and remote head count is unchanged *or*
386 386 we have outgoing changesets but refused to push
387 387 - other values as described by addchangegroup()
388 388 """
389 389 if opargs is None:
390 390 opargs = {}
391 391 pushop = pushoperation(
392 392 repo,
393 393 remote,
394 394 force,
395 395 revs,
396 396 newbranch,
397 397 bookmarks,
398 398 publish,
399 399 **pycompat.strkwargs(opargs)
400 400 )
401 401 if pushop.remote.local():
402 402 missing = (
403 403 set(pushop.repo.requirements) - pushop.remote.local().supported
404 404 )
405 405 if missing:
406 406 msg = _(
407 407 b"required features are not"
408 408 b" supported in the destination:"
409 409 b" %s"
410 410 ) % (b', '.join(sorted(missing)))
411 411 raise error.Abort(msg)
412 412
413 413 if not pushop.remote.canpush():
414 414 raise error.Abort(_(b"destination does not support push"))
415 415
416 416 if not pushop.remote.capable(b'unbundle'):
417 417 raise error.Abort(
418 418 _(
419 419 b'cannot push: destination does not support the '
420 420 b'unbundle wire protocol command'
421 421 )
422 422 )
423 423
424 424 # get lock as we might write phase data
425 425 wlock = lock = None
426 426 try:
427 427 # bundle2 push may receive a reply bundle touching bookmarks
428 428 # requiring the wlock. Take it now to ensure proper ordering.
429 429 maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
430 430 if (
431 431 (not _forcebundle1(pushop))
432 432 and maypushback
433 433 and not bookmod.bookmarksinstore(repo)
434 434 ):
435 435 wlock = pushop.repo.wlock()
436 436 lock = pushop.repo.lock()
437 437 pushop.trmanager = transactionmanager(
438 438 pushop.repo, b'push-response', pushop.remote.url()
439 439 )
440 440 except error.LockUnavailable as err:
441 441 # source repo cannot be locked.
442 442 # We do not abort the push, but just disable the local phase
443 443 # synchronisation.
444 444 msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
445 445 err
446 446 )
447 447 pushop.ui.debug(msg)
448 448
449 449 with wlock or util.nullcontextmanager():
450 450 with lock or util.nullcontextmanager():
451 451 with pushop.trmanager or util.nullcontextmanager():
452 452 pushop.repo.checkpush(pushop)
453 453 _checkpublish(pushop)
454 454 _pushdiscovery(pushop)
455 455 if not pushop.force:
456 456 _checksubrepostate(pushop)
457 457 if not _forcebundle1(pushop):
458 458 _pushbundle2(pushop)
459 459 _pushchangeset(pushop)
460 460 _pushsyncphase(pushop)
461 461 _pushobsolete(pushop)
462 462 _pushbookmark(pushop)
463 463
464 464 if repo.ui.configbool(b'experimental', b'remotenames'):
465 465 logexchange.pullremotenames(repo, remote)
466 466
467 467 return pushop
468 468
469 469
470 470 # list of steps to perform discovery before push
471 471 pushdiscoveryorder = []
472 472
473 473 # Mapping between step name and function
474 474 #
475 475 # This exists to help extensions wrap steps if necessary
476 476 pushdiscoverymapping = {}
477 477
478 478
479 479 def pushdiscovery(stepname):
480 480 """decorator for function performing discovery before push
481 481
482 482 The function is added to the step -> function mapping and appended to the
483 483 list of steps. Beware that decorated functions will be added in order (this
484 484 may matter).
485 485
486 486 You can only use this decorator for a new step; if you want to wrap a step
487 487 from an extension, change the pushdiscoverymapping dictionary directly."""
488 488
489 489 def dec(func):
490 490 assert stepname not in pushdiscoverymapping
491 491 pushdiscoverymapping[stepname] = func
492 492 pushdiscoveryorder.append(stepname)
493 493 return func
494 494
495 495 return dec
496 496
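A minimal sketch of how an extension could register an additional discovery step with this decorator (step name and body are hypothetical):

@pushdiscovery(b'my-extra-check')
def _pushdiscoverymycheck(pushop):
    # runs in registration order, after the built-in steps registered so far
    pushop.ui.debug(b'running my-extra-check discovery\n')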
497 497
498 498 def _pushdiscovery(pushop):
499 499 """Run all discovery steps"""
500 500 for stepname in pushdiscoveryorder:
501 501 step = pushdiscoverymapping[stepname]
502 502 step(pushop)
503 503
504 504
505 505 def _checksubrepostate(pushop):
506 506 """Ensure all outgoing referenced subrepo revisions are present locally"""
507 507 for n in pushop.outgoing.missing:
508 508 ctx = pushop.repo[n]
509 509
510 510 if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
511 511 for subpath in sorted(ctx.substate):
512 512 sub = ctx.sub(subpath)
513 513 sub.verify(onpush=True)
514 514
515 515
516 516 @pushdiscovery(b'changeset')
517 517 def _pushdiscoverychangeset(pushop):
518 518 """discover the changeset that need to be pushed"""
519 519 fci = discovery.findcommonincoming
520 520 if pushop.revs:
521 521 commoninc = fci(
522 522 pushop.repo,
523 523 pushop.remote,
524 524 force=pushop.force,
525 525 ancestorsof=pushop.revs,
526 526 )
527 527 else:
528 528 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
529 529 common, inc, remoteheads = commoninc
530 530 fco = discovery.findcommonoutgoing
531 531 outgoing = fco(
532 532 pushop.repo,
533 533 pushop.remote,
534 534 onlyheads=pushop.revs,
535 535 commoninc=commoninc,
536 536 force=pushop.force,
537 537 )
538 538 pushop.outgoing = outgoing
539 539 pushop.remoteheads = remoteheads
540 540 pushop.incoming = inc
541 541
542 542
543 543 @pushdiscovery(b'phase')
544 544 def _pushdiscoveryphase(pushop):
545 545 """discover the phase that needs to be pushed
546 546
547 547 (computed for both success and failure case for changesets push)"""
548 548 outgoing = pushop.outgoing
549 549 unfi = pushop.repo.unfiltered()
550 550 remotephases = listkeys(pushop.remote, b'phases')
551 551
552 552 if (
553 553 pushop.ui.configbool(b'ui', b'_usedassubrepo')
554 554 and remotephases # server supports phases
555 555 and not pushop.outgoing.missing # no changesets to be pushed
556 556 and remotephases.get(b'publishing', False)
557 557 ):
558 558 # When:
559 559 # - this is a subrepo push
560 560 # - and the remote supports phases
561 561 # - and no changesets are to be pushed
562 562 # - and the remote is publishing
563 563 # We may be in the issue 3781 case!
564 564 # We drop the phase synchronisation that would otherwise be done as
565 565 # a courtesy to publish changesets that are possibly still draft
566 566 # on the remote.
567 567 pushop.outdatedphases = []
568 568 pushop.fallbackoutdatedphases = []
569 569 return
570 570
571 571 pushop.remotephases = phases.remotephasessummary(
572 572 pushop.repo, pushop.fallbackheads, remotephases
573 573 )
574 574 droots = pushop.remotephases.draftroots
575 575
576 576 extracond = b''
577 577 if not pushop.remotephases.publishing:
578 578 extracond = b' and public()'
579 579 revset = b'heads((%%ln::%%ln) %s)' % extracond
580 580 # Get the list of all revs that are draft on the remote but public here.
581 581 # XXX Beware that the revset breaks if droots is not strictly
582 582 # XXX roots; we may want to ensure it is, but that is costly.
583 583 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
584 584 if not pushop.remotephases.publishing and pushop.publish:
585 585 future = list(
586 586 unfi.set(
587 587 b'%ln and (not public() or %ln::)', pushop.futureheads, droots
588 588 )
589 589 )
590 590 elif not outgoing.missing:
591 591 future = fallback
592 592 else:
593 593 # add the changesets we are going to push as draft
594 594 #
595 595 # should not be necessary for a publishing server, but because of an
596 596 # issue fixed in xxxxx we have to do it anyway.
597 597 fdroots = list(
598 598 unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
599 599 )
600 600 fdroots = [f.node() for f in fdroots]
601 601 future = list(unfi.set(revset, fdroots, pushop.futureheads))
602 602 pushop.outdatedphases = future
603 603 pushop.fallbackoutdatedphases = fallback
604 604
605 605
606 606 @pushdiscovery(b'obsmarker')
607 607 def _pushdiscoveryobsmarkers(pushop):
608 608 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
609 609 return
610 610
611 611 if not pushop.repo.obsstore:
612 612 return
613 613
614 614 if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
615 615 return
616 616
617 617 repo = pushop.repo
618 618 # very naive computation that can be quite expensive on big repos.
619 619 # However, evolution is currently slow on them anyway.
620 620 nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
621 621 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
622 622
623 623
624 624 @pushdiscovery(b'bookmarks')
625 625 def _pushdiscoverybookmarks(pushop):
626 626 ui = pushop.ui
627 627 repo = pushop.repo.unfiltered()
628 628 remote = pushop.remote
629 629 ui.debug(b"checking for updated bookmarks\n")
630 630 ancestors = ()
631 631 if pushop.revs:
632 632 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
633 633 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
634 634
635 635 remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
636 636
637 637 explicit = {
638 638 repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
639 639 }
640 640
641 641 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
642 642 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
643 643
644 644
645 645 def _processcompared(pushop, pushed, explicit, remotebms, comp):
646 646 """take decision on bookmarks to push to the remote repo
647 647
648 648 Exists to help extensions alter this behavior.
649 649 """
650 650 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
651 651
652 652 repo = pushop.repo
653 653
654 654 for b, scid, dcid in advsrc:
655 655 if b in explicit:
656 656 explicit.remove(b)
657 657 if not pushed or repo[scid].rev() in pushed:
658 658 pushop.outbookmarks.append((b, dcid, scid))
659 659 # search added bookmark
660 660 for b, scid, dcid in addsrc:
661 661 if b in explicit:
662 662 explicit.remove(b)
663 663 if bookmod.isdivergent(b):
664 664 pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b)
665 665 pushop.bkresult = 2
666 666 else:
667 667 pushop.outbookmarks.append((b, b'', scid))
668 668 # search for overwritten bookmark
669 669 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
670 670 if b in explicit:
671 671 explicit.remove(b)
672 672 pushop.outbookmarks.append((b, dcid, scid))
673 673 # search for bookmark to delete
674 674 for b, scid, dcid in adddst:
675 675 if b in explicit:
676 676 explicit.remove(b)
677 677 # treat as "deleted locally"
678 678 pushop.outbookmarks.append((b, dcid, b''))
679 679 # identical bookmarks shouldn't get reported
680 680 for b, scid, dcid in same:
681 681 if b in explicit:
682 682 explicit.remove(b)
683 683
684 684 if explicit:
685 685 explicit = sorted(explicit)
686 686 # we should probably list all of them
687 687 pushop.ui.warn(
688 688 _(
689 689 b'bookmark %s does not exist on the local '
690 690 b'or remote repository!\n'
691 691 )
692 692 % explicit[0]
693 693 )
694 694 pushop.bkresult = 2
695 695
696 696 pushop.outbookmarks.sort()
697 697
698 698
699 699 def _pushcheckoutgoing(pushop):
700 700 outgoing = pushop.outgoing
701 701 unfi = pushop.repo.unfiltered()
702 702 if not outgoing.missing:
703 703 # nothing to push
704 704 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
705 705 return False
706 706 # something to push
707 707 if not pushop.force:
708 708 # if repo.obsstore is false --> no obsolete markers,
709 709 # so we can skip the iteration
710 710 if unfi.obsstore:
711 711 # these messages are here for 80-char limit reasons
712 712 mso = _(b"push includes obsolete changeset: %s!")
713 713 mspd = _(b"push includes phase-divergent changeset: %s!")
714 714 mscd = _(b"push includes content-divergent changeset: %s!")
715 715 mst = {
716 716 b"orphan": _(b"push includes orphan changeset: %s!"),
717 717 b"phase-divergent": mspd,
718 718 b"content-divergent": mscd,
719 719 }
720 720 # If there is at least one
721 721 # obsolete or unstable changeset in missing, at
722 722 # least one of the missing heads will be obsolete or
723 723 # unstable. So checking heads only is ok.
724 724 for node in outgoing.ancestorsof:
725 725 ctx = unfi[node]
726 726 if ctx.obsolete():
727 727 raise error.Abort(mso % ctx)
728 728 elif ctx.isunstable():
729 729 # TODO print more than one instability in the abort
730 730 # message
731 731 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
732 732
733 733 discovery.checkheads(pushop)
734 734 return True
735 735
736 736
737 737 # List of names of steps to perform for an outgoing bundle2, order matters.
738 738 b2partsgenorder = []
739 739
740 740 # Mapping between step name and function
741 741 #
742 742 # This exists to help extensions wrap steps if necessary
743 743 b2partsgenmapping = {}
744 744
745 745
746 746 def b2partsgenerator(stepname, idx=None):
747 747 """decorator for function generating bundle2 part
748 748
749 749 The function is added to the step -> function mapping and appended to the
750 750 list of steps. Beware that decorated functions will be added in order
751 751 (this may matter).
752 752
753 753 You can only use this decorator for new steps; if you want to wrap a step
754 754 from an extension, change the b2partsgenmapping dictionary directly."""
755 755
756 756 def dec(func):
757 757 assert stepname not in b2partsgenmapping
758 758 b2partsgenmapping[stepname] = func
759 759 if idx is None:
760 760 b2partsgenorder.append(stepname)
761 761 else:
762 762 b2partsgenorder.insert(idx, stepname)
763 763 return func
764 764
765 765 return dec
766 766
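Analogously, a hedged sketch of registering a bundle2 part generator; idx=0 inserts the step at the front of b2partsgenorder, as the built-in pushvars generator further below does (the part name here is hypothetical):

@b2partsgenerator(b'my-part', idx=0)
def _pushb2mypart(pushop, bundler):
    if b'my-part' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'my-part')
    bundler.newpart(b'my-part', data=b'payload', mandatory=False)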
767 767
768 768 def _pushb2ctxcheckheads(pushop, bundler):
769 769 """Generate race condition checking parts
770 770
771 771 Exists as an independent function to aid extensions
772 772 """
773 773 # * 'force' does not check for push races,
774 774 # * if we don't push anything, there is nothing to check.
775 775 if not pushop.force and pushop.outgoing.ancestorsof:
776 776 allowunrelated = b'related' in bundler.capabilities.get(
777 777 b'checkheads', ()
778 778 )
779 779 emptyremote = pushop.pushbranchmap is None
780 780 if not allowunrelated or emptyremote:
781 781 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
782 782 else:
783 783 affected = set()
784 784 for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
785 785 remoteheads, newheads, unsyncedheads, discardedheads = heads
786 786 if remoteheads is not None:
787 787 remote = set(remoteheads)
788 788 affected |= set(discardedheads) & remote
789 789 affected |= remote - set(newheads)
790 790 if affected:
791 791 data = iter(sorted(affected))
792 792 bundler.newpart(b'check:updated-heads', data=data)
793 793
794 794
795 795 def _pushing(pushop):
796 796 """return True if we are pushing anything"""
797 797 return bool(
798 798 pushop.outgoing.missing
799 799 or pushop.outdatedphases
800 800 or pushop.outobsmarkers
801 801 or pushop.outbookmarks
802 802 )
803 803
804 804
805 805 @b2partsgenerator(b'check-bookmarks')
806 806 def _pushb2checkbookmarks(pushop, bundler):
807 807 """insert bookmark move checking"""
808 808 if not _pushing(pushop) or pushop.force:
809 809 return
810 810 b2caps = bundle2.bundle2caps(pushop.remote)
811 811 hasbookmarkcheck = b'bookmarks' in b2caps
812 812 if not (pushop.outbookmarks and hasbookmarkcheck):
813 813 return
814 814 data = []
815 815 for book, old, new in pushop.outbookmarks:
816 816 data.append((book, old))
817 817 checkdata = bookmod.binaryencode(data)
818 818 bundler.newpart(b'check:bookmarks', data=checkdata)
819 819
820 820
821 821 @b2partsgenerator(b'check-phases')
822 822 def _pushb2checkphases(pushop, bundler):
823 823 """insert phase move checking"""
824 824 if not _pushing(pushop) or pushop.force:
825 825 return
826 826 b2caps = bundle2.bundle2caps(pushop.remote)
827 827 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
828 828 if pushop.remotephases is not None and hasphaseheads:
829 829 # check that the remote phase has not changed
830 830 checks = {p: [] for p in phases.allphases}
831 831 checks[phases.public].extend(pushop.remotephases.publicheads)
832 832 checks[phases.draft].extend(pushop.remotephases.draftroots)
833 833 if any(pycompat.itervalues(checks)):
834 834 for phase in checks:
835 835 checks[phase].sort()
836 836 checkdata = phases.binaryencode(checks)
837 837 bundler.newpart(b'check:phases', data=checkdata)
838 838
839 839
840 840 @b2partsgenerator(b'changeset')
841 841 def _pushb2ctx(pushop, bundler):
842 842 """handle changegroup push through bundle2
843 843
844 844 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
845 845 """
846 846 if b'changesets' in pushop.stepsdone:
847 847 return
848 848 pushop.stepsdone.add(b'changesets')
849 849 # Send known heads to the server for race detection.
850 850 if not _pushcheckoutgoing(pushop):
851 851 return
852 852 pushop.repo.prepushoutgoinghooks(pushop)
853 853
854 854 _pushb2ctxcheckheads(pushop, bundler)
855 855
856 856 b2caps = bundle2.bundle2caps(pushop.remote)
857 857 version = b'01'
858 858 cgversions = b2caps.get(b'changegroup')
859 859 if cgversions: # 3.1 and 3.2 ship with an empty value
860 860 cgversions = [
861 861 v
862 862 for v in cgversions
863 863 if v in changegroup.supportedoutgoingversions(pushop.repo)
864 864 ]
865 865 if not cgversions:
866 866 raise error.Abort(_(b'no common changegroup version'))
867 867 version = max(cgversions)
868 868 cgstream = changegroup.makestream(
869 869 pushop.repo, pushop.outgoing, version, b'push'
870 870 )
871 871 cgpart = bundler.newpart(b'changegroup', data=cgstream)
872 872 if cgversions:
873 873 cgpart.addparam(b'version', version)
874 874 if scmutil.istreemanifest(pushop.repo):
875 875 cgpart.addparam(b'treemanifest', b'1')
876 876 if b'exp-sidedata-flag' in pushop.repo.requirements:
877 877 cgpart.addparam(b'exp-sidedata', b'1')
878 878
879 879 def handlereply(op):
880 880 """extract addchangegroup returns from server reply"""
881 881 cgreplies = op.records.getreplies(cgpart.id)
882 882 assert len(cgreplies[b'changegroup']) == 1
883 883 pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
884 884
885 885 return handlereply
886 886
887 887
888 888 @b2partsgenerator(b'phase')
889 889 def _pushb2phases(pushop, bundler):
890 890 """handle phase push through bundle2"""
891 891 if b'phases' in pushop.stepsdone:
892 892 return
893 893 b2caps = bundle2.bundle2caps(pushop.remote)
894 894 ui = pushop.repo.ui
895 895
896 896 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
897 897 haspushkey = b'pushkey' in b2caps
898 898 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
899 899
900 900 if hasphaseheads and not legacyphase:
901 901 return _pushb2phaseheads(pushop, bundler)
902 902 elif haspushkey:
903 903 return _pushb2phasespushkey(pushop, bundler)
904 904
905 905
906 906 def _pushb2phaseheads(pushop, bundler):
907 907 """push phase information through a bundle2 - binary part"""
908 908 pushop.stepsdone.add(b'phases')
909 909 if pushop.outdatedphases:
910 910 updates = {p: [] for p in phases.allphases}
911 911 updates[0].extend(h.node() for h in pushop.outdatedphases)
912 912 phasedata = phases.binaryencode(updates)
913 913 bundler.newpart(b'phase-heads', data=phasedata)
914 914
915 915
916 916 def _pushb2phasespushkey(pushop, bundler):
917 917 """push phase information through a bundle2 - pushkey part"""
918 918 pushop.stepsdone.add(b'phases')
919 919 part2node = []
920 920
921 921 def handlefailure(pushop, exc):
922 922 targetid = int(exc.partid)
923 923 for partid, node in part2node:
924 924 if partid == targetid:
925 925 raise error.Abort(_(b'updating %s to public failed') % node)
926 926
927 927 enc = pushkey.encode
928 928 for newremotehead in pushop.outdatedphases:
929 929 part = bundler.newpart(b'pushkey')
930 930 part.addparam(b'namespace', enc(b'phases'))
931 931 part.addparam(b'key', enc(newremotehead.hex()))
932 932 part.addparam(b'old', enc(b'%d' % phases.draft))
933 933 part.addparam(b'new', enc(b'%d' % phases.public))
934 934 part2node.append((part.id, newremotehead))
935 935 pushop.pkfailcb[part.id] = handlefailure
936 936
937 937 def handlereply(op):
938 938 for partid, node in part2node:
939 939 partrep = op.records.getreplies(partid)
940 940 results = partrep[b'pushkey']
941 941 assert len(results) <= 1
942 942 msg = None
943 943 if not results:
944 944 msg = _(b'server ignored update of %s to public!\n') % node
945 945 elif not int(results[0][b'return']):
946 946 msg = _(b'updating %s to public failed!\n') % node
947 947 if msg is not None:
948 948 pushop.ui.warn(msg)
949 949
950 950 return handlereply
951 951
952 952
953 953 @b2partsgenerator(b'obsmarkers')
954 954 def _pushb2obsmarkers(pushop, bundler):
955 955 if b'obsmarkers' in pushop.stepsdone:
956 956 return
957 957 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
958 958 if obsolete.commonversion(remoteversions) is None:
959 959 return
960 960 pushop.stepsdone.add(b'obsmarkers')
961 961 if pushop.outobsmarkers:
962 962 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
963 963 bundle2.buildobsmarkerspart(bundler, markers)
964 964
965 965
966 966 @b2partsgenerator(b'bookmarks')
967 967 def _pushb2bookmarks(pushop, bundler):
968 968 """handle bookmark push through bundle2"""
969 969 if b'bookmarks' in pushop.stepsdone:
970 970 return
971 971 b2caps = bundle2.bundle2caps(pushop.remote)
972 972
973 973 legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
974 974 legacybooks = b'bookmarks' in legacy
975 975
976 976 if not legacybooks and b'bookmarks' in b2caps:
977 977 return _pushb2bookmarkspart(pushop, bundler)
978 978 elif b'pushkey' in b2caps:
979 979 return _pushb2bookmarkspushkey(pushop, bundler)
980 980
981 981
982 982 def _bmaction(old, new):
983 983 """small utility for bookmark pushing"""
984 984 if not old:
985 985 return b'export'
986 986 elif not new:
987 987 return b'delete'
988 988 return b'update'
989 989
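The three cases of _bmaction, shown as an illustrative truth table:

assert _bmaction(b'', b'abc123') == b'export'    # no old node: new bookmark
assert _bmaction(b'abc123', b'') == b'delete'    # no new node: deletion
assert _bmaction(b'abc123', b'def456') == b'update'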
990 990
991 991 def _abortonsecretctx(pushop, node, b):
992 992 """abort if a given bookmark points to a secret changeset"""
993 993 if node and pushop.repo[node].phase() == phases.secret:
994 994 raise error.Abort(
995 995 _(b'cannot push bookmark %s as it points to a secret changeset') % b
996 996 )
997 997
998 998
999 999 def _pushb2bookmarkspart(pushop, bundler):
1000 1000 pushop.stepsdone.add(b'bookmarks')
1001 1001 if not pushop.outbookmarks:
1002 1002 return
1003 1003
1004 1004 allactions = []
1005 1005 data = []
1006 1006 for book, old, new in pushop.outbookmarks:
1007 1007 _abortonsecretctx(pushop, new, book)
1008 1008 data.append((book, new))
1009 1009 allactions.append((book, _bmaction(old, new)))
1010 1010 checkdata = bookmod.binaryencode(data)
1011 1011 bundler.newpart(b'bookmarks', data=checkdata)
1012 1012
1013 1013 def handlereply(op):
1014 1014 ui = pushop.ui
1015 1015 # if success
1016 1016 for book, action in allactions:
1017 1017 ui.status(bookmsgmap[action][0] % book)
1018 1018
1019 1019 return handlereply
1020 1020
1021 1021
1022 1022 def _pushb2bookmarkspushkey(pushop, bundler):
1023 1023 pushop.stepsdone.add(b'bookmarks')
1024 1024 part2book = []
1025 1025 enc = pushkey.encode
1026 1026
1027 1027 def handlefailure(pushop, exc):
1028 1028 targetid = int(exc.partid)
1029 1029 for partid, book, action in part2book:
1030 1030 if partid == targetid:
1031 1031 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1032 1032 # we should not be called for parts we did not generate
1033 1033 assert False
1034 1034
1035 1035 for book, old, new in pushop.outbookmarks:
1036 1036 _abortonsecretctx(pushop, new, book)
1037 1037 part = bundler.newpart(b'pushkey')
1038 1038 part.addparam(b'namespace', enc(b'bookmarks'))
1039 1039 part.addparam(b'key', enc(book))
1040 1040 part.addparam(b'old', enc(hex(old)))
1041 1041 part.addparam(b'new', enc(hex(new)))
1042 1042 action = b'update'
1043 1043 if not old:
1044 1044 action = b'export'
1045 1045 elif not new:
1046 1046 action = b'delete'
1047 1047 part2book.append((part.id, book, action))
1048 1048 pushop.pkfailcb[part.id] = handlefailure
1049 1049
1050 1050 def handlereply(op):
1051 1051 ui = pushop.ui
1052 1052 for partid, book, action in part2book:
1053 1053 partrep = op.records.getreplies(partid)
1054 1054 results = partrep[b'pushkey']
1055 1055 assert len(results) <= 1
1056 1056 if not results:
1057 1057 pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
1058 1058 else:
1059 1059 ret = int(results[0][b'return'])
1060 1060 if ret:
1061 1061 ui.status(bookmsgmap[action][0] % book)
1062 1062 else:
1063 1063 ui.warn(bookmsgmap[action][1] % book)
1064 1064 if pushop.bkresult is not None:
1065 1065 pushop.bkresult = 1
1066 1066
1067 1067 return handlereply
1068 1068
1069 1069
1070 1070 @b2partsgenerator(b'pushvars', idx=0)
1071 1071 def _getbundlesendvars(pushop, bundler):
1072 1072 '''send shellvars via bundle2'''
1073 1073 pushvars = pushop.pushvars
1074 1074 if pushvars:
1075 1075 shellvars = {}
1076 1076 for raw in pushvars:
1077 1077 if b'=' not in raw:
1078 1078 msg = (
1079 1079 b"unable to parse variable '%s', should follow "
1080 1080 b"'KEY=VALUE' or 'KEY=' format"
1081 1081 )
1082 1082 raise error.Abort(msg % raw)
1083 1083 k, v = raw.split(b'=', 1)
1084 1084 shellvars[k] = v
1085 1085
1086 1086 part = bundler.newpart(b'pushvars')
1087 1087
1088 1088 for key, value in pycompat.iteritems(shellvars):
1089 1089 part.addparam(key, value, mandatory=False)
1090 1090
1091 1091
1092 1092 def _pushbundle2(pushop):
1093 1093 """push data to the remote using bundle2
1094 1094
1095 1095 The only currently supported type of data is changegroup but this will
1096 1096 evolve in the future."""
1097 1097 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1098 1098 pushback = pushop.trmanager and pushop.ui.configbool(
1099 1099 b'experimental', b'bundle2.pushback'
1100 1100 )
1101 1101
1102 1102 # create reply capability
1103 1103 capsblob = bundle2.encodecaps(
1104 1104 bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
1105 1105 )
1106 1106 bundler.newpart(b'replycaps', data=capsblob)
1107 1107 replyhandlers = []
1108 1108 for partgenname in b2partsgenorder:
1109 1109 partgen = b2partsgenmapping[partgenname]
1110 1110 ret = partgen(pushop, bundler)
1111 1111 if callable(ret):
1112 1112 replyhandlers.append(ret)
1113 1113 # do not push if nothing to push
1114 1114 if bundler.nbparts <= 1:
1115 1115 return
1116 1116 stream = util.chunkbuffer(bundler.getchunks())
1117 1117 try:
1118 1118 try:
1119 1119 with pushop.remote.commandexecutor() as e:
1120 1120 reply = e.callcommand(
1121 1121 b'unbundle',
1122 1122 {
1123 1123 b'bundle': stream,
1124 1124 b'heads': [b'force'],
1125 1125 b'url': pushop.remote.url(),
1126 1126 },
1127 1127 ).result()
1128 1128 except error.BundleValueError as exc:
1129 1129 raise error.Abort(_(b'missing support for %s') % exc)
1130 1130 try:
1131 1131 trgetter = None
1132 1132 if pushback:
1133 1133 trgetter = pushop.trmanager.transaction
1134 1134 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1135 1135 except error.BundleValueError as exc:
1136 1136 raise error.Abort(_(b'missing support for %s') % exc)
1137 1137 except bundle2.AbortFromPart as exc:
1138 - pushop.ui.status(_(b'remote: %s\n') % exc)
1138 + pushop.ui.error(_(b'remote: %s\n') % exc)
1139 1139 if exc.hint is not None:
1140 - pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1140 + pushop.ui.error(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1141 1141 raise error.Abort(_(b'push failed on remote'))
1142 1142 except error.PushkeyFailed as exc:
1143 1143 partid = int(exc.partid)
1144 1144 if partid not in pushop.pkfailcb:
1145 1145 raise
1146 1146 pushop.pkfailcb[partid](pushop, exc)
1147 1147 for rephand in replyhandlers:
1148 1148 rephand(op)
1149 1149
1150 1150
1151 1151 def _pushchangeset(pushop):
1152 1152 """Make the actual push of changeset bundle to remote repo"""
1153 1153 if b'changesets' in pushop.stepsdone:
1154 1154 return
1155 1155 pushop.stepsdone.add(b'changesets')
1156 1156 if not _pushcheckoutgoing(pushop):
1157 1157 return
1158 1158
1159 1159 # Should have verified this in push().
1160 1160 assert pushop.remote.capable(b'unbundle')
1161 1161
1162 1162 pushop.repo.prepushoutgoinghooks(pushop)
1163 1163 outgoing = pushop.outgoing
1164 1164 # TODO: get bundlecaps from remote
1165 1165 bundlecaps = None
1166 1166 # create a changegroup from local
1167 1167 if pushop.revs is None and not (
1168 1168 outgoing.excluded or pushop.repo.changelog.filteredrevs
1169 1169 ):
1170 1170 # push everything,
1171 1171 # use the fast path, no race possible on push
1172 1172 cg = changegroup.makechangegroup(
1173 1173 pushop.repo,
1174 1174 outgoing,
1175 1175 b'01',
1176 1176 b'push',
1177 1177 fastpath=True,
1178 1178 bundlecaps=bundlecaps,
1179 1179 )
1180 1180 else:
1181 1181 cg = changegroup.makechangegroup(
1182 1182 pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
1183 1183 )
1184 1184
1185 1185 # apply changegroup to remote
1186 1186 # local repo finds heads on server, finds out what
1187 1187 # revs it must push. once revs transferred, if server
1188 1188 # finds it has different heads (someone else won
1189 1189 # commit/push race), server aborts.
1190 1190 if pushop.force:
1191 1191 remoteheads = [b'force']
1192 1192 else:
1193 1193 remoteheads = pushop.remoteheads
1194 1194 # ssh: return remote's addchangegroup()
1195 1195 # http: return remote's addchangegroup() or 0 for error
1196 1196 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1197 1197
1198 1198
1199 1199 def _pushsyncphase(pushop):
1200 1200 """synchronise phase information locally and remotely"""
1201 1201 cheads = pushop.commonheads
1202 1202 # even when we don't push, exchanging phase data is useful
1203 1203 remotephases = listkeys(pushop.remote, b'phases')
1204 1204 if (
1205 1205 pushop.ui.configbool(b'ui', b'_usedassubrepo')
1206 1206 and remotephases # server supports phases
1207 1207 and pushop.cgresult is None # nothing was pushed
1208 1208 and remotephases.get(b'publishing', False)
1209 1209 ):
1210 1210 # When:
1211 1211 # - this is a subrepo push
1212 1212 # - and the remote supports phases
1213 1213 # - and no changesets were pushed
1214 1214 # - and the remote is publishing
1215 1215 # We may be in the issue 3871 case!
1216 1216 # We drop the phase synchronisation that would otherwise be done as
1217 1217 # a courtesy to publish changesets that are possibly still draft
1218 1218 # on the remote.
1219 1219 remotephases = {b'publishing': b'True'}
1220 1220 if not remotephases: # old server, or public-only reply from a non-publishing server
1221 1221 _localphasemove(pushop, cheads)
1222 1222 # don't push any phase data as there is nothing to push
1223 1223 else:
1224 1224 ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
1225 1225 pheads, droots = ana
1226 1226 ### Apply remote phase on local
1227 1227 if remotephases.get(b'publishing', False):
1228 1228 _localphasemove(pushop, cheads)
1229 1229 else: # publish = False
1230 1230 _localphasemove(pushop, pheads)
1231 1231 _localphasemove(pushop, cheads, phases.draft)
1232 1232 ### Apply local phase on remote
1233 1233
1234 1234 if pushop.cgresult:
1235 1235 if b'phases' in pushop.stepsdone:
1236 1236 # phases already pushed through bundle2
1237 1237 return
1238 1238 outdated = pushop.outdatedphases
1239 1239 else:
1240 1240 outdated = pushop.fallbackoutdatedphases
1241 1241
1242 1242 pushop.stepsdone.add(b'phases')
1243 1243
1244 1244 # filter heads already turned public by the push
1245 1245 outdated = [c for c in outdated if c.node() not in pheads]
1246 1246 # fallback to independent pushkey command
1247 1247 for newremotehead in outdated:
1248 1248 with pushop.remote.commandexecutor() as e:
1249 1249 r = e.callcommand(
1250 1250 b'pushkey',
1251 1251 {
1252 1252 b'namespace': b'phases',
1253 1253 b'key': newremotehead.hex(),
1254 1254 b'old': b'%d' % phases.draft,
1255 1255 b'new': b'%d' % phases.public,
1256 1256 },
1257 1257 ).result()
1258 1258
1259 1259 if not r:
1260 1260 pushop.ui.warn(
1261 1261 _(b'updating %s to public failed!\n') % newremotehead
1262 1262 )
1263 1263
1264 1264
1265 1265 def _localphasemove(pushop, nodes, phase=phases.public):
1266 1266 """move <nodes> to <phase> in the local source repo"""
1267 1267 if pushop.trmanager:
1268 1268 phases.advanceboundary(
1269 1269 pushop.repo, pushop.trmanager.transaction(), phase, nodes
1270 1270 )
1271 1271 else:
1272 1272 # repo is not locked, do not change any phases!
1273 1273 # Informs the user that phases should have been moved when
1274 1274 # applicable.
1275 1275 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1276 1276 phasestr = phases.phasenames[phase]
1277 1277 if actualmoves:
1278 1278 pushop.ui.status(
1279 1279 _(
1280 1280 b'cannot lock source repo, skipping '
1281 1281 b'local %s phase update\n'
1282 1282 )
1283 1283 % phasestr
1284 1284 )
1285 1285
1286 1286
1287 1287 def _pushobsolete(pushop):
1288 1288 """utility function to push obsolete markers to a remote"""
1289 1289 if b'obsmarkers' in pushop.stepsdone:
1290 1290 return
1291 1291 repo = pushop.repo
1292 1292 remote = pushop.remote
1293 1293 pushop.stepsdone.add(b'obsmarkers')
1294 1294 if pushop.outobsmarkers:
1295 1295 pushop.ui.debug(b'try to push obsolete markers to remote\n')
1296 1296 rslts = []
1297 1297 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1298 1298 remotedata = obsolete._pushkeyescape(markers)
1299 1299 for key in sorted(remotedata, reverse=True):
1300 1300 # reverse sort to ensure we end with dump0
1301 1301 data = remotedata[key]
1302 1302 rslts.append(remote.pushkey(b'obsolete', key, b'', data))
1303 1303 if [r for r in rslts if not r]:
1304 1304 msg = _(b'failed to push some obsolete markers!\n')
1305 1305 repo.ui.warn(msg)
1306 1306
1307 1307
1308 1308 def _pushbookmark(pushop):
1309 1309 """Update bookmark position on remote"""
1310 1310 if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
1311 1311 return
1312 1312 pushop.stepsdone.add(b'bookmarks')
1313 1313 ui = pushop.ui
1314 1314 remote = pushop.remote
1315 1315
1316 1316 for b, old, new in pushop.outbookmarks:
1317 1317 action = b'update'
1318 1318 if not old:
1319 1319 action = b'export'
1320 1320 elif not new:
1321 1321 action = b'delete'
1322 1322
1323 1323 with remote.commandexecutor() as e:
1324 1324 r = e.callcommand(
1325 1325 b'pushkey',
1326 1326 {
1327 1327 b'namespace': b'bookmarks',
1328 1328 b'key': b,
1329 1329 b'old': hex(old),
1330 1330 b'new': hex(new),
1331 1331 },
1332 1332 ).result()
1333 1333
1334 1334 if r:
1335 1335 ui.status(bookmsgmap[action][0] % b)
1336 1336 else:
1337 1337 ui.warn(bookmsgmap[action][1] % b)
1338 1338 # discovery can have set the value from an invalid entry
1339 1339 if pushop.bkresult is not None:
1340 1340 pushop.bkresult = 1
1341 1341
1342 1342
1343 1343 class pulloperation(object):
1344 1344 """A object that represent a single pull operation
1345 1345
1346 1346 It purpose is to carry pull related state and very common operation.
1347 1347
1348 1348 A new should be created at the beginning of each pull and discarded
1349 1349 afterward.
1350 1350 """
1351 1351
1352 1352 def __init__(
1353 1353 self,
1354 1354 repo,
1355 1355 remote,
1356 1356 heads=None,
1357 1357 force=False,
1358 1358 bookmarks=(),
1359 1359 remotebookmarks=None,
1360 1360 streamclonerequested=None,
1361 1361 includepats=None,
1362 1362 excludepats=None,
1363 1363 depth=None,
1364 1364 ):
1365 1365 # repo we pull into
1366 1366 self.repo = repo
1367 1367 # repo we pull from
1368 1368 self.remote = remote
1369 1369 # revisions we try to pull (None is "all")
1370 1370 self.heads = heads
1371 1371 # bookmarks pulled explicitly
1372 1372 self.explicitbookmarks = [
1373 1373 repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
1374 1374 ]
1375 1375 # do we force pull?
1376 1376 self.force = force
1377 1377 # whether a streaming clone was requested
1378 1378 self.streamclonerequested = streamclonerequested
1379 1379 # transaction manager
1380 1380 self.trmanager = None
1381 1381 # set of common changesets between local and remote before pull
1382 1382 self.common = None
1383 1383 # set of pulled heads
1384 1384 self.rheads = None
1385 1385 # list of missing changesets to fetch remotely
1386 1386 self.fetch = None
1387 1387 # remote bookmarks data
1388 1388 self.remotebookmarks = remotebookmarks
1389 1389 # result of changegroup pulling (used as return code by pull)
1390 1390 self.cgresult = None
1391 1391 # list of steps already done
1392 1392 self.stepsdone = set()
1393 1393 # Whether we attempted a clone from pre-generated bundles.
1394 1394 self.clonebundleattempted = False
1395 1395 # Set of file patterns to include.
1396 1396 self.includepats = includepats
1397 1397 # Set of file patterns to exclude.
1398 1398 self.excludepats = excludepats
1399 1399 # Number of ancestor changesets to pull from each pulled head.
1400 1400 self.depth = depth
1401 1401
1402 1402 @util.propertycache
1403 1403 def pulledsubset(self):
1404 1404 """heads of the set of changeset target by the pull"""
1405 1405 # compute target subset
1406 1406 if self.heads is None:
1407 1407 # We pulled every thing possible
1408 1408 # sync on everything common
1409 1409 c = set(self.common)
1410 1410 ret = list(self.common)
1411 1411 for n in self.rheads:
1412 1412 if n not in c:
1413 1413 ret.append(n)
1414 1414 return ret
1415 1415 else:
1416 1416 # We pulled a specific subset
1417 1417 # sync on this subset
1418 1418 return self.heads
1419 1419
1420 1420 @util.propertycache
1421 1421 def canusebundle2(self):
1422 1422 return not _forcebundle1(self)
1423 1423
1424 1424 @util.propertycache
1425 1425 def remotebundle2caps(self):
1426 1426 return bundle2.bundle2caps(self.remote)
1427 1427
1428 1428 def gettransaction(self):
1429 1429 # deprecated; talk to trmanager directly
1430 1430 return self.trmanager.transaction()
1431 1431
1432 1432
1433 1433 class transactionmanager(util.transactional):
1434 1434 """An object to manage the life cycle of a transaction
1435 1435
1436 1436 It creates the transaction on demand and calls the appropriate hooks when
1437 1437 closing the transaction."""
1438 1438
1439 1439 def __init__(self, repo, source, url):
1440 1440 self.repo = repo
1441 1441 self.source = source
1442 1442 self.url = url
1443 1443 self._tr = None
1444 1444
1445 1445 def transaction(self):
1446 1446 """Return an open transaction object, constructing if necessary"""
1447 1447 if not self._tr:
1448 1448 trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
1449 1449 self._tr = self.repo.transaction(trname)
1450 1450 self._tr.hookargs[b'source'] = self.source
1451 1451 self._tr.hookargs[b'url'] = self.url
1452 1452 return self._tr
1453 1453
1454 1454 def close(self):
1455 1455 """close transaction if created"""
1456 1456 if self._tr is not None:
1457 1457 self._tr.close()
1458 1458
1459 1459 def release(self):
1460 1460 """release transaction if created"""
1461 1461 if self._tr is not None:
1462 1462 self._tr.release()
1463 1463
1464 1464
1465 1465 def listkeys(remote, namespace):
1466 1466 with remote.commandexecutor() as e:
1467 1467 return e.callcommand(b'listkeys', {b'namespace': namespace}).result()
1468 1468
1469 1469
1470 1470 def _fullpullbundle2(repo, pullop):
1471 1471 # The server may send a partial reply, i.e. when inlining
1472 1472 # pre-computed bundles. In that case, update the common
1473 1473 # set based on the results and pull another bundle.
1474 1474 #
1475 1475 # There are two indicators that the process is finished:
1476 1476 # - no changeset has been added, or
1477 1477 # - all remote heads are known locally.
1478 1478 # The head check must use the unfiltered view as obsoletion
1479 1479 # markers can hide heads.
1480 1480 unfi = repo.unfiltered()
1481 1481 unficl = unfi.changelog
1482 1482
1483 1483 def headsofdiff(h1, h2):
1484 1484 """Returns heads(h1 % h2)"""
1485 1485 res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
1486 1486 return {ctx.node() for ctx in res}
1487 1487
1488 1488 def headsofunion(h1, h2):
1489 1489 """Returns heads((h1 + h2) - null)"""
1490 1490 res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
1491 1491 return {ctx.node() for ctx in res}
1492 1492
1493 1493 while True:
1494 1494 old_heads = unficl.heads()
1495 1495 clstart = len(unficl)
1496 1496 _pullbundle2(pullop)
1497 1497 if requirements.NARROW_REQUIREMENT in repo.requirements:
1498 1498 # XXX narrow clones filter the heads on the server side during
1499 1499 # XXX getbundle and result in partial replies as well.
1500 1500 # XXX Disable pull bundles in this case as band aid to avoid
1501 1501 # XXX extra round trips.
1502 1502 break
1503 1503 if clstart == len(unficl):
1504 1504 break
1505 1505 if all(unficl.hasnode(n) for n in pullop.rheads):
1506 1506 break
1507 1507 new_heads = headsofdiff(unficl.heads(), old_heads)
1508 1508 pullop.common = headsofunion(new_heads, pullop.common)
1509 1509 pullop.rheads = set(pullop.rheads) - pullop.common
1510 1510
1511 1511
1512 1512 def add_confirm_callback(repo, pullop):
1513 1513 """adds a finalize callback to transaction which can be used to show stats
1514 1514 to user and confirm the pull before committing transaction"""
1515 1515
1516 1516 tr = pullop.trmanager.transaction()
1517 1517 scmutil.registersummarycallback(
1518 1518 repo, tr, txnname=b'pull', as_validator=True
1519 1519 )
1520 1520 reporef = weakref.ref(repo.unfiltered())
1521 1521
1522 1522 def prompt(tr):
1523 1523 repo = reporef()
1524 1524 cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No')
1525 1525 if repo.ui.promptchoice(cm):
1526 1526 raise error.Abort(b"user aborted")
1527 1527
1528 1528 tr.addvalidator(b'900-pull-prompt', prompt)
1529 1529
1530 1530
1531 1531 def pull(
1532 1532 repo,
1533 1533 remote,
1534 1534 heads=None,
1535 1535 force=False,
1536 1536 bookmarks=(),
1537 1537 opargs=None,
1538 1538 streamclonerequested=None,
1539 1539 includepats=None,
1540 1540 excludepats=None,
1541 1541 depth=None,
1542 1542 confirm=None,
1543 1543 ):
1544 1544 """Fetch repository data from a remote.
1545 1545
1546 1546 This is the main function used to retrieve data from a remote repository.
1547 1547
1548 1548 ``repo`` is the local repository to clone into.
1549 1549 ``remote`` is a peer instance.
1550 1550 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1551 1551 default) means to pull everything from the remote.
1552 1552 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1553 1553 default, all remote bookmarks are pulled.
1554 1554 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1555 1555 initialization.
1556 1556 ``streamclonerequested`` is a boolean indicating whether a "streaming
1557 1557 clone" is requested. A "streaming clone" is essentially a raw file copy
1558 1558 of revlogs from the server. This only works when the local repository is
1559 1559 empty. The default value of ``None`` means to respect the server
1560 1560 configuration for preferring stream clones.
1561 1561 ``includepats`` and ``excludepats`` define explicit file patterns to
1562 1562 include and exclude in storage, respectively. If not defined, narrow
1563 1563 patterns from the repo instance are used, if available.
1564 1564 ``depth`` is an integer indicating the DAG depth of history we're
1565 1565 interested in. If defined, for each revision specified in ``heads``, we
1566 1566 will fetch up to this many of its ancestors and data associated with them.
1567 1567 ``confirm`` is a boolean indicating whether the pull should be confirmed
1568 1568 before committing the transaction. This overrides HGPLAIN.
1569 1569
1570 1570 Returns the ``pulloperation`` created for this pull.
1571 1571 """
1572 1572 if opargs is None:
1573 1573 opargs = {}
1574 1574
1575 1575 # We allow the narrow patterns to be passed in explicitly to provide more
1576 1576 # flexibility for API consumers.
1577 1577 if includepats or excludepats:
1578 1578 includepats = includepats or set()
1579 1579 excludepats = excludepats or set()
1580 1580 else:
1581 1581 includepats, excludepats = repo.narrowpats
1582 1582
1583 1583 narrowspec.validatepatterns(includepats)
1584 1584 narrowspec.validatepatterns(excludepats)
1585 1585
1586 1586 pullop = pulloperation(
1587 1587 repo,
1588 1588 remote,
1589 1589 heads,
1590 1590 force,
1591 1591 bookmarks=bookmarks,
1592 1592 streamclonerequested=streamclonerequested,
1593 1593 includepats=includepats,
1594 1594 excludepats=excludepats,
1595 1595 depth=depth,
1596 1596 **pycompat.strkwargs(opargs)
1597 1597 )
1598 1598
1599 1599 peerlocal = pullop.remote.local()
1600 1600 if peerlocal:
1601 1601 missing = set(peerlocal.requirements) - pullop.repo.supported
1602 1602 if missing:
1603 1603 msg = _(
1604 1604 b"required features are not"
1605 1605 b" supported in the destination:"
1606 1606 b" %s"
1607 1607 ) % (b', '.join(sorted(missing)))
1608 1608 raise error.Abort(msg)
1609 1609
1610 1610 pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
1611 1611 wlock = util.nullcontextmanager()
1612 1612 if not bookmod.bookmarksinstore(repo):
1613 1613 wlock = repo.wlock()
1614 1614 with wlock, repo.lock(), pullop.trmanager:
1615 1615 if confirm or (
1616 1616 repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain()
1617 1617 ):
1618 1618 add_confirm_callback(repo, pullop)
1619 1619
1620 1620 # Use the modern wire protocol, if available.
1621 1621 if remote.capable(b'command-changesetdata'):
1622 1622 exchangev2.pull(pullop)
1623 1623 else:
1624 1624 # This should ideally be in _pullbundle2(). However, it needs to run
1625 1625 # before discovery to avoid extra work.
1626 1626 _maybeapplyclonebundle(pullop)
1627 1627 streamclone.maybeperformlegacystreamclone(pullop)
1628 1628 _pulldiscovery(pullop)
1629 1629 if pullop.canusebundle2:
1630 1630 _fullpullbundle2(repo, pullop)
1631 1631 _pullchangeset(pullop)
1632 1632 _pullphase(pullop)
1633 1633 _pullbookmarks(pullop)
1634 1634 _pullobsolete(pullop)
1635 1635
1636 1636 # storing remotenames
1637 1637 if repo.ui.configbool(b'experimental', b'remotenames'):
1638 1638 logexchange.pullremotenames(repo, remote)
1639 1639
1640 1640 return pullop
1641 1641
1642 1642
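As an illustrative, untested sketch (not part of this change) of how a caller
might drive ``pull()``, assuming a local ``repo`` object and using the real
``mercurial.hg.peer`` helper to open the remote:

    from mercurial import exchange, hg

    def fetch_heads(repo, source, nodes):
        # open a peer for the remote location; heads=None would pull everything
        peer = hg.peer(repo, {}, source)
        try:
            pullop = exchange.pull(repo, peer, heads=nodes)
            # cgresult is 0 when no changes were found, non-zero otherwise
            return pullop.cgresult
        finally:
            peer.close()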
1643 1643 # list of steps to perform discovery before pull
1644 1644 pulldiscoveryorder = []
1645 1645
1646 1646 # Mapping between step name and function
1647 1647 #
1648 1648 # This exists to help extensions wrap steps if necessary
1649 1649 pulldiscoverymapping = {}
1650 1650
1651 1651
1652 1652 def pulldiscovery(stepname):
1653 1653 """decorator for function performing discovery before pull
1654 1654
1655 1655 The function is added to the step -> function mapping and appended to the
1656 1656 list of steps. Beware that decorated functions will be added in order (this
1657 1657 may matter).
1658 1658
1659 1659 You can only use this decorator for a new step; if you want to wrap a step
1660 1660 from an extension, change the pulldiscoverymapping dictionary directly."""
1661 1661
1662 1662 def dec(func):
1663 1663 assert stepname not in pulldiscoverymapping
1664 1664 pulldiscoverymapping[stepname] = func
1665 1665 pulldiscoveryorder.append(stepname)
1666 1666 return func
1667 1667
1668 1668 return dec
1669 1669
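For illustration, a minimal sketch of how an extension might register an extra
discovery step with this decorator (the step name ``b'myext:mystep'`` and the
function body are hypothetical):

    @pulldiscovery(b'myext:mystep')
    def _pulldiscoverymystep(pullop):
        # runs alongside the built-in steps, in registration order
        pullop.repo.ui.debug(b'myext: extra discovery step\n')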
1670 1670
1671 1671 def _pulldiscovery(pullop):
1672 1672 """Run all discovery steps"""
1673 1673 for stepname in pulldiscoveryorder:
1674 1674 step = pulldiscoverymapping[stepname]
1675 1675 step(pullop)
1676 1676
1677 1677
1678 1678 @pulldiscovery(b'b1:bookmarks')
1679 1679 def _pullbookmarkbundle1(pullop):
1680 1680 """fetch bookmark data in bundle1 case
1681 1681
1682 1682 If not using bundle2, we have to fetch bookmarks before changeset
1683 1683 discovery to reduce the chance and impact of race conditions."""
1684 1684 if pullop.remotebookmarks is not None:
1685 1685 return
1686 1686 if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
1687 1687 # all known bundle2 servers now support listkeys, but let's be nice to
1688 1688 # new implementations.
1689 1689 return
1690 1690 books = listkeys(pullop.remote, b'bookmarks')
1691 1691 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1692 1692
1693 1693
1694 1694 @pulldiscovery(b'changegroup')
1695 1695 def _pulldiscoverychangegroup(pullop):
1696 1696 """discovery phase for the pull
1697 1697
1698 1698 Currently handles changeset discovery only; will change to handle all
1699 1699 discovery at some point."""
1700 1700 tmp = discovery.findcommonincoming(
1701 1701 pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
1702 1702 )
1703 1703 common, fetch, rheads = tmp
1704 1704 has_node = pullop.repo.unfiltered().changelog.index.has_node
1705 1705 if fetch and rheads:
1706 1706 # If a remote head is filtered locally, put it back in common.
1707 1707 #
1708 1708 # This is a hackish solution to catch most "common but locally
1709 1709 # hidden" situations. We do not perform discovery on the unfiltered
1710 1710 # repository because it ends up doing a pathological number of round
1711 1711 # trips for a huge number of changesets we do not care about.
1712 1712 #
1713 1713 # If a set of such "common but filtered" changesets exists on the server
1714 1714 # but does not include a remote head, we will not be able to detect it.
1715 1715 scommon = set(common)
1716 1716 for n in rheads:
1717 1717 if has_node(n):
1718 1718 if n not in scommon:
1719 1719 common.append(n)
1720 1720 if set(rheads).issubset(set(common)):
1721 1721 fetch = []
1722 1722 pullop.common = common
1723 1723 pullop.fetch = fetch
1724 1724 pullop.rheads = rheads
1725 1725
1726 1726
1727 1727 def _pullbundle2(pullop):
1728 1728 """pull data using bundle2
1729 1729
1730 1730 For now, the only supported data is the changegroup."""
1731 1731 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1732 1732
1733 1733 # make ui easier to access
1734 1734 ui = pullop.repo.ui
1735 1735
1736 1736 # At the moment we don't do stream clones over bundle2. If that is
1737 1737 # implemented then here's where the check for that will go.
1738 1738 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1739 1739
1740 1740 # declare pull parameters
1741 1741 kwargs[b'common'] = pullop.common
1742 1742 kwargs[b'heads'] = pullop.heads or pullop.rheads
1743 1743
1744 1744 # check whether the server supports narrow, then add includepats and excludepats
1745 1745 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1746 1746 if servernarrow and pullop.includepats:
1747 1747 kwargs[b'includepats'] = pullop.includepats
1748 1748 if servernarrow and pullop.excludepats:
1749 1749 kwargs[b'excludepats'] = pullop.excludepats
1750 1750
1751 1751 if streaming:
1752 1752 kwargs[b'cg'] = False
1753 1753 kwargs[b'stream'] = True
1754 1754 pullop.stepsdone.add(b'changegroup')
1755 1755 pullop.stepsdone.add(b'phases')
1756 1756
1757 1757 else:
1758 1758 # pulling changegroup
1759 1759 pullop.stepsdone.add(b'changegroup')
1760 1760
1761 1761 kwargs[b'cg'] = pullop.fetch
1762 1762
1763 1763 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1764 1764 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1765 1765 if not legacyphase and hasbinaryphase:
1766 1766 kwargs[b'phases'] = True
1767 1767 pullop.stepsdone.add(b'phases')
1768 1768
1769 1769 if b'listkeys' in pullop.remotebundle2caps:
1770 1770 if b'phases' not in pullop.stepsdone:
1771 1771 kwargs[b'listkeys'] = [b'phases']
1772 1772
1773 1773 bookmarksrequested = False
1774 1774 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1775 1775 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1776 1776
1777 1777 if pullop.remotebookmarks is not None:
1778 1778 pullop.stepsdone.add(b'request-bookmarks')
1779 1779
1780 1780 if (
1781 1781 b'request-bookmarks' not in pullop.stepsdone
1782 1782 and pullop.remotebookmarks is None
1783 1783 and not legacybookmark
1784 1784 and hasbinarybook
1785 1785 ):
1786 1786 kwargs[b'bookmarks'] = True
1787 1787 bookmarksrequested = True
1788 1788
1789 1789 if b'listkeys' in pullop.remotebundle2caps:
1790 1790 if b'request-bookmarks' not in pullop.stepsdone:
1791 1791 # make sure to always include bookmark data when migrating
1792 1792 # `hg incoming --bundle` to using this function.
1793 1793 pullop.stepsdone.add(b'request-bookmarks')
1794 1794 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1795 1795
1796 1796 # If this is a full pull / clone and the server supports the clone bundles
1797 1797 # feature, tell the server whether we attempted a clone bundle. The
1798 1798 # presence of this flag indicates the client supports clone bundles. This
1799 1799 # will enable the server to treat clients that support clone bundles
1800 1800 # differently from those that don't.
1801 1801 if (
1802 1802 pullop.remote.capable(b'clonebundles')
1803 1803 and pullop.heads is None
1804 1804 and list(pullop.common) == [nullid]
1805 1805 ):
1806 1806 kwargs[b'cbattempted'] = pullop.clonebundleattempted
1807 1807
1808 1808 if streaming:
1809 1809 pullop.repo.ui.status(_(b'streaming all changes\n'))
1810 1810 elif not pullop.fetch:
1811 1811 pullop.repo.ui.status(_(b"no changes found\n"))
1812 1812 pullop.cgresult = 0
1813 1813 else:
1814 1814 if pullop.heads is None and list(pullop.common) == [nullid]:
1815 1815 pullop.repo.ui.status(_(b"requesting all changes\n"))
1816 1816 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1817 1817 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1818 1818 if obsolete.commonversion(remoteversions) is not None:
1819 1819 kwargs[b'obsmarkers'] = True
1820 1820 pullop.stepsdone.add(b'obsmarkers')
1821 1821 _pullbundle2extraprepare(pullop, kwargs)
1822 1822
1823 1823 with pullop.remote.commandexecutor() as e:
1824 1824 args = dict(kwargs)
1825 1825 args[b'source'] = b'pull'
1826 1826 bundle = e.callcommand(b'getbundle', args).result()
1827 1827
1828 1828 try:
1829 1829 op = bundle2.bundleoperation(
1830 1830 pullop.repo, pullop.gettransaction, source=b'pull'
1831 1831 )
1832 1832 op.modes[b'bookmarks'] = b'records'
1833 1833 bundle2.processbundle(pullop.repo, bundle, op=op)
1834 1834 except bundle2.AbortFromPart as exc:
1835 pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
1835 pullop.repo.ui.error(_(b'remote: abort: %s\n') % exc)
1836 1836 raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
1837 1837 except error.BundleValueError as exc:
1838 1838 raise error.Abort(_(b'missing support for %s') % exc)
1839 1839
1840 1840 if pullop.fetch:
1841 1841 pullop.cgresult = bundle2.combinechangegroupresults(op)
1842 1842
1843 1843 # processing phases change
1844 1844 for namespace, value in op.records[b'listkeys']:
1845 1845 if namespace == b'phases':
1846 1846 _pullapplyphases(pullop, value)
1847 1847
1848 1848 # processing bookmark update
1849 1849 if bookmarksrequested:
1850 1850 books = {}
1851 1851 for record in op.records[b'bookmarks']:
1852 1852 books[record[b'bookmark']] = record[b"node"]
1853 1853 pullop.remotebookmarks = books
1854 1854 else:
1855 1855 for namespace, value in op.records[b'listkeys']:
1856 1856 if namespace == b'bookmarks':
1857 1857 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1858 1858
1859 1859 # bookmark data were either already there or pulled in the bundle
1860 1860 if pullop.remotebookmarks is not None:
1861 1861 _pullbookmarks(pullop)
1862 1862
1863 1863
1864 1864 def _pullbundle2extraprepare(pullop, kwargs):
1865 1865 """hook function so that extensions can extend the getbundle call"""
1866 1866
1867 1867
1868 1868 def _pullchangeset(pullop):
1869 1869 """pull changeset from unbundle into the local repo"""
1870 1870 # We delay opening the transaction as late as possible so we
1871 1871 # don't open a transaction for nothing and don't break a future
1872 1872 # useful rollback call.
1873 1873 if b'changegroup' in pullop.stepsdone:
1874 1874 return
1875 1875 pullop.stepsdone.add(b'changegroup')
1876 1876 if not pullop.fetch:
1877 1877 pullop.repo.ui.status(_(b"no changes found\n"))
1878 1878 pullop.cgresult = 0
1879 1879 return
1880 1880 tr = pullop.gettransaction()
1881 1881 if pullop.heads is None and list(pullop.common) == [nullid]:
1882 1882 pullop.repo.ui.status(_(b"requesting all changes\n"))
1883 1883 elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
1884 1884 # issue1320, avoid a race if remote changed after discovery
1885 1885 pullop.heads = pullop.rheads
1886 1886
1887 1887 if pullop.remote.capable(b'getbundle'):
1888 1888 # TODO: get bundlecaps from remote
1889 1889 cg = pullop.remote.getbundle(
1890 1890 b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
1891 1891 )
1892 1892 elif pullop.heads is None:
1893 1893 with pullop.remote.commandexecutor() as e:
1894 1894 cg = e.callcommand(
1895 1895 b'changegroup',
1896 1896 {
1897 1897 b'nodes': pullop.fetch,
1898 1898 b'source': b'pull',
1899 1899 },
1900 1900 ).result()
1901 1901
1902 1902 elif not pullop.remote.capable(b'changegroupsubset'):
1903 1903 raise error.Abort(
1904 1904 _(
1905 1905 b"partial pull cannot be done because "
1906 1906 b"other repository doesn't support "
1907 1907 b"changegroupsubset."
1908 1908 )
1909 1909 )
1910 1910 else:
1911 1911 with pullop.remote.commandexecutor() as e:
1912 1912 cg = e.callcommand(
1913 1913 b'changegroupsubset',
1914 1914 {
1915 1915 b'bases': pullop.fetch,
1916 1916 b'heads': pullop.heads,
1917 1917 b'source': b'pull',
1918 1918 },
1919 1919 ).result()
1920 1920
1921 1921 bundleop = bundle2.applybundle(
1922 1922 pullop.repo, cg, tr, b'pull', pullop.remote.url()
1923 1923 )
1924 1924 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1925 1925
1926 1926
1927 1927 def _pullphase(pullop):
1928 1928 # Get remote phases data from remote
1929 1929 if b'phases' in pullop.stepsdone:
1930 1930 return
1931 1931 remotephases = listkeys(pullop.remote, b'phases')
1932 1932 _pullapplyphases(pullop, remotephases)
1933 1933
1934 1934
1935 1935 def _pullapplyphases(pullop, remotephases):
1936 1936 """apply phase movement from observed remote state"""
1937 1937 if b'phases' in pullop.stepsdone:
1938 1938 return
1939 1939 pullop.stepsdone.add(b'phases')
1940 1940 publishing = bool(remotephases.get(b'publishing', False))
1941 1941 if remotephases and not publishing:
1942 1942 # remote is new and non-publishing
1943 1943 pheads, _dr = phases.analyzeremotephases(
1944 1944 pullop.repo, pullop.pulledsubset, remotephases
1945 1945 )
1946 1946 dheads = pullop.pulledsubset
1947 1947 else:
1948 1948 # Remote is old or publishing all common changesets
1949 1949 # should be seen as public
1950 1950 pheads = pullop.pulledsubset
1951 1951 dheads = []
1952 1952 unfi = pullop.repo.unfiltered()
1953 1953 phase = unfi._phasecache.phase
1954 1954 rev = unfi.changelog.index.get_rev
1955 1955 public = phases.public
1956 1956 draft = phases.draft
1957 1957
1958 1958 # exclude changesets already public locally and update the others
1959 1959 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1960 1960 if pheads:
1961 1961 tr = pullop.gettransaction()
1962 1962 phases.advanceboundary(pullop.repo, tr, public, pheads)
1963 1963
1964 1964 # exclude changesets already draft locally and update the others
1965 1965 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1966 1966 if dheads:
1967 1967 tr = pullop.gettransaction()
1968 1968 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1969 1969
1970 1970
1971 1971 def _pullbookmarks(pullop):
1972 1972 """process the remote bookmark information to update the local one"""
1973 1973 if b'bookmarks' in pullop.stepsdone:
1974 1974 return
1975 1975 pullop.stepsdone.add(b'bookmarks')
1976 1976 repo = pullop.repo
1977 1977 remotebookmarks = pullop.remotebookmarks
1978 1978 bookmod.updatefromremote(
1979 1979 repo.ui,
1980 1980 repo,
1981 1981 remotebookmarks,
1982 1982 pullop.remote.url(),
1983 1983 pullop.gettransaction,
1984 1984 explicit=pullop.explicitbookmarks,
1985 1985 )
1986 1986
1987 1987
1988 1988 def _pullobsolete(pullop):
1989 1989 """utility function to pull obsolete markers from a remote
1990 1990
1991 1991 The `gettransaction` function returns the pull transaction, creating
1992 1992 one if necessary. We return the transaction to inform the calling code that
1993 1993 a new transaction has been created (when applicable).
1994 1994
1995 1995 Exists mostly to allow overriding for experimentation purposes"""
1996 1996 if b'obsmarkers' in pullop.stepsdone:
1997 1997 return
1998 1998 pullop.stepsdone.add(b'obsmarkers')
1999 1999 tr = None
2000 2000 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2001 2001 pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
2002 2002 remoteobs = listkeys(pullop.remote, b'obsolete')
2003 2003 if b'dump0' in remoteobs:
2004 2004 tr = pullop.gettransaction()
2005 2005 markers = []
2006 2006 for key in sorted(remoteobs, reverse=True):
2007 2007 if key.startswith(b'dump'):
2008 2008 data = util.b85decode(remoteobs[key])
2009 2009 version, newmarks = obsolete._readmarkers(data)
2010 2010 markers += newmarks
2011 2011 if markers:
2012 2012 pullop.repo.obsstore.add(tr, markers)
2013 2013 pullop.repo.invalidatevolatilesets()
2014 2014 return tr
2015 2015
2016 2016
2017 2017 def applynarrowacl(repo, kwargs):
2018 2018 """Apply narrow fetch access control.
2019 2019
2020 2020 This massages the named arguments for getbundle wire protocol commands
2021 2021 so requested data is filtered through access control rules.
2022 2022 """
2023 2023 ui = repo.ui
2024 2024 # TODO this assumes existence of HTTP and is a layering violation.
2025 2025 username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
2026 2026 user_includes = ui.configlist(
2027 2027 _NARROWACL_SECTION,
2028 2028 username + b'.includes',
2029 2029 ui.configlist(_NARROWACL_SECTION, b'default.includes'),
2030 2030 )
2031 2031 user_excludes = ui.configlist(
2032 2032 _NARROWACL_SECTION,
2033 2033 username + b'.excludes',
2034 2034 ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
2035 2035 )
2036 2036 if not user_includes:
2037 2037 raise error.Abort(
2038 2038 _(b"%s configuration for user %s is empty")
2039 2039 % (_NARROWACL_SECTION, username)
2040 2040 )
2041 2041
2042 2042 user_includes = [
2043 2043 b'path:.' if p == b'*' else b'path:' + p for p in user_includes
2044 2044 ]
2045 2045 user_excludes = [
2046 2046 b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
2047 2047 ]
2048 2048
2049 2049 req_includes = set(kwargs.get('includepats', []))
2050 2050 req_excludes = set(kwargs.get('excludepats', []))
2051 2051
2052 2052 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
2053 2053 req_includes, req_excludes, user_includes, user_excludes
2054 2054 )
2055 2055
2056 2056 if invalid_includes:
2057 2057 raise error.Abort(
2058 2058 _(b"The following includes are not accessible for %s: %s")
2059 2059 % (username, stringutil.pprint(invalid_includes))
2060 2060 )
2061 2061
2062 2062 new_args = {}
2063 2063 new_args.update(kwargs)
2064 2064 new_args['narrow'] = True
2065 2065 new_args['narrow_acl'] = True
2066 2066 new_args['includepats'] = req_includes
2067 2067 if req_excludes:
2068 2068 new_args['excludepats'] = req_excludes
2069 2069
2070 2070 return new_args
2071 2071
2072 2072
2073 2073 def _computeellipsis(repo, common, heads, known, match, depth=None):
2074 2074 """Compute the shape of a narrowed DAG.
2075 2075
2076 2076 Args:
2077 2077 repo: The repository we're transferring.
2078 2078 common: The roots of the DAG range we're transferring.
2079 2079 May be just [nullid], which means all ancestors of heads.
2080 2080 heads: The heads of the DAG range we're transferring.
2081 2081 match: The narrowmatcher that allows us to identify relevant changes.
2082 2082 depth: If not None, only consider nodes to be full nodes if they are at
2083 2083 most depth changesets away from one of heads.
2084 2084
2085 2085 Returns:
2086 2086 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
2087 2087
2088 2088 visitnodes: The list of nodes (either full or ellipsis) which
2089 2089 need to be sent to the client.
2090 2090 relevant_nodes: The set of changelog nodes which change a file inside
2091 2091 the narrowspec. The client needs these as non-ellipsis nodes.
2092 2092 ellipsisroots: A dict of {rev: parents} that is used in
2093 2093 narrowchangegroup to produce ellipsis nodes with the
2094 2094 correct parents.
2095 2095 """
2096 2096 cl = repo.changelog
2097 2097 mfl = repo.manifestlog
2098 2098
2099 2099 clrev = cl.rev
2100 2100
2101 2101 commonrevs = {clrev(n) for n in common} | {nullrev}
2102 2102 headsrevs = {clrev(n) for n in heads}
2103 2103
2104 2104 if depth:
2105 2105 revdepth = {h: 0 for h in headsrevs}
2106 2106
2107 2107 ellipsisheads = collections.defaultdict(set)
2108 2108 ellipsisroots = collections.defaultdict(set)
2109 2109
2110 2110 def addroot(head, curchange):
2111 2111 """Add a root to an ellipsis head, splitting heads with 3 roots."""
2112 2112 ellipsisroots[head].add(curchange)
2113 2113 # Recursively split ellipsis heads with 3 roots by finding the
2114 2114 # roots' youngest common descendant which is an elided merge commit.
2115 2115 # That descendant takes 2 of the 3 roots as its own, and becomes a
2116 2116 # root of the head.
2117 2117 while len(ellipsisroots[head]) > 2:
2118 2118 child, roots = splithead(head)
2119 2119 splitroots(head, child, roots)
2120 2120 head = child # Recurse in case we just added a 3rd root
2121 2121
2122 2122 def splitroots(head, child, roots):
2123 2123 ellipsisroots[head].difference_update(roots)
2124 2124 ellipsisroots[head].add(child)
2125 2125 ellipsisroots[child].update(roots)
2126 2126 ellipsisroots[child].discard(child)
2127 2127
2128 2128 def splithead(head):
2129 2129 r1, r2, r3 = sorted(ellipsisroots[head])
2130 2130 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
2131 2131 mid = repo.revs(
2132 2132 b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
2133 2133 )
2134 2134 for j in mid:
2135 2135 if j == nr2:
2136 2136 return nr2, (nr1, nr2)
2137 2137 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
2138 2138 return j, (nr1, nr2)
2139 2139 raise error.Abort(
2140 2140 _(
2141 2141 b'Failed to split up ellipsis node! head: %d, '
2142 2142 b'roots: %d %d %d'
2143 2143 )
2144 2144 % (head, r1, r2, r3)
2145 2145 )
2146 2146
2147 2147 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
2148 2148 visit = reversed(missing)
2149 2149 relevant_nodes = set()
2150 2150 visitnodes = [cl.node(m) for m in missing]
2151 2151 required = set(headsrevs) | known
2152 2152 for rev in visit:
2153 2153 clrev = cl.changelogrevision(rev)
2154 2154 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
2155 2155 if depth is not None:
2156 2156 curdepth = revdepth[rev]
2157 2157 for p in ps:
2158 2158 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
2159 2159 needed = False
2160 2160 shallow_enough = depth is None or revdepth[rev] <= depth
2161 2161 if shallow_enough:
2162 2162 curmf = mfl[clrev.manifest].read()
2163 2163 if ps:
2164 2164 # We choose to not trust the changed files list in
2165 2165 # changesets because it's not always correct. TODO: could
2166 2166 # we trust it for the non-merge case?
2167 2167 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
2168 2168 needed = bool(curmf.diff(p1mf, match))
2169 2169 if not needed and len(ps) > 1:
2170 2170 # For merge changes, the list of changed files is not
2171 2171 # helpful, since we need to emit the merge if a file
2172 2172 # in the narrow spec has changed on either side of the
2173 2173 # merge. As a result, we do a manifest diff to check.
2174 2174 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2175 2175 needed = bool(curmf.diff(p2mf, match))
2176 2176 else:
2177 2177 # For a root node, we need to include the node if any
2178 2178 # files in the node match the narrowspec.
2179 2179 needed = any(curmf.walk(match))
2180 2180
2181 2181 if needed:
2182 2182 for head in ellipsisheads[rev]:
2183 2183 addroot(head, rev)
2184 2184 for p in ps:
2185 2185 required.add(p)
2186 2186 relevant_nodes.add(cl.node(rev))
2187 2187 else:
2188 2188 if not ps:
2189 2189 ps = [nullrev]
2190 2190 if rev in required:
2191 2191 for head in ellipsisheads[rev]:
2192 2192 addroot(head, rev)
2193 2193 for p in ps:
2194 2194 ellipsisheads[p].add(rev)
2195 2195 else:
2196 2196 for p in ps:
2197 2197 ellipsisheads[p] |= ellipsisheads[rev]
2198 2198
2199 2199 # add common changesets as roots of their reachable ellipsis heads
2200 2200 for c in commonrevs:
2201 2201 for head in ellipsisheads[c]:
2202 2202 addroot(head, c)
2203 2203 return visitnodes, relevant_nodes, ellipsisroots
2204 2204
2205 2205
2206 2206 def caps20to10(repo, role):
2207 2207 """return a set with appropriate options to use bundle20 during getbundle"""
2208 2208 caps = {b'HG20'}
2209 2209 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2210 2210 caps.add(b'bundle2=' + urlreq.quote(capsblob))
2211 2211 return caps
2212 2212
2213 2213
2214 2214 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2215 2215 getbundle2partsorder = []
2216 2216
2217 2217 # Mapping between step name and function
2218 2218 #
2219 2219 # This exists to help extensions wrap steps if necessary
2220 2220 getbundle2partsmapping = {}
2221 2221
2222 2222
2223 2223 def getbundle2partsgenerator(stepname, idx=None):
2224 2224 """decorator for function generating bundle2 part for getbundle
2225 2225
2226 2226 The function is added to the step -> function mapping and appended to the
2227 2227 list of steps. Beware that decorated functions will be added in order
2228 2228 (this may matter).
2229 2229
2230 2230 You can only use this decorator for new steps; if you want to wrap a step
2231 2231 from an extension, modify the getbundle2partsmapping dictionary directly."""
2232 2232
2233 2233 def dec(func):
2234 2234 assert stepname not in getbundle2partsmapping
2235 2235 getbundle2partsmapping[stepname] = func
2236 2236 if idx is None:
2237 2237 getbundle2partsorder.append(stepname)
2238 2238 else:
2239 2239 getbundle2partsorder.insert(idx, stepname)
2240 2240 return func
2241 2241
2242 2242 return dec
2243 2243
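For illustration, a sketch of how an extension could add its own part to
getbundle replies via this decorator (the part name ``b'myext:stats'`` and the
payload are hypothetical; real generators follow the same shape as those
below):

    @getbundle2partsgenerator(b'myext:stats')
    def _getbundlemystatspart(
        bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
    ):
        # only emit the part if the client advertised support for it
        if b2caps and b'myext:stats' in b2caps:
            bundler.newpart(b'myext:stats', data=b'...', mandatory=False)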
2244 2244
2245 2245 def bundle2requested(bundlecaps):
2246 2246 if bundlecaps is not None:
2247 2247 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2248 2248 return False
2249 2249
2250 2250
2251 2251 def getbundlechunks(
2252 2252 repo, source, heads=None, common=None, bundlecaps=None, **kwargs
2253 2253 ):
2254 2254 """Return chunks constituting a bundle's raw data.
2255 2255
2256 2256 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2257 2257 passed.
2258 2258
2259 2259 Returns a 2-tuple of a dict with metadata about the generated bundle
2260 2260 and an iterator over raw chunks (of varying sizes).
2261 2261 """
2262 2262 kwargs = pycompat.byteskwargs(kwargs)
2263 2263 info = {}
2264 2264 usebundle2 = bundle2requested(bundlecaps)
2265 2265 # bundle10 case
2266 2266 if not usebundle2:
2267 2267 if bundlecaps and not kwargs.get(b'cg', True):
2268 2268 raise ValueError(
2269 2269 _(b'request for bundle10 must include changegroup')
2270 2270 )
2271 2271
2272 2272 if kwargs:
2273 2273 raise ValueError(
2274 2274 _(b'unsupported getbundle arguments: %s')
2275 2275 % b', '.join(sorted(kwargs.keys()))
2276 2276 )
2277 2277 outgoing = _computeoutgoing(repo, heads, common)
2278 2278 info[b'bundleversion'] = 1
2279 2279 return (
2280 2280 info,
2281 2281 changegroup.makestream(
2282 2282 repo, outgoing, b'01', source, bundlecaps=bundlecaps
2283 2283 ),
2284 2284 )
2285 2285
2286 2286 # bundle20 case
2287 2287 info[b'bundleversion'] = 2
2288 2288 b2caps = {}
2289 2289 for bcaps in bundlecaps:
2290 2290 if bcaps.startswith(b'bundle2='):
2291 2291 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2292 2292 b2caps.update(bundle2.decodecaps(blob))
2293 2293 bundler = bundle2.bundle20(repo.ui, b2caps)
2294 2294
2295 2295 kwargs[b'heads'] = heads
2296 2296 kwargs[b'common'] = common
2297 2297
2298 2298 for name in getbundle2partsorder:
2299 2299 func = getbundle2partsmapping[name]
2300 2300 func(
2301 2301 bundler,
2302 2302 repo,
2303 2303 source,
2304 2304 bundlecaps=bundlecaps,
2305 2305 b2caps=b2caps,
2306 2306 **pycompat.strkwargs(kwargs)
2307 2307 )
2308 2308
2309 2309 info[b'prefercompressed'] = bundler.prefercompressed
2310 2310
2311 2311 return info, bundler.getchunks()
2312 2312
2313 2313
2314 2314 @getbundle2partsgenerator(b'stream2')
2315 2315 def _getbundlestream2(bundler, repo, *args, **kwargs):
2316 2316 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2317 2317
2318 2318
2319 2319 @getbundle2partsgenerator(b'changegroup')
2320 2320 def _getbundlechangegrouppart(
2321 2321 bundler,
2322 2322 repo,
2323 2323 source,
2324 2324 bundlecaps=None,
2325 2325 b2caps=None,
2326 2326 heads=None,
2327 2327 common=None,
2328 2328 **kwargs
2329 2329 ):
2330 2330 """add a changegroup part to the requested bundle"""
2331 2331 if not kwargs.get('cg', True) or not b2caps:
2332 2332 return
2333 2333
2334 2334 version = b'01'
2335 2335 cgversions = b2caps.get(b'changegroup')
2336 2336 if cgversions: # 3.1 and 3.2 ship with an empty value
2337 2337 cgversions = [
2338 2338 v
2339 2339 for v in cgversions
2340 2340 if v in changegroup.supportedoutgoingversions(repo)
2341 2341 ]
2342 2342 if not cgversions:
2343 2343 raise error.Abort(_(b'no common changegroup version'))
2344 2344 version = max(cgversions)
2345 2345
2346 2346 outgoing = _computeoutgoing(repo, heads, common)
2347 2347 if not outgoing.missing:
2348 2348 return
2349 2349
2350 2350 if kwargs.get('narrow', False):
2351 2351 include = sorted(filter(bool, kwargs.get('includepats', [])))
2352 2352 exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
2353 2353 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2354 2354 else:
2355 2355 matcher = None
2356 2356
2357 2357 cgstream = changegroup.makestream(
2358 2358 repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
2359 2359 )
2360 2360
2361 2361 part = bundler.newpart(b'changegroup', data=cgstream)
2362 2362 if cgversions:
2363 2363 part.addparam(b'version', version)
2364 2364
2365 2365 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2366 2366
2367 2367 if scmutil.istreemanifest(repo):
2368 2368 part.addparam(b'treemanifest', b'1')
2369 2369
2370 2370 if b'exp-sidedata-flag' in repo.requirements:
2371 2371 part.addparam(b'exp-sidedata', b'1')
2372 2372
2373 2373 if (
2374 2374 kwargs.get('narrow', False)
2375 2375 and kwargs.get('narrow_acl', False)
2376 2376 and (include or exclude)
2377 2377 ):
2378 2378 # this is mandatory because otherwise ACL clients won't work
2379 2379 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2380 2380 narrowspecpart.data = b'%s\0%s' % (
2381 2381 b'\n'.join(include),
2382 2382 b'\n'.join(exclude),
2383 2383 )
2384 2384
2385 2385
2386 2386 @getbundle2partsgenerator(b'bookmarks')
2387 2387 def _getbundlebookmarkpart(
2388 2388 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2389 2389 ):
2390 2390 """add a bookmark part to the requested bundle"""
2391 2391 if not kwargs.get('bookmarks', False):
2392 2392 return
2393 2393 if not b2caps or b'bookmarks' not in b2caps:
2394 2394 raise error.Abort(_(b'no common bookmarks exchange method'))
2395 2395 books = bookmod.listbinbookmarks(repo)
2396 2396 data = bookmod.binaryencode(books)
2397 2397 if data:
2398 2398 bundler.newpart(b'bookmarks', data=data)
2399 2399
2400 2400
2401 2401 @getbundle2partsgenerator(b'listkeys')
2402 2402 def _getbundlelistkeysparts(
2403 2403 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2404 2404 ):
2405 2405 """add parts containing listkeys namespaces to the requested bundle"""
2406 2406 listkeys = kwargs.get('listkeys', ())
2407 2407 for namespace in listkeys:
2408 2408 part = bundler.newpart(b'listkeys')
2409 2409 part.addparam(b'namespace', namespace)
2410 2410 keys = repo.listkeys(namespace).items()
2411 2411 part.data = pushkey.encodekeys(keys)
2412 2412
2413 2413
2414 2414 @getbundle2partsgenerator(b'obsmarkers')
2415 2415 def _getbundleobsmarkerpart(
2416 2416 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2417 2417 ):
2418 2418 """add an obsolescence markers part to the requested bundle"""
2419 2419 if kwargs.get('obsmarkers', False):
2420 2420 if heads is None:
2421 2421 heads = repo.heads()
2422 2422 subset = [c.node() for c in repo.set(b'::%ln', heads)]
2423 2423 markers = repo.obsstore.relevantmarkers(subset)
2424 2424 markers = obsutil.sortedmarkers(markers)
2425 2425 bundle2.buildobsmarkerspart(bundler, markers)
2426 2426
2427 2427
2428 2428 @getbundle2partsgenerator(b'phases')
2429 2429 def _getbundlephasespart(
2430 2430 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2431 2431 ):
2432 2432 """add phase heads part to the requested bundle"""
2433 2433 if kwargs.get('phases', False):
2434 2434 if not b2caps or b'heads' not in b2caps.get(b'phases'):
2435 2435 raise error.Abort(_(b'no common phases exchange method'))
2436 2436 if heads is None:
2437 2437 heads = repo.heads()
2438 2438
2439 2439 headsbyphase = collections.defaultdict(set)
2440 2440 if repo.publishing():
2441 2441 headsbyphase[phases.public] = heads
2442 2442 else:
2443 2443 # find the appropriate heads to move
2444 2444
2445 2445 phase = repo._phasecache.phase
2446 2446 node = repo.changelog.node
2447 2447 rev = repo.changelog.rev
2448 2448 for h in heads:
2449 2449 headsbyphase[phase(repo, rev(h))].add(h)
2450 2450 seenphases = list(headsbyphase.keys())
2451 2451
2452 2452 # We do not handle anything but public and draft phases for now
2453 2453 if seenphases:
2454 2454 assert max(seenphases) <= phases.draft
2455 2455
2456 2456 # if client is pulling non-public changesets, we need to find
2457 2457 # intermediate public heads.
2458 2458 draftheads = headsbyphase.get(phases.draft, set())
2459 2459 if draftheads:
2460 2460 publicheads = headsbyphase.get(phases.public, set())
2461 2461
2462 2462 revset = b'heads(only(%ln, %ln) and public())'
2463 2463 extraheads = repo.revs(revset, draftheads, publicheads)
2464 2464 for r in extraheads:
2465 2465 headsbyphase[phases.public].add(node(r))
2466 2466
2467 2467 # transform data in a format used by the encoding function
2468 2468 phasemapping = {
2469 2469 phase: sorted(headsbyphase[phase]) for phase in phases.allphases
2470 2470 }
2471 2471
2472 2472 # generate the actual part
2473 2473 phasedata = phases.binaryencode(phasemapping)
2474 2474 bundler.newpart(b'phase-heads', data=phasedata)
2475 2475
2476 2476
2477 2477 @getbundle2partsgenerator(b'hgtagsfnodes')
2478 2478 def _getbundletagsfnodes(
2479 2479 bundler,
2480 2480 repo,
2481 2481 source,
2482 2482 bundlecaps=None,
2483 2483 b2caps=None,
2484 2484 heads=None,
2485 2485 common=None,
2486 2486 **kwargs
2487 2487 ):
2488 2488 """Transfer the .hgtags filenodes mapping.
2489 2489
2490 2490 Only values for heads in this bundle will be transferred.
2491 2491
2492 2492 The part data consists of pairs of 20 byte changeset node and .hgtags
2493 2493 filenodes raw values.
2494 2494 """
2495 2495 # Don't send unless:
2496 2496 # - changesets are being exchanged,
2497 2497 # - the client supports it.
2498 2498 if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
2499 2499 return
2500 2500
2501 2501 outgoing = _computeoutgoing(repo, heads, common)
2502 2502 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2503 2503
2504 2504
2505 2505 @getbundle2partsgenerator(b'cache:rev-branch-cache')
2506 2506 def _getbundlerevbranchcache(
2507 2507 bundler,
2508 2508 repo,
2509 2509 source,
2510 2510 bundlecaps=None,
2511 2511 b2caps=None,
2512 2512 heads=None,
2513 2513 common=None,
2514 2514 **kwargs
2515 2515 ):
2516 2516 """Transfer the rev-branch-cache mapping
2517 2517
2518 2518 The payload is a series of data related to each branch
2519 2519
2520 2520 1) branch name length
2521 2521 2) number of open heads
2522 2522 3) number of closed heads
2523 2523 4) open heads nodes
2524 2524 5) closed heads nodes
2525 2525 """
2526 2526 # Don't send unless:
2527 2527 # - changesets are being exchanged,
2528 2528 # - the client supports it.
2529 2529 # - narrow bundle isn't in play (not currently compatible).
2530 2530 if (
2531 2531 not kwargs.get('cg', True)
2532 2532 or not b2caps
2533 2533 or b'rev-branch-cache' not in b2caps
2534 2534 or kwargs.get('narrow', False)
2535 2535 or repo.ui.has_section(_NARROWACL_SECTION)
2536 2536 ):
2537 2537 return
2538 2538
2539 2539 outgoing = _computeoutgoing(repo, heads, common)
2540 2540 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2541 2541
2542 2542
2543 2543 def check_heads(repo, their_heads, context):
2544 2544 """check if the heads of a repo have been modified
2545 2545
2546 2546 Used by peer for unbundling.
2547 2547 """
2548 2548 heads = repo.heads()
2549 2549 heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()
2550 2550 if not (
2551 2551 their_heads == [b'force']
2552 2552 or their_heads == heads
2553 2553 or their_heads == [b'hashed', heads_hash]
2554 2554 ):
2555 2555 # someone else committed/pushed/unbundled while we
2556 2556 # were transferring data
2557 2557 raise error.PushRaced(
2558 2558 b'repository changed while %s - please try again' % context
2559 2559 )
2560 2560
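For context, a sketch of the client side of this race check: the pushing
client digests the remote heads its bundle was built against, and the server
recomputes the same digest above to detect a concurrent push. The construction
mirrors ``check_heads()``:

    from mercurial.utils import hashutil

    def hashed_heads(heads):
        # sha1 over the concatenation of the sorted binary head nodes
        return [b'hashed', hashutil.sha1(b''.join(sorted(heads))).digest()]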
2561 2561
2562 2562 def unbundle(repo, cg, heads, source, url):
2563 2563 """Apply a bundle to a repo.
2564 2564
2565 2565 This function makes sure the repo is locked during the application and has
2566 2566 a mechanism to check that no push race occurred between the creation of the
2567 2567 bundle and its application.
2568 2568
2569 2569 If the push was raced, a PushRaced exception is raised."""
2570 2570 r = 0
2571 2571 # need a transaction when processing a bundle2 stream
2572 2572 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2573 2573 lockandtr = [None, None, None]
2574 2574 recordout = None
2575 2575 # quick fix for output mismatch with bundle2 in 3.4
2576 2576 captureoutput = repo.ui.configbool(
2577 2577 b'experimental', b'bundle2-output-capture'
2578 2578 )
2579 2579 if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
2580 2580 captureoutput = True
2581 2581 try:
2582 2582 # note: outside bundle1, 'heads' is expected to be empty and this
2583 2583 # 'check_heads' call will be a no-op
2584 2584 check_heads(repo, heads, b'uploading changes')
2585 2585 # push can proceed
2586 2586 if not isinstance(cg, bundle2.unbundle20):
2587 2587 # legacy case: bundle1 (changegroup 01)
2588 2588 txnname = b"\n".join([source, util.hidepassword(url)])
2589 2589 with repo.lock(), repo.transaction(txnname) as tr:
2590 2590 op = bundle2.applybundle(repo, cg, tr, source, url)
2591 2591 r = bundle2.combinechangegroupresults(op)
2592 2592 else:
2593 2593 r = None
2594 2594 try:
2595 2595
2596 2596 def gettransaction():
2597 2597 if not lockandtr[2]:
2598 2598 if not bookmod.bookmarksinstore(repo):
2599 2599 lockandtr[0] = repo.wlock()
2600 2600 lockandtr[1] = repo.lock()
2601 2601 lockandtr[2] = repo.transaction(source)
2602 2602 lockandtr[2].hookargs[b'source'] = source
2603 2603 lockandtr[2].hookargs[b'url'] = url
2604 2604 lockandtr[2].hookargs[b'bundle2'] = b'1'
2605 2605 return lockandtr[2]
2606 2606
2607 2607 # Do greedy locking by default until we're satisfied with lazy
2608 2608 # locking.
2609 2609 if not repo.ui.configbool(
2610 2610 b'experimental', b'bundle2lazylocking'
2611 2611 ):
2612 2612 gettransaction()
2613 2613
2614 2614 op = bundle2.bundleoperation(
2615 2615 repo,
2616 2616 gettransaction,
2617 2617 captureoutput=captureoutput,
2618 2618 source=b'push',
2619 2619 )
2620 2620 try:
2621 2621 op = bundle2.processbundle(repo, cg, op=op)
2622 2622 finally:
2623 2623 r = op.reply
2624 2624 if captureoutput and r is not None:
2625 2625 repo.ui.pushbuffer(error=True, subproc=True)
2626 2626
2627 2627 def recordout(output):
2628 2628 r.newpart(b'output', data=output, mandatory=False)
2629 2629
2630 2630 if lockandtr[2] is not None:
2631 2631 lockandtr[2].close()
2632 2632 except BaseException as exc:
2633 2633 exc.duringunbundle2 = True
2634 2634 if captureoutput and r is not None:
2635 2635 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2636 2636
2637 2637 def recordout(output):
2638 2638 part = bundle2.bundlepart(
2639 2639 b'output', data=output, mandatory=False
2640 2640 )
2641 2641 parts.append(part)
2642 2642
2643 2643 raise
2644 2644 finally:
2645 2645 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2646 2646 if recordout is not None:
2647 2647 recordout(repo.ui.popbuffer())
2648 2648 return r
2649 2649
2650 2650
2651 2651 def _maybeapplyclonebundle(pullop):
2652 2652 """Apply a clone bundle from a remote, if possible."""
2653 2653
2654 2654 repo = pullop.repo
2655 2655 remote = pullop.remote
2656 2656
2657 2657 if not repo.ui.configbool(b'ui', b'clonebundles'):
2658 2658 return
2659 2659
2660 2660 # Only run if local repo is empty.
2661 2661 if len(repo):
2662 2662 return
2663 2663
2664 2664 if pullop.heads:
2665 2665 return
2666 2666
2667 2667 if not remote.capable(b'clonebundles'):
2668 2668 return
2669 2669
2670 2670 with remote.commandexecutor() as e:
2671 2671 res = e.callcommand(b'clonebundles', {}).result()
2672 2672
2673 2673 # If we call the wire protocol command, that's good enough to record the
2674 2674 # attempt.
2675 2675 pullop.clonebundleattempted = True
2676 2676
2677 2677 entries = bundlecaches.parseclonebundlesmanifest(repo, res)
2678 2678 if not entries:
2679 2679 repo.ui.note(
2680 2680 _(
2681 2681 b'no clone bundles available on remote; '
2682 2682 b'falling back to regular clone\n'
2683 2683 )
2684 2684 )
2685 2685 return
2686 2686
2687 2687 entries = bundlecaches.filterclonebundleentries(
2688 2688 repo, entries, streamclonerequested=pullop.streamclonerequested
2689 2689 )
2690 2690
2691 2691 if not entries:
2692 2692 # There is a thundering herd concern here. However, if a server
2693 2693 # operator doesn't advertise bundles appropriate for its clients,
2694 2694 # they deserve what's coming. Furthermore, from a client's
2695 2695 # perspective, no automatic fallback would mean not being able to
2696 2696 # clone!
2697 2697 repo.ui.warn(
2698 2698 _(
2699 2699 b'no compatible clone bundles available on server; '
2700 2700 b'falling back to regular clone\n'
2701 2701 )
2702 2702 )
2703 2703 repo.ui.warn(
2704 2704 _(b'(you may want to report this to the server operator)\n')
2705 2705 )
2706 2706 return
2707 2707
2708 2708 entries = bundlecaches.sortclonebundleentries(repo.ui, entries)
2709 2709
2710 2710 url = entries[0][b'URL']
2711 2711 repo.ui.status(_(b'applying clone bundle from %s\n') % url)
2712 2712 if trypullbundlefromurl(repo.ui, repo, url):
2713 2713 repo.ui.status(_(b'finished applying clone bundle\n'))
2714 2714 # Bundle failed.
2715 2715 #
2716 2716 # We abort by default to avoid the thundering herd of
2717 2717 # clients flooding a server that was expecting expensive
2718 2718 # clone load to be offloaded.
2719 2719 elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
2720 2720 repo.ui.warn(_(b'falling back to normal clone\n'))
2721 2721 else:
2722 2722 raise error.Abort(
2723 2723 _(b'error applying bundle'),
2724 2724 hint=_(
2725 2725 b'if this error persists, consider contacting '
2726 2726 b'the server operator or disable clone '
2727 2727 b'bundles via '
2728 2728 b'"--config ui.clonebundles=false"'
2729 2729 ),
2730 2730 )
2731 2731
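For reference, a hedged sketch of the manifest returned by the
``clonebundles`` wire protocol command and parsed by
``bundlecaches.parseclonebundlesmanifest()`` above: one URL per line,
optionally followed by space-separated KEY=VALUE attributes (the URLs and
bundlespecs below are illustrative):

    https://cdn.example.com/full.hg BUNDLESPEC=gzip-v2
    https://cdn.example.com/full-zstd.hg BUNDLESPEC=zstd-v2 REQUIRESNI=true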
2732 2732
2733 2733 def trypullbundlefromurl(ui, repo, url):
2734 2734 """Attempt to apply a bundle from a URL."""
2735 2735 with repo.lock(), repo.transaction(b'bundleurl') as tr:
2736 2736 try:
2737 2737 fh = urlmod.open(ui, url)
2738 2738 cg = readbundle(ui, fh, b'stream')
2739 2739
2740 2740 if isinstance(cg, streamclone.streamcloneapplier):
2741 2741 cg.apply(repo)
2742 2742 else:
2743 2743 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
2744 2744 return True
2745 2745 except urlerr.httperror as e:
2746 2746 ui.warn(
2747 2747 _(b'HTTP error fetching bundle: %s\n')
2748 2748 % stringutil.forcebytestr(e)
2749 2749 )
2750 2750 except urlerr.urlerror as e:
2751 2751 ui.warn(
2752 2752 _(b'error fetching bundle: %s\n')
2753 2753 % stringutil.forcebytestr(e.reason)
2754 2754 )
2755 2755
2756 2756 return False
@@ -1,153 +1,154 b''
1 1 $ . "$TESTDIR/narrow-library.sh"
2 2
3 3 $ hg init master
4 4 $ cd master
5 5 $ cat >> .hg/hgrc <<EOF
6 6 > [narrow]
7 7 > serveellipses=True
8 8 > EOF
9 9 $ for x in `$TESTDIR/seq.py 10`
10 10 > do
11 11 > echo $x > "f$x"
12 12 > hg add "f$x"
13 13 > hg commit -m "Commit f$x"
14 14 > done
15 15 $ cd ..
16 16
17 17 narrow clone a couple files, f2 and f8
18 18
19 19 $ hg clone --narrow ssh://user@dummy/master narrow --include "f2" --include "f8"
20 20 requesting all changes
21 21 adding changesets
22 22 adding manifests
23 23 adding file changes
24 24 added 5 changesets with 2 changes to 2 files
25 25 new changesets *:* (glob)
26 26 updating to branch default
27 27 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
28 28 $ cd narrow
29 29 $ ls -A
30 30 .hg
31 31 f2
32 32 f8
33 33 $ cat f2 f8
34 34 2
35 35 8
36 36
37 37 $ cd ..
38 38
39 39 change every upstream file twice
40 40
41 41 $ cd master
42 42 $ for x in `$TESTDIR/seq.py 10`
43 43 > do
44 44 > echo "update#1 $x" >> "f$x"
45 45 > hg commit -m "Update#1 to f$x" "f$x"
46 46 > done
47 47 $ for x in `$TESTDIR/seq.py 10`
48 48 > do
49 49 > echo "update#2 $x" >> "f$x"
50 50 > hg commit -m "Update#2 to f$x" "f$x"
51 51 > done
52 52 $ cd ..
53 53
54 54 look for incoming changes
55 55
56 56 $ cd narrow
57 57 $ hg incoming --limit 3
58 58 comparing with ssh://user@dummy/master
59 59 searching for changes
60 60 changeset: 5:ddc055582556
61 61 user: test
62 62 date: Thu Jan 01 00:00:00 1970 +0000
63 63 summary: Update#1 to f1
64 64
65 65 changeset: 6:f66eb5ad621d
66 66 user: test
67 67 date: Thu Jan 01 00:00:00 1970 +0000
68 68 summary: Update#1 to f2
69 69
70 70 changeset: 7:c42ecff04e99
71 71 user: test
72 72 date: Thu Jan 01 00:00:00 1970 +0000
73 73 summary: Update#1 to f3
74 74
75 75
76 76 Interrupting the pull is safe
77 77 $ hg --config hooks.pretxnchangegroup.bad=false pull -q
78 78 transaction abort!
79 79 rollback completed
80 80 abort: pretxnchangegroup.bad hook exited with status 1
81 81 [40]
82 82 $ hg id
83 83 223311e70a6f tip
84 84
85 85 pull new changes down to the narrow clone. Should get 8 new changesets: 4
86 86 relevant to the narrow spec, and 4 ellipsis nodes gluing them all together.
87 87
88 88 $ hg pull
89 89 pulling from ssh://user@dummy/master
90 90 searching for changes
91 91 adding changesets
92 92 adding manifests
93 93 adding file changes
94 94 added 9 changesets with 4 changes to 2 files
95 95 new changesets *:* (glob)
96 96 (run 'hg update' to get a working copy)
97 97 $ hg log -T '{rev}: {desc}\n'
98 98 13: Update#2 to f10
99 99 12: Update#2 to f8
100 100 11: Update#2 to f7
101 101 10: Update#2 to f2
102 102 9: Update#2 to f1
103 103 8: Update#1 to f8
104 104 7: Update#1 to f7
105 105 6: Update#1 to f2
106 106 5: Update#1 to f1
107 107 4: Commit f10
108 108 3: Commit f8
109 109 2: Commit f7
110 110 1: Commit f2
111 111 0: Commit f1
112 112 $ hg update tip
113 113 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
114 114
115 115 add a change and push it
116 116
117 117 $ echo "update#3 2" >> f2
118 118 $ hg commit -m "Update#3 to f2" f2
119 119 $ hg log f2 -T '{rev}: {desc}\n'
120 120 14: Update#3 to f2
121 121 10: Update#2 to f2
122 122 6: Update#1 to f2
123 123 1: Commit f2
124 124 $ hg push
125 125 pushing to ssh://user@dummy/master
126 126 searching for changes
127 127 remote: adding changesets
128 128 remote: adding manifests
129 129 remote: adding file changes
130 130 remote: added 1 changesets with 1 changes to 1 files
131 131 $ cd ..
132 132
133 133 $ cd master
134 134 $ hg log f2 -T '{rev}: {desc}\n'
135 135 30: Update#3 to f2
136 136 21: Update#2 to f2
137 137 11: Update#1 to f2
138 138 1: Commit f2
139 139 $ hg log -l 3 -T '{rev}: {desc}\n'
140 140 30: Update#3 to f2
141 141 29: Update#2 to f10
142 142 28: Update#2 to f9
143 143
144 144 Can pull into repo with a single commit
145 145
146 146 $ cd ..
147 147 $ hg clone -q --narrow ssh://user@dummy/master narrow2 --include "f1" -r 0
148 148 $ cd narrow2
149 149 $ hg pull -q -r 1
150 remote: abort: unexpected error: unable to resolve parent while packing b'00manifest.i' 1 for changeset 0
150 151 transaction abort!
151 152 rollback completed
152 153 abort: pull failed on remote
153 154 [255]
@@ -1,119 +1,118 b''
1 1 #require no-windows
2 2
3 3 $ . "$TESTDIR/remotefilelog-library.sh"
4 4
5 5 $ hg init master
6 6 $ cd master
7 7 $ echo treemanifest >> .hg/requires
8 8 $ cat >> .hg/hgrc <<EOF
9 9 > [remotefilelog]
10 10 > server=True
11 11 > EOF
12 12 # uppercase directory name to test encoding
13 13 $ mkdir -p A/B
14 14 $ echo x > A/B/x
15 15 $ hg commit -qAm x
16 16
17 17 $ cd ..
18 18
19 19 # shallow clone from full
20 20
21 21 $ hgcloneshallow ssh://user@dummy/master shallow --noupdate
22 22 streaming all changes
23 23 4 files to transfer, 449 bytes of data
24 24 transferred 449 bytes in * seconds (*/sec) (glob)
25 25 searching for changes
26 26 no changes found
27 27 $ cd shallow
28 28 $ cat .hg/requires
29 29 dotencode
30 30 exp-remotefilelog-repo-req-1
31 31 fncache
32 32 generaldelta
33 33 revlogv1
34 34 sparserevlog
35 35 store
36 36 treemanifest
37 37 $ find .hg/store/meta | sort
38 38 .hg/store/meta
39 39 .hg/store/meta/_a
40 40 .hg/store/meta/_a/00manifest.i
41 41 .hg/store/meta/_a/_b
42 42 .hg/store/meta/_a/_b/00manifest.i
43 43
44 44 $ hg update
45 45 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
46 46 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
47 47
48 48 $ cat A/B/x
49 49 x
50 50
51 51 $ ls .hg/store/data
52 52 $ echo foo > A/B/F
53 53 $ hg add A/B/F
54 54 $ hg ci -m 'local content'
55 55 $ ls .hg/store/data
56 56 ca31988f085bfb945cb8115b78fabdee40f741aa
57 57
58 58 $ cd ..
59 59
60 60 # shallow clone from shallow
61 61
62 62 $ hgcloneshallow ssh://user@dummy/shallow shallow2 --noupdate
63 63 streaming all changes
64 64 5 files to transfer, 1008 bytes of data
65 65 transferred 1008 bytes in * seconds (*/sec) (glob)
66 66 searching for changes
67 67 no changes found
68 68 $ cd shallow2
69 69 $ cat .hg/requires
70 70 dotencode
71 71 exp-remotefilelog-repo-req-1
72 72 fncache
73 73 generaldelta
74 74 revlogv1
75 75 sparserevlog
76 76 store
77 77 treemanifest
78 78 $ ls .hg/store/data
79 79 ca31988f085bfb945cb8115b78fabdee40f741aa
80 80
81 81 $ hg update
82 82 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
83 83
84 84 $ cat A/B/x
85 85 x
86 86
87 87 $ cd ..
88 88
89 89 # full clone from shallow
90 90 # - send stderr to /dev/null because the order of stdout/err causes
91 91 # flakiness here
92 92 $ hg clone --noupdate ssh://user@dummy/shallow full 2>/dev/null
93 93 streaming all changes
94 remote: abort: Cannot clone from a shallow repo to a full repo.
95 94 [255]
96 95
97 96 # getbundle full clone
98 97
99 98 $ printf '[server]\npreferuncompressed=False\n' >> master/.hg/hgrc
100 99 $ hgcloneshallow ssh://user@dummy/master shallow3
101 100 requesting all changes
102 101 adding changesets
103 102 adding manifests
104 103 adding file changes
105 104 added 1 changesets with 0 changes to 0 files
106 105 new changesets 18d955ee7ba0
107 106 updating to branch default
108 107 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
109 108
110 109 $ ls shallow3/.hg/store/data
111 110 $ cat shallow3/.hg/requires
112 111 dotencode
113 112 exp-remotefilelog-repo-req-1
114 113 fncache
115 114 generaldelta
116 115 revlogv1
117 116 sparserevlog
118 117 store
119 118 treemanifest
@@ -1,115 +1,115 b''
1 1 #require no-windows
2 2
3 3 $ . "$TESTDIR/remotefilelog-library.sh"
4 4
5 5 $ hg init master
6 6 $ cd master
7 7 $ cat >> .hg/hgrc <<EOF
8 8 > [remotefilelog]
9 9 > server=True
10 10 > EOF
11 11 $ echo x > x
12 12 $ hg commit -qAm x
13 13
14 14 $ cd ..
15 15
16 16 # shallow clone from full
17 17
18 18 $ hgcloneshallow ssh://user@dummy/master shallow --noupdate
19 19 streaming all changes
20 20 2 files to transfer, 227 bytes of data
21 21 transferred 227 bytes in * seconds (*/sec) (glob)
22 22 searching for changes
23 23 no changes found
24 24 $ cd shallow
25 25 $ cat .hg/requires
26 26 dotencode
27 27 exp-remotefilelog-repo-req-1
28 28 fncache
29 29 generaldelta
30 30 revlogv1
31 31 sparserevlog
32 32 store
33 33
34 34 $ hg update
35 35 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
36 36 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
37 37
38 38 $ cat x
39 39 x
40 40
41 41 $ ls .hg/store/data
42 42 $ echo foo > f
43 43 $ hg add f
44 44 $ hg ci -m 'local content'
45 45 $ ls .hg/store/data
46 46 4a0a19218e082a343a1b17e5333409af9d98f0f5
47 47
48 48 $ cd ..
49 49
50 50 # shallow clone from shallow
51 51
52 52 $ hgcloneshallow ssh://user@dummy/shallow shallow2 --noupdate
53 53 streaming all changes
54 54 3 files to transfer, 564 bytes of data
55 55 transferred 564 bytes in * seconds (*/sec) (glob)
56 56 searching for changes
57 57 no changes found
58 58 $ cd shallow2
59 59 $ cat .hg/requires
60 60 dotencode
61 61 exp-remotefilelog-repo-req-1
62 62 fncache
63 63 generaldelta
64 64 revlogv1
65 65 sparserevlog
66 66 store
67 67 $ ls .hg/store/data
68 68 4a0a19218e082a343a1b17e5333409af9d98f0f5
69 69
70 70 $ hg update
71 71 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
72 72
73 73 $ cat x
74 74 x
75 75
76 76 $ cd ..
77 77
78 78 # full clone from shallow
79 79
80 80 Note: the output to STDERR comes from a different process to the output on
81 81 STDOUT and their relative ordering is not deterministic. As a result, the test
82 82 was failing sporadically. To avoid this, we capture STDERR to a file and
83 83 check its contents separately.
84 84
85 85 $ TEMP_STDERR=full-clone-from-shallow.stderr.tmp
86 86 $ hg clone --noupdate ssh://user@dummy/shallow full 2>$TEMP_STDERR
87 87 streaming all changes
88 remote: abort: Cannot clone from a shallow repo to a full repo.
89 88 [255]
90 89 $ cat $TEMP_STDERR
90 remote: abort: Cannot clone from a shallow repo to a full repo.
91 91 abort: pull failed on remote
92 92 $ rm $TEMP_STDERR
93 93
94 94 # getbundle full clone
95 95
96 96 $ printf '[server]\npreferuncompressed=False\n' >> master/.hg/hgrc
97 97 $ hgcloneshallow ssh://user@dummy/master shallow3
98 98 requesting all changes
99 99 adding changesets
100 100 adding manifests
101 101 adding file changes
102 102 added 1 changesets with 0 changes to 0 files
103 103 new changesets b292c1e3311f
104 104 updating to branch default
105 105 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
106 106
107 107 $ ls shallow3/.hg/store/data
108 108 $ cat shallow3/.hg/requires
109 109 dotencode
110 110 exp-remotefilelog-repo-req-1
111 111 fncache
112 112 generaldelta
113 113 revlogv1
114 114 sparserevlog
115 115 store