py3: fix sorting of obsolete markers when building bundle...
Denis Laxalde
r43438:01e8eefd default
@@ -1,3084 +1,3085
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullid,
17 17 nullrev,
18 18 )
19 19 from .thirdparty import attr
20 20 from . import (
21 21 bookmarks as bookmod,
22 22 bundle2,
23 23 changegroup,
24 24 discovery,
25 25 error,
26 26 exchangev2,
27 27 lock as lockmod,
28 28 logexchange,
29 29 narrowspec,
30 30 obsolete,
31 31 phases,
32 32 pushkey,
33 33 pycompat,
34 34 scmutil,
35 35 sslutil,
36 36 streamclone,
37 37 url as urlmod,
38 38 util,
39 39 wireprototypes,
40 40 )
41 41 from .interfaces import repository
42 42 from .utils import stringutil
43 43
44 44 urlerr = util.urlerr
45 45 urlreq = util.urlreq
46 46
47 47 _NARROWACL_SECTION = b'narrowacl'
48 48
49 49 # Maps bundle version human names to changegroup versions.
50 50 _bundlespeccgversions = {
51 51 b'v1': b'01',
52 52 b'v2': b'02',
53 53 b'packed1': b's1',
54 54 b'bundle2': b'02', # legacy
55 55 }
56 56
57 57 # Maps bundle version with content opts to choose which part to bundle
58 58 _bundlespeccontentopts = {
59 59 b'v1': {
60 60 b'changegroup': True,
61 61 b'cg.version': b'01',
62 62 b'obsolescence': False,
63 63 b'phases': False,
64 64 b'tagsfnodescache': False,
65 65 b'revbranchcache': False,
66 66 },
67 67 b'v2': {
68 68 b'changegroup': True,
69 69 b'cg.version': b'02',
70 70 b'obsolescence': False,
71 71 b'phases': False,
72 72 b'tagsfnodescache': True,
73 73 b'revbranchcache': True,
74 74 },
75 75 b'packed1': {b'cg.version': b's1'},
76 76 }
77 77 _bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']
78 78
79 79 _bundlespecvariants = {
80 80 b"streamv2": {
81 81 b"changegroup": False,
82 82 b"streamv2": True,
83 83 b"tagsfnodescache": False,
84 84 b"revbranchcache": False,
85 85 }
86 86 }
87 87
88 88 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
89 89 _bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
90 90
91 91
92 92 @attr.s
93 93 class bundlespec(object):
94 94 compression = attr.ib()
95 95 wirecompression = attr.ib()
96 96 version = attr.ib()
97 97 wireversion = attr.ib()
98 98 params = attr.ib()
99 99 contentopts = attr.ib()
100 100
101 101
102 102 def parsebundlespec(repo, spec, strict=True):
103 103 """Parse a bundle string specification into parts.
104 104
105 105 Bundle specifications denote a well-defined bundle/exchange format.
106 106 The content of a given specification should not change over time in
107 107 order to ensure that bundles produced by a newer version of Mercurial are
108 108 readable from an older version.
109 109
110 110 The string currently has the form:
111 111
112 112 <compression>-<type>[;<parameter0>[;<parameter1>]]
113 113
114 114 Where <compression> is one of the supported compression formats
115 115 and <type> is (currently) a version string. A ";" can follow the type and
116 116 all text afterwards is interpreted as URI encoded, ";" delimited key=value
117 117 pairs.
118 118
119 119 If ``strict`` is True (the default) <compression> is required. Otherwise,
120 120 it is optional.
121 121
122 122 Returns a bundlespec object of (compression, version, parameters).
123 123 Compression will be ``None`` if not in strict mode and a compression isn't
124 124 defined.
125 125
126 126 An ``InvalidBundleSpecification`` is raised when the specification is
127 127 not syntactically well formed.
128 128
129 129 An ``UnsupportedBundleSpecification`` is raised when the compression or
130 130 bundle type/version is not recognized.
131 131
132 132 Note: this function will likely eventually return a more complex data
133 133 structure, including bundle2 part information.
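
    Example (an illustrative sketch; the values follow from the
    compression and version tables defined earlier in this module):

      parsebundlespec(repo, b'gzip-v2;obsolescence=true')

    would return a bundlespec with compression ``gzip``, wire compression
    ``GZ``, version ``v2``, wire version ``02``, and params
    ``{b'obsolescence': b'true'}``.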
134 134 """
135 135
136 136 def parseparams(s):
137 137 if b';' not in s:
138 138 return s, {}
139 139
140 140 params = {}
141 141 version, paramstr = s.split(b';', 1)
142 142
143 143 for p in paramstr.split(b';'):
144 144 if b'=' not in p:
145 145 raise error.InvalidBundleSpecification(
146 146 _(
147 147 b'invalid bundle specification: '
148 148 b'missing "=" in parameter: %s'
149 149 )
150 150 % p
151 151 )
152 152
153 153 key, value = p.split(b'=', 1)
154 154 key = urlreq.unquote(key)
155 155 value = urlreq.unquote(value)
156 156 params[key] = value
157 157
158 158 return version, params
159 159
160 160 if strict and b'-' not in spec:
161 161 raise error.InvalidBundleSpecification(
162 162 _(
163 163 b'invalid bundle specification; '
164 164 b'must be prefixed with compression: %s'
165 165 )
166 166 % spec
167 167 )
168 168
169 169 if b'-' in spec:
170 170 compression, version = spec.split(b'-', 1)
171 171
172 172 if compression not in util.compengines.supportedbundlenames:
173 173 raise error.UnsupportedBundleSpecification(
174 174 _(b'%s compression is not supported') % compression
175 175 )
176 176
177 177 version, params = parseparams(version)
178 178
179 179 if version not in _bundlespeccgversions:
180 180 raise error.UnsupportedBundleSpecification(
181 181 _(b'%s is not a recognized bundle version') % version
182 182 )
183 183 else:
184 184 # Value could be just the compression or just the version, in which
185 185 # case some defaults are assumed (but only when not in strict mode).
186 186 assert not strict
187 187
188 188 spec, params = parseparams(spec)
189 189
190 190 if spec in util.compengines.supportedbundlenames:
191 191 compression = spec
192 192 version = b'v1'
193 193 # Generaldelta repos require v2.
194 194 if b'generaldelta' in repo.requirements:
195 195 version = b'v2'
196 196 # Modern compression engines require v2.
197 197 if compression not in _bundlespecv1compengines:
198 198 version = b'v2'
199 199 elif spec in _bundlespeccgversions:
200 200 if spec == b'packed1':
201 201 compression = b'none'
202 202 else:
203 203 compression = b'bzip2'
204 204 version = spec
205 205 else:
206 206 raise error.UnsupportedBundleSpecification(
207 207 _(b'%s is not a recognized bundle specification') % spec
208 208 )
209 209
210 210 # Bundle version 1 only supports a known set of compression engines.
211 211 if version == b'v1' and compression not in _bundlespecv1compengines:
212 212 raise error.UnsupportedBundleSpecification(
213 213 _(b'compression engine %s is not supported on v1 bundles')
214 214 % compression
215 215 )
216 216
217 217 # The specification for packed1 can optionally declare the data formats
218 218 # required to apply it. If we see this metadata, compare against what the
219 219 # repo supports and error if the bundle isn't compatible.
220 220 if version == b'packed1' and b'requirements' in params:
221 221 requirements = set(params[b'requirements'].split(b','))
222 222 missingreqs = requirements - repo.supportedformats
223 223 if missingreqs:
224 224 raise error.UnsupportedBundleSpecification(
225 225 _(b'missing support for repository features: %s')
226 226 % b', '.join(sorted(missingreqs))
227 227 )
228 228
229 229 # Compute contentopts based on the version
230 230 contentopts = _bundlespeccontentopts.get(version, {}).copy()
231 231
232 232 # Process the variants
233 233 if b"stream" in params and params[b"stream"] == b"v2":
234 234 variant = _bundlespecvariants[b"streamv2"]
235 235 contentopts.update(variant)
236 236
237 237 engine = util.compengines.forbundlename(compression)
238 238 compression, wirecompression = engine.bundletype()
239 239 wireversion = _bundlespeccgversions[version]
240 240
241 241 return bundlespec(
242 242 compression, wirecompression, version, wireversion, params, contentopts
243 243 )
244 244
245 245
246 246 def readbundle(ui, fh, fname, vfs=None):
247 247 header = changegroup.readexactly(fh, 4)
248 248
249 249 alg = None
250 250 if not fname:
251 251 fname = b"stream"
252 252 if not header.startswith(b'HG') and header.startswith(b'\0'):
253 253 fh = changegroup.headerlessfixup(fh, header)
254 254 header = b"HG10"
255 255 alg = b'UN'
256 256 elif vfs:
257 257 fname = vfs.join(fname)
258 258
259 259 magic, version = header[0:2], header[2:4]
260 260
261 261 if magic != b'HG':
262 262 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
263 263 if version == b'10':
264 264 if alg is None:
265 265 alg = changegroup.readexactly(fh, 2)
266 266 return changegroup.cg1unpacker(fh, alg)
267 267 elif version.startswith(b'2'):
268 268 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
269 269 elif version == b'S1':
270 270 return streamclone.streamcloneapplier(fh)
271 271 else:
272 272 raise error.Abort(
273 273 _(b'%s: unknown bundle version %s') % (fname, version)
274 274 )
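
# Illustrative usage of readbundle() (a sketch, not part of the original
# source; `ui` and `bundlepath` are assumed to exist):
#
#     with open(bundlepath, 'rb') as fh:
#         unbundler = readbundle(ui, fh, bundlepath)
#
# Per the dispatch above, a file starting with b'HG10' yields a
# cg1unpacker, b'HG20' a bundle2 unbundler, and b'HGS1' a
# streamcloneapplier.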
275 275
276 276
277 277 def getbundlespec(ui, fh):
278 278 """Infer the bundlespec from a bundle file handle.
279 279
280 280 The input file handle is seeked and the original seek position is not
281 281 restored.
282 282 """
283 283
284 284 def speccompression(alg):
285 285 try:
286 286 return util.compengines.forbundletype(alg).bundletype()[0]
287 287 except KeyError:
288 288 return None
289 289
290 290 b = readbundle(ui, fh, None)
291 291 if isinstance(b, changegroup.cg1unpacker):
292 292 alg = b._type
293 293 if alg == b'_truncatedBZ':
294 294 alg = b'BZ'
295 295 comp = speccompression(alg)
296 296 if not comp:
297 297 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
298 298 return b'%s-v1' % comp
299 299 elif isinstance(b, bundle2.unbundle20):
300 300 if b'Compression' in b.params:
301 301 comp = speccompression(b.params[b'Compression'])
302 302 if not comp:
303 303 raise error.Abort(
304 304 _(b'unknown compression algorithm: %s') % b.params[b'Compression']
305 305 )
306 306 else:
307 307 comp = b'none'
308 308
309 309 version = None
310 310 for part in b.iterparts():
311 311 if part.type == b'changegroup':
312 312 version = part.params[b'version']
313 313 if version in (b'01', b'02'):
314 314 version = b'v2'
315 315 else:
316 316 raise error.Abort(
317 317 _(
318 318 b'changegroup version %s does not have '
319 319 b'a known bundlespec'
320 320 )
321 321 % version,
322 322 hint=_(b'try upgrading your Mercurial client'),
323 323 )
324 324 elif part.type == b'stream2' and version is None:
325 325 # A stream2 part must be part of a v2 bundle
326 326 requirements = urlreq.unquote(part.params[b'requirements'])
327 327 splitted = requirements.split()
328 328 params = bundle2._formatrequirementsparams(splitted)
329 329 return b'none-v2;stream=v2;%s' % params
330 330
331 331 if not version:
332 332 raise error.Abort(
333 333 _(b'could not identify changegroup version in bundle')
334 334 )
335 335
336 336 return b'%s-%s' % (comp, version)
337 337 elif isinstance(b, streamclone.streamcloneapplier):
338 338 requirements = streamclone.readbundle1header(fh)[2]
339 339 formatted = bundle2._formatrequirementsparams(requirements)
340 340 return b'none-packed1;%s' % formatted
341 341 else:
342 342 raise error.Abort(_(b'unknown bundle type: %s') % b)
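
# Illustrative usage of getbundlespec() (a sketch; `ui` and `bundlepath`
# are assumed to exist), e.g. to recover the spec of an existing bundle
# file so a compatible one can be generated later:
#
#     with open(bundlepath, 'rb') as fh:
#         spec = getbundlespec(ui, fh)  # e.g. b'gzip-v1' or b'none-v2'
#
# Note the file handle is seeked and not restored, as documented above.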
343 343
344 344
345 345 def _computeoutgoing(repo, heads, common):
346 346 """Computes which revs are outgoing given a set of common
347 347 and a set of heads.
348 348
349 349 This is a separate function so extensions can have access to
350 350 the logic.
351 351
352 352 Returns a discovery.outgoing object.
353 353 """
354 354 cl = repo.changelog
355 355 if common:
356 356 hasnode = cl.hasnode
357 357 common = [n for n in common if hasnode(n)]
358 358 else:
359 359 common = [nullid]
360 360 if not heads:
361 361 heads = cl.heads()
362 362 return discovery.outgoing(repo, common, heads)
363 363
364 364
365 365 def _checkpublish(pushop):
366 366 repo = pushop.repo
367 367 ui = repo.ui
368 368 behavior = ui.config(b'experimental', b'auto-publish')
369 369 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
370 370 return
371 371 remotephases = listkeys(pushop.remote, b'phases')
372 372 if not remotephases.get(b'publishing', False):
373 373 return
374 374
375 375 if pushop.revs is None:
376 376 published = repo.filtered(b'served').revs(b'not public()')
377 377 else:
378 378 published = repo.revs(b'::%ln - public()', pushop.revs)
379 379 if published:
380 380 if behavior == b'warn':
381 381 ui.warn(
382 382 _(b'%i changesets about to be published\n') % len(published)
383 383 )
384 384 elif behavior == b'confirm':
385 385 if ui.promptchoice(
386 386 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
387 387 % len(published)
388 388 ):
389 389 raise error.Abort(_(b'user quit'))
390 390 elif behavior == b'abort':
391 391 msg = _(b'push would publish %i changesets') % len(published)
392 392 hint = _(
393 393 b"use --publish or adjust 'experimental.auto-publish'"
394 394 b" config"
395 395 )
396 396 raise error.Abort(msg, hint=hint)
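
# Illustrative configuration (a sketch) driving the auto-publish check
# above, e.g. in an hgrc:
#
#     [experimental]
#     auto-publish = abort
#
# With 'abort', a push that would publish draft changesets to a
# publishing server fails unless --publish is passed.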
397 397
398 398
399 399 def _forcebundle1(op):
400 400 """return true if a pull/push must use bundle1
401 401
402 402 This function is used to allow testing of the older bundle version"""
403 403 ui = op.repo.ui
404 404 # The goal of this config is to allow developers to choose the bundle
405 405 # version used during exchange. This is especially handy during tests.
406 406 # Value is a list of bundle versions to pick from; the highest version
407 407 # should be used.
408 408 #
409 409 # developer config: devel.legacy.exchange
410 410 exchange = ui.configlist(b'devel', b'legacy.exchange')
411 411 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
412 412 return forcebundle1 or not op.remote.capable(b'bundle2')
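
# Illustrative developer configuration (a sketch) exercising the
# devel.legacy.exchange knob read above, e.g. during tests:
#
#     [devel]
#     legacy.exchange = bundle1
#
# Listing 'bundle1' without 'bundle2' forces the legacy bundle format.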
413 413
414 414
415 415 class pushoperation(object):
416 416 """A object that represent a single push operation
417 417
418 418 Its purpose is to carry push related state and very common operations.
419 419
420 420 A new pushoperation should be created at the beginning of each push and
421 421 discarded afterward.
422 422 """
423 423
424 424 def __init__(
425 425 self,
426 426 repo,
427 427 remote,
428 428 force=False,
429 429 revs=None,
430 430 newbranch=False,
431 431 bookmarks=(),
432 432 publish=False,
433 433 pushvars=None,
434 434 ):
435 435 # repo we push from
436 436 self.repo = repo
437 437 self.ui = repo.ui
438 438 # repo we push to
439 439 self.remote = remote
440 440 # force option provided
441 441 self.force = force
442 442 # revs to be pushed (None is "all")
443 443 self.revs = revs
444 444 # bookmark explicitly pushed
445 445 self.bookmarks = bookmarks
446 446 # allow push of new branch
447 447 self.newbranch = newbranch
448 448 # steps already performed
449 449 # (used to check what steps have already been performed through bundle2)
450 450 self.stepsdone = set()
451 451 # Integer version of the changegroup push result
452 452 # - None means nothing to push
453 453 # - 0 means HTTP error
454 454 # - 1 means we pushed and remote head count is unchanged *or*
455 455 # we have outgoing changesets but refused to push
456 456 # - other values as described by addchangegroup()
457 457 self.cgresult = None
458 458 # Boolean value for the bookmark push
459 459 self.bkresult = None
460 460 # discover.outgoing object (contains common and outgoing data)
461 461 self.outgoing = None
462 462 # all remote topological heads before the push
463 463 self.remoteheads = None
464 464 # Details of the remote branch pre and post push
465 465 #
466 466 # mapping: {'branch': ([remoteheads],
467 467 # [newheads],
468 468 # [unsyncedheads],
469 469 # [discardedheads])}
470 470 # - branch: the branch name
471 471 # - remoteheads: the list of remote heads known locally
472 472 # None if the branch is new
473 473 # - newheads: the new remote heads (known locally) with outgoing pushed
474 474 # - unsyncedheads: the list of remote heads unknown locally.
475 475 # - discardedheads: the list of remote heads made obsolete by the push
476 476 self.pushbranchmap = None
477 477 # testable as a boolean indicating if any nodes are missing locally.
478 478 self.incoming = None
479 479 # summary of the remote phase situation
480 480 self.remotephases = None
481 481 # phase changes that must be pushed alongside the changesets
482 482 self.outdatedphases = None
483 483 # phase changes that must be pushed if the changeset push fails
484 484 self.fallbackoutdatedphases = None
485 485 # outgoing obsmarkers
486 486 self.outobsmarkers = set()
487 487 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
488 488 self.outbookmarks = []
489 489 # transaction manager
490 490 self.trmanager = None
491 491 # map { pushkey partid -> callback handling failure}
492 492 # used to handle exception from mandatory pushkey part failure
493 493 self.pkfailcb = {}
494 494 # an iterable of pushvars or None
495 495 self.pushvars = pushvars
496 496 # publish pushed changesets
497 497 self.publish = publish
498 498
499 499 @util.propertycache
500 500 def futureheads(self):
501 501 """future remote heads if the changeset push succeeds"""
502 502 return self.outgoing.missingheads
503 503
504 504 @util.propertycache
505 505 def fallbackheads(self):
506 506 """future remote heads if the changeset push fails"""
507 507 if self.revs is None:
508 508 # no target to push, all common heads are relevant
509 509 return self.outgoing.commonheads
510 510 unfi = self.repo.unfiltered()
511 511 # I want cheads = heads(::missingheads and ::commonheads)
512 512 # (missingheads is revs with secret changeset filtered out)
513 513 #
514 514 # This can be expressed as:
515 515 # cheads = ( (missingheads and ::commonheads)
516 516 # + (commonheads and ::missingheads)
517 517 # )
518 518 #
519 519 # while trying to push we already computed the following:
520 520 # common = (::commonheads)
521 521 # missing = ((commonheads::missingheads) - commonheads)
522 522 #
523 523 # We can pick:
524 524 # * missingheads part of common (::commonheads)
525 525 common = self.outgoing.common
526 526 nm = self.repo.changelog.nodemap
527 527 cheads = [node for node in self.revs if nm[node] in common]
528 528 # and
529 529 # * commonheads parents on missing
530 530 revset = unfi.set(
531 531 b'%ln and parents(roots(%ln))',
532 532 self.outgoing.commonheads,
533 533 self.outgoing.missing,
534 534 )
535 535 cheads.extend(c.node() for c in revset)
536 536 return cheads
537 537
538 538 @property
539 539 def commonheads(self):
540 540 """set of all common heads after changeset bundle push"""
541 541 if self.cgresult:
542 542 return self.futureheads
543 543 else:
544 544 return self.fallbackheads
545 545
546 546
547 547 # mapping of messages used when pushing bookmarks
548 548 bookmsgmap = {
549 549 b'update': (
550 550 _(b"updating bookmark %s\n"),
551 551 _(b'updating bookmark %s failed!\n'),
552 552 ),
553 553 b'export': (
554 554 _(b"exporting bookmark %s\n"),
555 555 _(b'exporting bookmark %s failed!\n'),
556 556 ),
557 557 b'delete': (
558 558 _(b"deleting remote bookmark %s\n"),
559 559 _(b'deleting remote bookmark %s failed!\n'),
560 560 ),
561 561 }
562 562
563 563
564 564 def push(
565 565 repo,
566 566 remote,
567 567 force=False,
568 568 revs=None,
569 569 newbranch=False,
570 570 bookmarks=(),
571 571 publish=False,
572 572 opargs=None,
573 573 ):
574 574 '''Push outgoing changesets (limited by revs) from a local
575 575 repository to remote. Return an integer:
576 576 - None means nothing to push
577 577 - 0 means HTTP error
578 578 - 1 means we pushed and remote head count is unchanged *or*
579 579 we have outgoing changesets but refused to push
580 580 - other values as described by addchangegroup()
581 581 '''
582 582 if opargs is None:
583 583 opargs = {}
584 584 pushop = pushoperation(
585 585 repo,
586 586 remote,
587 587 force,
588 588 revs,
589 589 newbranch,
590 590 bookmarks,
591 591 publish,
592 592 **pycompat.strkwargs(opargs)
593 593 )
594 594 if pushop.remote.local():
595 595 missing = (
596 596 set(pushop.repo.requirements) - pushop.remote.local().supported
597 597 )
598 598 if missing:
599 599 msg = _(
600 600 b"required features are not"
601 601 b" supported in the destination:"
602 602 b" %s"
603 603 ) % (b', '.join(sorted(missing)))
604 604 raise error.Abort(msg)
605 605
606 606 if not pushop.remote.canpush():
607 607 raise error.Abort(_(b"destination does not support push"))
608 608
609 609 if not pushop.remote.capable(b'unbundle'):
610 610 raise error.Abort(
611 611 _(
612 612 b'cannot push: destination does not support the '
613 613 b'unbundle wire protocol command'
614 614 )
615 615 )
616 616
617 617 # get lock as we might write phase data
618 618 wlock = lock = None
619 619 try:
620 620 # bundle2 push may receive a reply bundle touching bookmarks
621 621 # requiring the wlock. Take it now to ensure proper ordering.
622 622 maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
623 623 if (
624 624 (not _forcebundle1(pushop))
625 625 and maypushback
626 626 and not bookmod.bookmarksinstore(repo)
627 627 ):
628 628 wlock = pushop.repo.wlock()
629 629 lock = pushop.repo.lock()
630 630 pushop.trmanager = transactionmanager(
631 631 pushop.repo, b'push-response', pushop.remote.url()
632 632 )
633 633 except error.LockUnavailable as err:
634 634 # source repo cannot be locked.
635 635 # We do not abort the push, but just disable the local phase
636 636 # synchronisation.
637 637 msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
638 638 err
639 639 )
640 640 pushop.ui.debug(msg)
641 641
642 642 with wlock or util.nullcontextmanager():
643 643 with lock or util.nullcontextmanager():
644 644 with pushop.trmanager or util.nullcontextmanager():
645 645 pushop.repo.checkpush(pushop)
646 646 _checkpublish(pushop)
647 647 _pushdiscovery(pushop)
648 648 if not _forcebundle1(pushop):
649 649 _pushbundle2(pushop)
650 650 _pushchangeset(pushop)
651 651 _pushsyncphase(pushop)
652 652 _pushobsolete(pushop)
653 653 _pushbookmark(pushop)
654 654
655 655 if repo.ui.configbool(b'experimental', b'remotenames'):
656 656 logexchange.pullremotenames(repo, remote)
657 657
658 658 return pushop
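
# Illustrative usage of push() (a sketch; `repo` and `remote` are assumed
# to be a local repository and a peer obtained elsewhere):
#
#     pushop = push(repo, remote, revs=[repo[b'tip'].node()])
#     if pushop.cgresult == 0:
#         ...  # HTTP error, per the return-code legend in the docstring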
659 659
660 660
661 661 # list of steps to perform discovery before push
662 662 pushdiscoveryorder = []
663 663
664 664 # Mapping between step name and function
665 665 #
666 666 # This exists to help extensions wrap steps if necessary
667 667 pushdiscoverymapping = {}
668 668
669 669
670 670 def pushdiscovery(stepname):
671 671 """decorator for function performing discovery before push
672 672
673 673 The function is added to the step -> function mapping and appended to the
674 674 list of steps. Beware that decorated functions will be added in order (this
675 675 may matter).
676 676
677 677 You can only use this decorator for a new step; if you want to wrap a step
678 678 from an extension, change the pushdiscoverymapping dictionary directly."""
679 679
680 680 def dec(func):
681 681 assert stepname not in pushdiscoverymapping
682 682 pushdiscoverymapping[stepname] = func
683 683 pushdiscoveryorder.append(stepname)
684 684 return func
685 685
686 686 return dec
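
# Illustrative sketch of registering a new discovery step with the
# decorator above (the step name and function are hypothetical):
#
#     @pushdiscovery(b'my-extra-check')
#     def _pushdiscoverymycheck(pushop):
#         pushop.ui.debug(b'running my extra discovery check\n')
#
# Decoration order matters: this step would run after all previously
# registered ones.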
687 687
688 688
689 689 def _pushdiscovery(pushop):
690 690 """Run all discovery steps"""
691 691 for stepname in pushdiscoveryorder:
692 692 step = pushdiscoverymapping[stepname]
693 693 step(pushop)
694 694
695 695
696 696 @pushdiscovery(b'changeset')
697 697 def _pushdiscoverychangeset(pushop):
698 698 """discover the changeset that need to be pushed"""
699 699 fci = discovery.findcommonincoming
700 700 if pushop.revs:
701 701 commoninc = fci(
702 702 pushop.repo,
703 703 pushop.remote,
704 704 force=pushop.force,
705 705 ancestorsof=pushop.revs,
706 706 )
707 707 else:
708 708 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
709 709 common, inc, remoteheads = commoninc
710 710 fco = discovery.findcommonoutgoing
711 711 outgoing = fco(
712 712 pushop.repo,
713 713 pushop.remote,
714 714 onlyheads=pushop.revs,
715 715 commoninc=commoninc,
716 716 force=pushop.force,
717 717 )
718 718 pushop.outgoing = outgoing
719 719 pushop.remoteheads = remoteheads
720 720 pushop.incoming = inc
721 721
722 722
723 723 @pushdiscovery(b'phase')
724 724 def _pushdiscoveryphase(pushop):
725 725 """discover the phase that needs to be pushed
726 726
727 727 (computed for both success and failure case for changesets push)"""
728 728 outgoing = pushop.outgoing
729 729 unfi = pushop.repo.unfiltered()
730 730 remotephases = listkeys(pushop.remote, b'phases')
731 731
732 732 if (
733 733 pushop.ui.configbool(b'ui', b'_usedassubrepo')
734 734 and remotephases # server supports phases
735 735 and not pushop.outgoing.missing # no changesets to be pushed
736 736 and remotephases.get(b'publishing', False)
737 737 ):
738 738 # When:
739 739 # - this is a subrepo push
740 740 # - and remote supports phases
741 741 # - and no changesets are to be pushed
742 742 # - and remote is publishing
743 743 # We may be in issue 3871 case!
744 744 # We skip the phase synchronisation that would otherwise be done as a
745 745 # courtesy and would publish changesets that are possibly draft locally
746 746 # but already public on the remote.
747 747 pushop.outdatedphases = []
748 748 pushop.fallbackoutdatedphases = []
749 749 return
750 750
751 751 pushop.remotephases = phases.remotephasessummary(
752 752 pushop.repo, pushop.fallbackheads, remotephases
753 753 )
754 754 droots = pushop.remotephases.draftroots
755 755
756 756 extracond = b''
757 757 if not pushop.remotephases.publishing:
758 758 extracond = b' and public()'
759 759 revset = b'heads((%%ln::%%ln) %s)' % extracond
760 760 # Get the list of all revs draft on remote but public here.
761 761 # XXX Beware that the revset breaks if droots is not strictly
762 762 # XXX roots; we may want to ensure it is, but that is costly
763 763 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
764 764 if not pushop.remotephases.publishing and pushop.publish:
765 765 future = list(
766 766 unfi.set(
767 767 b'%ln and (not public() or %ln::)', pushop.futureheads, droots
768 768 )
769 769 )
770 770 elif not outgoing.missing:
771 771 future = fallback
772 772 else:
773 773 # add the changesets we are going to push as draft
774 774 #
775 775 # should not be necessary for a publishing server, but because of an
776 776 # issue fixed in xxxxx we have to do it anyway.
777 777 fdroots = list(
778 778 unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
779 779 )
780 780 fdroots = [f.node() for f in fdroots]
781 781 future = list(unfi.set(revset, fdroots, pushop.futureheads))
782 782 pushop.outdatedphases = future
783 783 pushop.fallbackoutdatedphases = fallback
784 784
785 785
786 786 @pushdiscovery(b'obsmarker')
787 787 def _pushdiscoveryobsmarkers(pushop):
788 788 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
789 789 return
790 790
791 791 if not pushop.repo.obsstore:
792 792 return
793 793
794 794 if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
795 795 return
796 796
797 797 repo = pushop.repo
798 798 # Very naive computation that can be quite expensive on a big repo.
799 799 # However, evolution is currently slow on such repos anyway.
800 800 nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
801 801 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
802 802
803 803
804 804 @pushdiscovery(b'bookmarks')
805 805 def _pushdiscoverybookmarks(pushop):
806 806 ui = pushop.ui
807 807 repo = pushop.repo.unfiltered()
808 808 remote = pushop.remote
809 809 ui.debug(b"checking for updated bookmarks\n")
810 810 ancestors = ()
811 811 if pushop.revs:
812 812 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
813 813 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
814 814
815 815 remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
816 816
817 817 explicit = {
818 818 repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
819 819 }
820 820
821 821 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
822 822 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
823 823
824 824
825 825 def _processcompared(pushop, pushed, explicit, remotebms, comp):
826 826 """take decision on bookmarks to push to the remote repo
827 827
828 828 Exists to help extensions alter this behavior.
829 829 """
830 830 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
831 831
832 832 repo = pushop.repo
833 833
834 834 for b, scid, dcid in advsrc:
835 835 if b in explicit:
836 836 explicit.remove(b)
837 837 if not pushed or repo[scid].rev() in pushed:
838 838 pushop.outbookmarks.append((b, dcid, scid))
839 839 # search added bookmark
840 840 for b, scid, dcid in addsrc:
841 841 if b in explicit:
842 842 explicit.remove(b)
843 843 pushop.outbookmarks.append((b, b'', scid))
844 844 # search for overwritten bookmark
845 845 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
846 846 if b in explicit:
847 847 explicit.remove(b)
848 848 pushop.outbookmarks.append((b, dcid, scid))
849 849 # search for bookmark to delete
850 850 for b, scid, dcid in adddst:
851 851 if b in explicit:
852 852 explicit.remove(b)
853 853 # treat as "deleted locally"
854 854 pushop.outbookmarks.append((b, dcid, b''))
855 855 # identical bookmarks shouldn't get reported
856 856 for b, scid, dcid in same:
857 857 if b in explicit:
858 858 explicit.remove(b)
859 859
860 860 if explicit:
861 861 explicit = sorted(explicit)
862 862 # we should probably list all of them
863 863 pushop.ui.warn(
864 864 _(
865 865 b'bookmark %s does not exist on the local '
866 866 b'or remote repository!\n'
867 867 )
868 868 % explicit[0]
869 869 )
870 870 pushop.bkresult = 2
871 871
872 872 pushop.outbookmarks.sort()
873 873
874 874
875 875 def _pushcheckoutgoing(pushop):
876 876 outgoing = pushop.outgoing
877 877 unfi = pushop.repo.unfiltered()
878 878 if not outgoing.missing:
879 879 # nothing to push
880 880 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
881 881 return False
882 882 # something to push
883 883 if not pushop.force:
884 884 # if repo.obsstore is empty --> no obsolete markers,
885 885 # so we can save the iteration
886 886 if unfi.obsstore:
887 887 # these messages are here for 80-char limit reasons
888 888 mso = _(b"push includes obsolete changeset: %s!")
889 889 mspd = _(b"push includes phase-divergent changeset: %s!")
890 890 mscd = _(b"push includes content-divergent changeset: %s!")
891 891 mst = {
892 892 b"orphan": _(b"push includes orphan changeset: %s!"),
893 893 b"phase-divergent": mspd,
894 894 b"content-divergent": mscd,
895 895 }
896 896 # If we push and there is at least one
897 897 # obsolete or unstable changeset in missing, at
898 898 # least one of the missing heads will be obsolete or
899 899 # unstable. So checking heads only is ok
900 900 for node in outgoing.missingheads:
901 901 ctx = unfi[node]
902 902 if ctx.obsolete():
903 903 raise error.Abort(mso % ctx)
904 904 elif ctx.isunstable():
905 905 # TODO print more than one instability in the abort
906 906 # message
907 907 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
908 908
909 909 discovery.checkheads(pushop)
910 910 return True
911 911
912 912
913 913 # List of names of steps to perform for an outgoing bundle2, order matters.
914 914 b2partsgenorder = []
915 915
916 916 # Mapping between step name and function
917 917 #
918 918 # This exists to help extensions wrap steps if necessary
919 919 b2partsgenmapping = {}
920 920
921 921
922 922 def b2partsgenerator(stepname, idx=None):
923 923 """decorator for function generating bundle2 part
924 924
925 925 The function is added to the step -> function mapping and appended to the
926 926 list of steps. Beware that decorated functions will be added in order
927 927 (this may matter).
928 928
929 929 You can only use this decorator for new steps; if you want to wrap a step
930 930 from an extension, modify the b2partsgenmapping dictionary directly."""
931 931
932 932 def dec(func):
933 933 assert stepname not in b2partsgenmapping
934 934 b2partsgenmapping[stepname] = func
935 935 if idx is None:
936 936 b2partsgenorder.append(stepname)
937 937 else:
938 938 b2partsgenorder.insert(idx, stepname)
939 939 return func
940 940
941 941 return dec
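
# Illustrative sketch of the idx parameter above (the part name and
# function are hypothetical): an extension whose part must be generated
# first could register at index 0:
#
#     @b2partsgenerator(b'my-part', idx=0)
#     def _pushb2mypart(pushop, bundler):
#         bundler.newpart(b'my-part', data=b'', mandatory=False)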
942 942
943 943
944 944 def _pushb2ctxcheckheads(pushop, bundler):
945 945 """Generate race condition checking parts
946 946
947 947 Exists as an independent function to aid extensions
948 948 """
949 949 # * 'force' does not check for push races,
950 950 # * if we don't push anything, there is nothing to check.
951 951 if not pushop.force and pushop.outgoing.missingheads:
952 952 allowunrelated = b'related' in bundler.capabilities.get(
953 953 b'checkheads', ()
954 954 )
955 955 emptyremote = pushop.pushbranchmap is None
956 956 if not allowunrelated or emptyremote:
957 957 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
958 958 else:
959 959 affected = set()
960 960 for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
961 961 remoteheads, newheads, unsyncedheads, discardedheads = heads
962 962 if remoteheads is not None:
963 963 remote = set(remoteheads)
964 964 affected |= set(discardedheads) & remote
965 965 affected |= remote - set(newheads)
966 966 if affected:
967 967 data = iter(sorted(affected))
968 968 bundler.newpart(b'check:updated-heads', data=data)
969 969
970 970
971 971 def _pushing(pushop):
972 972 """return True if we are pushing anything"""
973 973 return bool(
974 974 pushop.outgoing.missing
975 975 or pushop.outdatedphases
976 976 or pushop.outobsmarkers
977 977 or pushop.outbookmarks
978 978 )
979 979
980 980
981 981 @b2partsgenerator(b'check-bookmarks')
982 982 def _pushb2checkbookmarks(pushop, bundler):
983 983 """insert bookmark move checking"""
984 984 if not _pushing(pushop) or pushop.force:
985 985 return
986 986 b2caps = bundle2.bundle2caps(pushop.remote)
987 987 hasbookmarkcheck = b'bookmarks' in b2caps
988 988 if not (pushop.outbookmarks and hasbookmarkcheck):
989 989 return
990 990 data = []
991 991 for book, old, new in pushop.outbookmarks:
992 992 data.append((book, old))
993 993 checkdata = bookmod.binaryencode(data)
994 994 bundler.newpart(b'check:bookmarks', data=checkdata)
995 995
996 996
997 997 @b2partsgenerator(b'check-phases')
998 998 def _pushb2checkphases(pushop, bundler):
999 999 """insert phase move checking"""
1000 1000 if not _pushing(pushop) or pushop.force:
1001 1001 return
1002 1002 b2caps = bundle2.bundle2caps(pushop.remote)
1003 1003 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1004 1004 if pushop.remotephases is not None and hasphaseheads:
1005 1005 # check that the remote phase has not changed
1006 1006 checks = [[] for p in phases.allphases]
1007 1007 checks[phases.public].extend(pushop.remotephases.publicheads)
1008 1008 checks[phases.draft].extend(pushop.remotephases.draftroots)
1009 1009 if any(checks):
1010 1010 for nodes in checks:
1011 1011 nodes.sort()
1012 1012 checkdata = phases.binaryencode(checks)
1013 1013 bundler.newpart(b'check:phases', data=checkdata)
1014 1014
1015 1015
1016 1016 @b2partsgenerator(b'changeset')
1017 1017 def _pushb2ctx(pushop, bundler):
1018 1018 """handle changegroup push through bundle2
1019 1019
1020 1020 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
1021 1021 """
1022 1022 if b'changesets' in pushop.stepsdone:
1023 1023 return
1024 1024 pushop.stepsdone.add(b'changesets')
1025 1025 # Send known heads to the server for race detection.
1026 1026 if not _pushcheckoutgoing(pushop):
1027 1027 return
1028 1028 pushop.repo.prepushoutgoinghooks(pushop)
1029 1029
1030 1030 _pushb2ctxcheckheads(pushop, bundler)
1031 1031
1032 1032 b2caps = bundle2.bundle2caps(pushop.remote)
1033 1033 version = b'01'
1034 1034 cgversions = b2caps.get(b'changegroup')
1035 1035 if cgversions: # 3.1 and 3.2 ship with an empty value
1036 1036 cgversions = [
1037 1037 v
1038 1038 for v in cgversions
1039 1039 if v in changegroup.supportedoutgoingversions(pushop.repo)
1040 1040 ]
1041 1041 if not cgversions:
1042 1042 raise error.Abort(_(b'no common changegroup version'))
1043 1043 version = max(cgversions)
1044 1044 cgstream = changegroup.makestream(
1045 1045 pushop.repo, pushop.outgoing, version, b'push'
1046 1046 )
1047 1047 cgpart = bundler.newpart(b'changegroup', data=cgstream)
1048 1048 if cgversions:
1049 1049 cgpart.addparam(b'version', version)
1050 1050 if b'treemanifest' in pushop.repo.requirements:
1051 1051 cgpart.addparam(b'treemanifest', b'1')
1052 1052 if b'exp-sidedata-flag' in pushop.repo.requirements:
1053 1053 cgpart.addparam(b'exp-sidedata', b'1')
1054 1054
1055 1055 def handlereply(op):
1056 1056 """extract addchangegroup returns from server reply"""
1057 1057 cgreplies = op.records.getreplies(cgpart.id)
1058 1058 assert len(cgreplies[b'changegroup']) == 1
1059 1059 pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
1060 1060
1061 1061 return handlereply
1062 1062
1063 1063
1064 1064 @b2partsgenerator(b'phase')
1065 1065 def _pushb2phases(pushop, bundler):
1066 1066 """handle phase push through bundle2"""
1067 1067 if b'phases' in pushop.stepsdone:
1068 1068 return
1069 1069 b2caps = bundle2.bundle2caps(pushop.remote)
1070 1070 ui = pushop.repo.ui
1071 1071
1072 1072 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1073 1073 haspushkey = b'pushkey' in b2caps
1074 1074 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1075 1075
1076 1076 if hasphaseheads and not legacyphase:
1077 1077 return _pushb2phaseheads(pushop, bundler)
1078 1078 elif haspushkey:
1079 1079 return _pushb2phasespushkey(pushop, bundler)
1080 1080
1081 1081
1082 1082 def _pushb2phaseheads(pushop, bundler):
1083 1083 """push phase information through a bundle2 - binary part"""
1084 1084 pushop.stepsdone.add(b'phases')
1085 1085 if pushop.outdatedphases:
1086 1086 updates = [[] for p in phases.allphases]
1087 1087 updates[0].extend(h.node() for h in pushop.outdatedphases)
1088 1088 phasedata = phases.binaryencode(updates)
1089 1089 bundler.newpart(b'phase-heads', data=phasedata)
1090 1090
1091 1091
1092 1092 def _pushb2phasespushkey(pushop, bundler):
1093 1093 """push phase information through a bundle2 - pushkey part"""
1094 1094 pushop.stepsdone.add(b'phases')
1095 1095 part2node = []
1096 1096
1097 1097 def handlefailure(pushop, exc):
1098 1098 targetid = int(exc.partid)
1099 1099 for partid, node in part2node:
1100 1100 if partid == targetid:
1101 1101 raise error.Abort(_(b'updating %s to public failed') % node)
1102 1102
1103 1103 enc = pushkey.encode
1104 1104 for newremotehead in pushop.outdatedphases:
1105 1105 part = bundler.newpart(b'pushkey')
1106 1106 part.addparam(b'namespace', enc(b'phases'))
1107 1107 part.addparam(b'key', enc(newremotehead.hex()))
1108 1108 part.addparam(b'old', enc(b'%d' % phases.draft))
1109 1109 part.addparam(b'new', enc(b'%d' % phases.public))
1110 1110 part2node.append((part.id, newremotehead))
1111 1111 pushop.pkfailcb[part.id] = handlefailure
1112 1112
1113 1113 def handlereply(op):
1114 1114 for partid, node in part2node:
1115 1115 partrep = op.records.getreplies(partid)
1116 1116 results = partrep[b'pushkey']
1117 1117 assert len(results) <= 1
1118 1118 msg = None
1119 1119 if not results:
1120 1120 msg = _(b'server ignored update of %s to public!\n') % node
1121 1121 elif not int(results[0][b'return']):
1122 1122 msg = _(b'updating %s to public failed!\n') % node
1123 1123 if msg is not None:
1124 1124 pushop.ui.warn(msg)
1125 1125
1126 1126 return handlereply
1127 1127
1128 1128
1129 1129 @b2partsgenerator(b'obsmarkers')
1130 1130 def _pushb2obsmarkers(pushop, bundler):
1131 1131 if b'obsmarkers' in pushop.stepsdone:
1132 1132 return
1133 1133 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
1134 1134 if obsolete.commonversion(remoteversions) is None:
1135 1135 return
1136 1136 pushop.stepsdone.add(b'obsmarkers')
1137 1137 if pushop.outobsmarkers:
1138 1138 markers = sorted(pushop.outobsmarkers)
1139 1139 bundle2.buildobsmarkerspart(bundler, markers)
1140 1140
1141 1141
1142 1142 @b2partsgenerator(b'bookmarks')
1143 1143 def _pushb2bookmarks(pushop, bundler):
1144 1144 """handle bookmark push through bundle2"""
1145 1145 if b'bookmarks' in pushop.stepsdone:
1146 1146 return
1147 1147 b2caps = bundle2.bundle2caps(pushop.remote)
1148 1148
1149 1149 legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
1150 1150 legacybooks = b'bookmarks' in legacy
1151 1151
1152 1152 if not legacybooks and b'bookmarks' in b2caps:
1153 1153 return _pushb2bookmarkspart(pushop, bundler)
1154 1154 elif b'pushkey' in b2caps:
1155 1155 return _pushb2bookmarkspushkey(pushop, bundler)
1156 1156
1157 1157
1158 1158 def _bmaction(old, new):
1159 1159 """small utility for bookmark pushing"""
1160 1160 if not old:
1161 1161 return b'export'
1162 1162 elif not new:
1163 1163 return b'delete'
1164 1164 return b'update'
1165 1165
1166 1166
1167 1167 def _abortonsecretctx(pushop, node, b):
1168 1168 """abort if a given bookmark points to a secret changeset"""
1169 1169 if node and pushop.repo[node].phase() == phases.secret:
1170 1170 raise error.Abort(
1171 1171 _(b'cannot push bookmark %s as it points to a secret changeset') % b
1172 1172 )
1173 1173
1174 1174
1175 1175 def _pushb2bookmarkspart(pushop, bundler):
1176 1176 pushop.stepsdone.add(b'bookmarks')
1177 1177 if not pushop.outbookmarks:
1178 1178 return
1179 1179
1180 1180 allactions = []
1181 1181 data = []
1182 1182 for book, old, new in pushop.outbookmarks:
1183 1183 _abortonsecretctx(pushop, new, book)
1184 1184 data.append((book, new))
1185 1185 allactions.append((book, _bmaction(old, new)))
1186 1186 checkdata = bookmod.binaryencode(data)
1187 1187 bundler.newpart(b'bookmarks', data=checkdata)
1188 1188
1189 1189 def handlereply(op):
1190 1190 ui = pushop.ui
1191 1191 # if success
1192 1192 for book, action in allactions:
1193 1193 ui.status(bookmsgmap[action][0] % book)
1194 1194
1195 1195 return handlereply
1196 1196
1197 1197
1198 1198 def _pushb2bookmarkspushkey(pushop, bundler):
1199 1199 pushop.stepsdone.add(b'bookmarks')
1200 1200 part2book = []
1201 1201 enc = pushkey.encode
1202 1202
1203 1203 def handlefailure(pushop, exc):
1204 1204 targetid = int(exc.partid)
1205 1205 for partid, book, action in part2book:
1206 1206 if partid == targetid:
1207 1207 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1208 1208 # we should not be called for parts we did not generate
1209 1209 assert False
1210 1210
1211 1211 for book, old, new in pushop.outbookmarks:
1212 1212 _abortonsecretctx(pushop, new, book)
1213 1213 part = bundler.newpart(b'pushkey')
1214 1214 part.addparam(b'namespace', enc(b'bookmarks'))
1215 1215 part.addparam(b'key', enc(book))
1216 1216 part.addparam(b'old', enc(hex(old)))
1217 1217 part.addparam(b'new', enc(hex(new)))
1218 1218 action = b'update'
1219 1219 if not old:
1220 1220 action = b'export'
1221 1221 elif not new:
1222 1222 action = b'delete'
1223 1223 part2book.append((part.id, book, action))
1224 1224 pushop.pkfailcb[part.id] = handlefailure
1225 1225
1226 1226 def handlereply(op):
1227 1227 ui = pushop.ui
1228 1228 for partid, book, action in part2book:
1229 1229 partrep = op.records.getreplies(partid)
1230 1230 results = partrep[b'pushkey']
1231 1231 assert len(results) <= 1
1232 1232 if not results:
1233 1233 pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
1234 1234 else:
1235 1235 ret = int(results[0][b'return'])
1236 1236 if ret:
1237 1237 ui.status(bookmsgmap[action][0] % book)
1238 1238 else:
1239 1239 ui.warn(bookmsgmap[action][1] % book)
1240 1240 if pushop.bkresult is not None:
1241 1241 pushop.bkresult = 1
1242 1242
1243 1243 return handlereply
1244 1244
1245 1245
1246 1246 @b2partsgenerator(b'pushvars', idx=0)
1247 1247 def _getbundlesendvars(pushop, bundler):
1248 1248 '''send shellvars via bundle2'''
1249 1249 pushvars = pushop.pushvars
1250 1250 if pushvars:
1251 1251 shellvars = {}
1252 1252 for raw in pushvars:
1253 1253 if b'=' not in raw:
1254 1254 msg = (
1255 1255 b"unable to parse variable '%s', should follow "
1256 1256 b"'KEY=VALUE' or 'KEY=' format"
1257 1257 )
1258 1258 raise error.Abort(msg % raw)
1259 1259 k, v = raw.split(b'=', 1)
1260 1260 shellvars[k] = v
1261 1261
1262 1262 part = bundler.newpart(b'pushvars')
1263 1263
1264 1264 for key, value in pycompat.iteritems(shellvars):
1265 1265 part.addparam(key, value, mandatory=False)
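
# Illustrative note: the variables parsed above typically originate from
# the command line, e.g.
#
#     hg push --pushvars "DEBUG=1"
#
# and are exposed to server-side hooks (as HG_USERVAR_DEBUG in this
# example), assuming the server enables pushvars.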
1266 1266
1267 1267
1268 1268 def _pushbundle2(pushop):
1269 1269 """push data to the remote using bundle2
1270 1270
1271 1271 The only currently supported type of data is changegroup but this will
1272 1272 evolve in the future."""
1273 1273 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1274 1274 pushback = pushop.trmanager and pushop.ui.configbool(
1275 1275 b'experimental', b'bundle2.pushback'
1276 1276 )
1277 1277
1278 1278 # create reply capability
1279 1279 capsblob = bundle2.encodecaps(
1280 1280 bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
1281 1281 )
1282 1282 bundler.newpart(b'replycaps', data=capsblob)
1283 1283 replyhandlers = []
1284 1284 for partgenname in b2partsgenorder:
1285 1285 partgen = b2partsgenmapping[partgenname]
1286 1286 ret = partgen(pushop, bundler)
1287 1287 if callable(ret):
1288 1288 replyhandlers.append(ret)
1289 1289 # do not push if nothing to push
1290 1290 if bundler.nbparts <= 1:
1291 1291 return
1292 1292 stream = util.chunkbuffer(bundler.getchunks())
1293 1293 try:
1294 1294 try:
1295 1295 with pushop.remote.commandexecutor() as e:
1296 1296 reply = e.callcommand(
1297 1297 b'unbundle',
1298 1298 {
1299 1299 b'bundle': stream,
1300 1300 b'heads': [b'force'],
1301 1301 b'url': pushop.remote.url(),
1302 1302 },
1303 1303 ).result()
1304 1304 except error.BundleValueError as exc:
1305 1305 raise error.Abort(_(b'missing support for %s') % exc)
1306 1306 try:
1307 1307 trgetter = None
1308 1308 if pushback:
1309 1309 trgetter = pushop.trmanager.transaction
1310 1310 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1311 1311 except error.BundleValueError as exc:
1312 1312 raise error.Abort(_(b'missing support for %s') % exc)
1313 1313 except bundle2.AbortFromPart as exc:
1314 1314 pushop.ui.status(_(b'remote: %s\n') % exc)
1315 1315 if exc.hint is not None:
1316 1316 pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1317 1317 raise error.Abort(_(b'push failed on remote'))
1318 1318 except error.PushkeyFailed as exc:
1319 1319 partid = int(exc.partid)
1320 1320 if partid not in pushop.pkfailcb:
1321 1321 raise
1322 1322 pushop.pkfailcb[partid](pushop, exc)
1323 1323 for rephand in replyhandlers:
1324 1324 rephand(op)
1325 1325
1326 1326
1327 1327 def _pushchangeset(pushop):
1328 1328 """Make the actual push of changeset bundle to remote repo"""
1329 1329 if b'changesets' in pushop.stepsdone:
1330 1330 return
1331 1331 pushop.stepsdone.add(b'changesets')
1332 1332 if not _pushcheckoutgoing(pushop):
1333 1333 return
1334 1334
1335 1335 # Should have verified this in push().
1336 1336 assert pushop.remote.capable(b'unbundle')
1337 1337
1338 1338 pushop.repo.prepushoutgoinghooks(pushop)
1339 1339 outgoing = pushop.outgoing
1340 1340 # TODO: get bundlecaps from remote
1341 1341 bundlecaps = None
1342 1342 # create a changegroup from local
1343 1343 if pushop.revs is None and not (
1344 1344 outgoing.excluded or pushop.repo.changelog.filteredrevs
1345 1345 ):
1346 1346 # push everything,
1347 1347 # use the fast path, no race possible on push
1348 1348 cg = changegroup.makechangegroup(
1349 1349 pushop.repo,
1350 1350 outgoing,
1351 1351 b'01',
1352 1352 b'push',
1353 1353 fastpath=True,
1354 1354 bundlecaps=bundlecaps,
1355 1355 )
1356 1356 else:
1357 1357 cg = changegroup.makechangegroup(
1358 1358 pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
1359 1359 )
1360 1360
1361 1361 # apply changegroup to remote
1362 1362 # local repo finds heads on server, finds out what
1363 1363 # revs it must push. once revs transferred, if server
1364 1364 # finds it has different heads (someone else won
1365 1365 # commit/push race), server aborts.
1366 1366 if pushop.force:
1367 1367 remoteheads = [b'force']
1368 1368 else:
1369 1369 remoteheads = pushop.remoteheads
1370 1370 # ssh: return remote's addchangegroup()
1371 1371 # http: return remote's addchangegroup() or 0 for error
1372 1372 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1373 1373
1374 1374
1375 1375 def _pushsyncphase(pushop):
1376 1376 """synchronise phase information locally and remotely"""
1377 1377 cheads = pushop.commonheads
1378 1378 # even when we don't push, exchanging phase data is useful
1379 1379 remotephases = listkeys(pushop.remote, b'phases')
1380 1380 if (
1381 1381 pushop.ui.configbool(b'ui', b'_usedassubrepo')
1382 1382 and remotephases # server supports phases
1383 1383 and pushop.cgresult is None # nothing was pushed
1384 1384 and remotephases.get(b'publishing', False)
1385 1385 ):
1386 1386 # When:
1387 1387 # - this is a subrepo push
1388 1388 # - and remote supports phases
1389 1389 # - and no changeset was pushed
1390 1390 # - and remote is publishing
1391 1391 # We may be in issue 3871 case!
1392 1392 # We skip the phase synchronisation that would otherwise be done as a
1393 1393 # courtesy and would publish changesets that are possibly draft locally
1394 1394 # but already public on the remote.
1395 1395 remotephases = {b'publishing': b'True'}
1396 1396 if not remotephases: # old server or public-only reply from non-publishing
1397 1397 _localphasemove(pushop, cheads)
1398 1398 # don't push any phase data as there is nothing to push
1399 1399 else:
1400 1400 ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
1401 1401 pheads, droots = ana
1402 1402 ### Apply remote phase on local
1403 1403 if remotephases.get(b'publishing', False):
1404 1404 _localphasemove(pushop, cheads)
1405 1405 else: # publish = False
1406 1406 _localphasemove(pushop, pheads)
1407 1407 _localphasemove(pushop, cheads, phases.draft)
1408 1408 ### Apply local phase on remote
1409 1409
1410 1410 if pushop.cgresult:
1411 1411 if b'phases' in pushop.stepsdone:
1412 1412 # phases already pushed through bundle2
1413 1413 return
1414 1414 outdated = pushop.outdatedphases
1415 1415 else:
1416 1416 outdated = pushop.fallbackoutdatedphases
1417 1417
1418 1418 pushop.stepsdone.add(b'phases')
1419 1419
1420 1420 # filter heads already turned public by the push
1421 1421 outdated = [c for c in outdated if c.node() not in pheads]
1422 1422 # fallback to independent pushkey command
1423 1423 for newremotehead in outdated:
1424 1424 with pushop.remote.commandexecutor() as e:
1425 1425 r = e.callcommand(
1426 1426 b'pushkey',
1427 1427 {
1428 1428 b'namespace': b'phases',
1429 1429 b'key': newremotehead.hex(),
1430 1430 b'old': b'%d' % phases.draft,
1431 1431 b'new': b'%d' % phases.public,
1432 1432 },
1433 1433 ).result()
1434 1434
1435 1435 if not r:
1436 1436 pushop.ui.warn(
1437 1437 _(b'updating %s to public failed!\n') % newremotehead
1438 1438 )
1439 1439
1440 1440
1441 1441 def _localphasemove(pushop, nodes, phase=phases.public):
1442 1442 """move <nodes> to <phase> in the local source repo"""
1443 1443 if pushop.trmanager:
1444 1444 phases.advanceboundary(
1445 1445 pushop.repo, pushop.trmanager.transaction(), phase, nodes
1446 1446 )
1447 1447 else:
1448 1448 # repo is not locked, do not change any phases!
1449 1449 # Informs the user that phases should have been moved when
1450 1450 # applicable.
1451 1451 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1452 1452 phasestr = phases.phasenames[phase]
1453 1453 if actualmoves:
1454 1454 pushop.ui.status(
1455 1455 _(
1456 1456 b'cannot lock source repo, skipping '
1457 1457 b'local %s phase update\n'
1458 1458 )
1459 1459 % phasestr
1460 1460 )
1461 1461
1462 1462
1463 1463 def _pushobsolete(pushop):
1464 1464 """utility function to push obsolete markers to a remote"""
1465 1465 if b'obsmarkers' in pushop.stepsdone:
1466 1466 return
1467 1467 repo = pushop.repo
1468 1468 remote = pushop.remote
1469 1469 pushop.stepsdone.add(b'obsmarkers')
1470 1470 if pushop.outobsmarkers:
1471 1471 pushop.ui.debug(b'try to push obsolete markers to remote\n')
1472 1472 rslts = []
1473 1473 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1474 1474 for key in sorted(remotedata, reverse=True):
1475 1475 # reverse sort to ensure we end with dump0
1476 1476 data = remotedata[key]
1477 1477 rslts.append(remote.pushkey(b'obsolete', key, b'', data))
1478 1478 if [r for r in rslts if not r]:
1479 1479 msg = _(b'failed to push some obsolete markers!\n')
1480 1480 repo.ui.warn(msg)
1481 1481
1482 1482
1483 1483 def _pushbookmark(pushop):
1484 1484 """Update bookmark position on remote"""
1485 1485 if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
1486 1486 return
1487 1487 pushop.stepsdone.add(b'bookmarks')
1488 1488 ui = pushop.ui
1489 1489 remote = pushop.remote
1490 1490
1491 1491 for b, old, new in pushop.outbookmarks:
1492 1492 action = b'update'
1493 1493 if not old:
1494 1494 action = b'export'
1495 1495 elif not new:
1496 1496 action = b'delete'
1497 1497
1498 1498 with remote.commandexecutor() as e:
1499 1499 r = e.callcommand(
1500 1500 b'pushkey',
1501 1501 {
1502 1502 b'namespace': b'bookmarks',
1503 1503 b'key': b,
1504 1504 b'old': hex(old),
1505 1505 b'new': hex(new),
1506 1506 },
1507 1507 ).result()
1508 1508
1509 1509 if r:
1510 1510 ui.status(bookmsgmap[action][0] % b)
1511 1511 else:
1512 1512 ui.warn(bookmsgmap[action][1] % b)
1513 1513 # discovery may have set the value from an invalid entry
1514 1514 if pushop.bkresult is not None:
1515 1515 pushop.bkresult = 1
1516 1516
1517 1517
1518 1518 class pulloperation(object):
1519 1519 """A object that represent a single pull operation
1520 1520
1521 1521 It purpose is to carry pull related state and very common operation.
1522 1522
1523 1523 A new should be created at the beginning of each pull and discarded
1524 1524 afterward.
1525 1525 """
1526 1526
1527 1527 def __init__(
1528 1528 self,
1529 1529 repo,
1530 1530 remote,
1531 1531 heads=None,
1532 1532 force=False,
1533 1533 bookmarks=(),
1534 1534 remotebookmarks=None,
1535 1535 streamclonerequested=None,
1536 1536 includepats=None,
1537 1537 excludepats=None,
1538 1538 depth=None,
1539 1539 ):
1540 1540 # repo we pull into
1541 1541 self.repo = repo
1542 1542 # repo we pull from
1543 1543 self.remote = remote
1544 1544 # revision we try to pull (None is "all")
1545 1545 self.heads = heads
1546 1546 # bookmarks pulled explicitly
1547 1547 self.explicitbookmarks = [
1548 1548 repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
1549 1549 ]
1550 1550 # do we force pull?
1551 1551 self.force = force
1552 1552 # whether a streaming clone was requested
1553 1553 self.streamclonerequested = streamclonerequested
1554 1554 # transaction manager
1555 1555 self.trmanager = None
1556 1556 # set of common changesets between local and remote before pull
1557 1557 self.common = None
1558 1558 # set of pulled heads
1559 1559 self.rheads = None
1560 1560 # list of missing changesets to fetch remotely
1561 1561 self.fetch = None
1562 1562 # remote bookmarks data
1563 1563 self.remotebookmarks = remotebookmarks
1564 1564 # result of changegroup pulling (used as return code by pull)
1565 1565 self.cgresult = None
1566 1566 # list of steps already done
1567 1567 self.stepsdone = set()
1568 1568 # Whether we attempted a clone from pre-generated bundles.
1569 1569 self.clonebundleattempted = False
1570 1570 # Set of file patterns to include.
1571 1571 self.includepats = includepats
1572 1572 # Set of file patterns to exclude.
1573 1573 self.excludepats = excludepats
1574 1574 # Number of ancestor changesets to pull from each pulled head.
1575 1575 self.depth = depth
1576 1576
1577 1577 @util.propertycache
1578 1578 def pulledsubset(self):
1579 1579 """heads of the set of changeset target by the pull"""
1580 1580 # compute target subset
1581 1581 if self.heads is None:
1582 1582 # We pulled everything possible
1583 1583 # sync on everything common
1584 1584 c = set(self.common)
1585 1585 ret = list(self.common)
1586 1586 for n in self.rheads:
1587 1587 if n not in c:
1588 1588 ret.append(n)
1589 1589 return ret
1590 1590 else:
1591 1591 # We pulled a specific subset
1592 1592 # sync on this subset
1593 1593 return self.heads
1594 1594
1595 1595 @util.propertycache
1596 1596 def canusebundle2(self):
1597 1597 return not _forcebundle1(self)
1598 1598
1599 1599 @util.propertycache
1600 1600 def remotebundle2caps(self):
1601 1601 return bundle2.bundle2caps(self.remote)
1602 1602
1603 1603 def gettransaction(self):
1604 1604 # deprecated; talk to trmanager directly
1605 1605 return self.trmanager.transaction()
1606 1606
1607 1607
1608 1608 class transactionmanager(util.transactional):
1609 1609 """An object to manage the life cycle of a transaction
1610 1610
1611 1611 It creates the transaction on demand and calls the appropriate hooks when
1612 1612 closing the transaction."""
1613 1613
1614 1614 def __init__(self, repo, source, url):
1615 1615 self.repo = repo
1616 1616 self.source = source
1617 1617 self.url = url
1618 1618 self._tr = None
1619 1619
1620 1620 def transaction(self):
1621 1621 """Return an open transaction object, constructing if necessary"""
1622 1622 if not self._tr:
1623 1623 trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
1624 1624 self._tr = self.repo.transaction(trname)
1625 1625 self._tr.hookargs[b'source'] = self.source
1626 1626 self._tr.hookargs[b'url'] = self.url
1627 1627 return self._tr
1628 1628
1629 1629 def close(self):
1630 1630 """close transaction if created"""
1631 1631 if self._tr is not None:
1632 1632 self._tr.close()
1633 1633
1634 1634 def release(self):
1635 1635 """release transaction if created"""
1636 1636 if self._tr is not None:
1637 1637 self._tr.release()
1638 1638
1639 1639
1640 1640 def listkeys(remote, namespace):
1641 1641 with remote.commandexecutor() as e:
1642 1642 return e.callcommand(b'listkeys', {b'namespace': namespace}).result()
1643 1643
1644 1644
1645 1645 def _fullpullbundle2(repo, pullop):
1646 1646 # The server may send a partial reply, i.e. when inlining
1647 1647 # pre-computed bundles. In that case, update the common
1648 1648 # set based on the results and pull another bundle.
1649 1649 #
1650 1650 # There are two indicators that the process is finished:
1651 1651 # - no changeset has been added, or
1652 1652 # - all remote heads are known locally.
1653 1653 # The head check must use the unfiltered view as obsolescence
1654 1654 # markers can hide heads.
1655 1655 unfi = repo.unfiltered()
1656 1656 unficl = unfi.changelog
1657 1657
1658 1658 def headsofdiff(h1, h2):
1659 1659 """Returns heads(h1 % h2)"""
1660 1660 res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
1661 1661 return set(ctx.node() for ctx in res)
1662 1662
1663 1663 def headsofunion(h1, h2):
1664 1664 """Returns heads((h1 + h2) - null)"""
1665 1665 res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
1666 1666 return set(ctx.node() for ctx in res)
1667 1667
1668 1668 while True:
1669 1669 old_heads = unficl.heads()
1670 1670 clstart = len(unficl)
1671 1671 _pullbundle2(pullop)
1672 1672 if repository.NARROW_REQUIREMENT in repo.requirements:
1673 1673 # XXX narrow clones filter the heads on the server side during
1674 1674 # XXX getbundle and result in partial replies as well.
1675 1675 # XXX Disable pull bundles in this case as band aid to avoid
1676 1676 # XXX extra round trips.
1677 1677 break
1678 1678 if clstart == len(unficl):
1679 1679 break
1680 1680 if all(unficl.hasnode(n) for n in pullop.rheads):
1681 1681 break
1682 1682 new_heads = headsofdiff(unficl.heads(), old_heads)
1683 1683 pullop.common = headsofunion(new_heads, pullop.common)
1684 1684 pullop.rheads = set(pullop.rheads) - pullop.common
1685 1685
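# Editor's note: a minimal standalone sketch (not part of upstream
# exchange.py) of the fixed point computed by the loop above, with the
# repository replaced by a plain set of received nodes and the server by a
# sequence of "partial bundle" node batches; all names are hypothetical.
def _example_fullpull_fixedpoint(server_batches, rheads):
    have = set()
    for batch in server_batches:  # one _pullbundle2() round trip each
        before = len(have)
        have |= set(batch)
        if len(have) == before:  # no changeset added: done
            break
        if set(rheads) <= have:  # all remote heads known locally: done
            break
    return have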
1686 1686
1687 1687 def pull(
1688 1688 repo,
1689 1689 remote,
1690 1690 heads=None,
1691 1691 force=False,
1692 1692 bookmarks=(),
1693 1693 opargs=None,
1694 1694 streamclonerequested=None,
1695 1695 includepats=None,
1696 1696 excludepats=None,
1697 1697 depth=None,
1698 1698 ):
1699 1699 """Fetch repository data from a remote.
1700 1700
1701 1701 This is the main function used to retrieve data from a remote repository.
1702 1702
1703 1703 ``repo`` is the local repository to clone into.
1704 1704 ``remote`` is a peer instance.
1705 1705 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1706 1706 default) means to pull everything from the remote.
1707 1707 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1708 1708 default, all remote bookmarks are pulled.
1709 1709 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1710 1710 initialization.
1711 1711 ``streamclonerequested`` is a boolean indicating whether a "streaming
1712 1712 clone" is requested. A "streaming clone" is essentially a raw file copy
1713 1713 of revlogs from the server. This only works when the local repository is
1714 1714 empty. The default value of ``None`` means to respect the server
1715 1715 configuration for preferring stream clones.
1716 1716 ``includepats`` and ``excludepats`` define explicit file patterns to
1717 1717 include and exclude in storage, respectively. If not defined, narrow
1718 1718 patterns from the repo instance are used, if available.
1719 1719 ``depth`` is an integer indicating the DAG depth of history we're
1720 1720 interested in. If defined, for each revision specified in ``heads``, we
1721 1721 will fetch up to this many of its ancestors and data associated with them.
1722 1722
1723 1723 Returns the ``pulloperation`` created for this pull.
1724 1724 """
1725 1725 if opargs is None:
1726 1726 opargs = {}
1727 1727
1728 1728 # We allow the narrow patterns to be passed in explicitly to provide more
1729 1729 # flexibility for API consumers.
1730 1730 if includepats or excludepats:
1731 1731 includepats = includepats or set()
1732 1732 excludepats = excludepats or set()
1733 1733 else:
1734 1734 includepats, excludepats = repo.narrowpats
1735 1735
1736 1736 narrowspec.validatepatterns(includepats)
1737 1737 narrowspec.validatepatterns(excludepats)
1738 1738
1739 1739 pullop = pulloperation(
1740 1740 repo,
1741 1741 remote,
1742 1742 heads,
1743 1743 force,
1744 1744 bookmarks=bookmarks,
1745 1745 streamclonerequested=streamclonerequested,
1746 1746 includepats=includepats,
1747 1747 excludepats=excludepats,
1748 1748 depth=depth,
1749 1749 **pycompat.strkwargs(opargs)
1750 1750 )
1751 1751
1752 1752 peerlocal = pullop.remote.local()
1753 1753 if peerlocal:
1754 1754 missing = set(peerlocal.requirements) - pullop.repo.supported
1755 1755 if missing:
1756 1756 msg = _(
1757 1757 b"required features are not"
1758 1758 b" supported in the destination:"
1759 1759 b" %s"
1760 1760 ) % (b', '.join(sorted(missing)))
1761 1761 raise error.Abort(msg)
1762 1762
1763 1763 pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
1764 1764 wlock = util.nullcontextmanager()
1765 1765 if not bookmod.bookmarksinstore(repo):
1766 1766 wlock = repo.wlock()
1767 1767 with wlock, repo.lock(), pullop.trmanager:
1768 1768 # Use the modern wire protocol, if available.
1769 1769 if remote.capable(b'command-changesetdata'):
1770 1770 exchangev2.pull(pullop)
1771 1771 else:
1772 1772 # This should ideally be in _pullbundle2(). However, it needs to run
1773 1773 # before discovery to avoid extra work.
1774 1774 _maybeapplyclonebundle(pullop)
1775 1775 streamclone.maybeperformlegacystreamclone(pullop)
1776 1776 _pulldiscovery(pullop)
1777 1777 if pullop.canusebundle2:
1778 1778 _fullpullbundle2(repo, pullop)
1779 1779 _pullchangeset(pullop)
1780 1780 _pullphase(pullop)
1781 1781 _pullbookmarks(pullop)
1782 1782 _pullobsolete(pullop)
1783 1783
1784 1784 # storing remotenames
1785 1785 if repo.ui.configbool(b'experimental', b'remotenames'):
1786 1786 logexchange.pullremotenames(repo, remote)
1787 1787
1788 1788 return pullop
1789 1789
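# Editor's note: a hedged usage sketch (not part of upstream exchange.py)
# showing how a caller typically drives pull(); ``hg.peer`` is the usual
# way to obtain a peer, and the URL here is hypothetical.
def _example_pull(repo, url=b'https://example.com/repo'):
    from mercurial import hg
    peer = hg.peer(repo.ui, {}, url)
    pullop = pull(repo, peer)  # heads=None pulls everything
    # cgresult carries the changegroup application result; commands
    # commonly derive their exit status from it.
    return pullop.cgresult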
1790 1790
1791 1791 # list of steps to perform discovery before pull
1792 1792 pulldiscoveryorder = []
1793 1793
1794 1794 # Mapping between step name and function
1795 1795 #
1796 1796 # This exists to help extensions wrap steps if necessary
1797 1797 pulldiscoverymapping = {}
1798 1798
1799 1799
1800 1800 def pulldiscovery(stepname):
1801 1801 """decorator for function performing discovery before pull
1802 1802
1803 1803 The function is added to the step -> function mapping and appended to the
1804 1804 list of steps. Beware that decorated functions will be added in order (this
1805 1805 may matter).
1806 1806
1807 1807 You can only use this decorator for a new step; if you want to wrap a step
1808 1808 from an extension, change the pulldiscoverymapping dictionary directly."""
1809 1809
1810 1810 def dec(func):
1811 1811 assert stepname not in pulldiscoverymapping
1812 1812 pulldiscoverymapping[stepname] = func
1813 1813 pulldiscoveryorder.append(stepname)
1814 1814 return func
1815 1815
1816 1816 return dec
1817 1817
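# Editor's note: an illustrative sketch (not part of upstream exchange.py)
# of registering a custom discovery step with the decorator above; the
# step name and body are hypothetical.
@pulldiscovery(b'example:noop')
def _pulldiscoveryexample(pullop):
    pullop.repo.ui.debug(b'example discovery step ran\n')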
1818 1818
1819 1819 def _pulldiscovery(pullop):
1820 1820 """Run all discovery steps"""
1821 1821 for stepname in pulldiscoveryorder:
1822 1822 step = pulldiscoverymapping[stepname]
1823 1823 step(pullop)
1824 1824
1825 1825
1826 1826 @pulldiscovery(b'b1:bookmarks')
1827 1827 def _pullbookmarkbundle1(pullop):
1828 1828 """fetch bookmark data in bundle1 case
1829 1829
1830 1830 If not using bundle2, we have to fetch bookmarks before changeset
1831 1831 discovery to reduce the chance and impact of race conditions."""
1832 1832 if pullop.remotebookmarks is not None:
1833 1833 return
1834 1834 if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
1835 1835 # all known bundle2 servers now support listkeys, but let's be nice with
1836 1836 # new implementations.
1837 1837 return
1838 1838 books = listkeys(pullop.remote, b'bookmarks')
1839 1839 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1840 1840
1841 1841
1842 1842 @pulldiscovery(b'changegroup')
1843 1843 def _pulldiscoverychangegroup(pullop):
1844 1844 """discovery phase for the pull
1845 1845
1846 1846 Currently handles changeset discovery only; will change to handle all
1847 1847 discovery at some point."""
1848 1848 tmp = discovery.findcommonincoming(
1849 1849 pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
1850 1850 )
1851 1851 common, fetch, rheads = tmp
1852 1852 nm = pullop.repo.unfiltered().changelog.nodemap
1853 1853 if fetch and rheads:
1854 1854 # If a remote head is filtered locally, put it back in common.
1855 1855 #
1856 1856 # This is a hackish solution to catch most of the "common but locally
1857 1857 # hidden" situations. We do not perform discovery on the unfiltered
1858 1858 # repository because it ends up doing a pathological amount of round
1859 1859 # trips for a huge amount of changesets we do not care about.
1860 1860 #
1861 1861 # If a set of such "common but filtered" changesets exists on the server
1862 1862 # but does not include a remote head, we'll not be able to detect it.
1863 1863 scommon = set(common)
1864 1864 for n in rheads:
1865 1865 if n in nm:
1866 1866 if n not in scommon:
1867 1867 common.append(n)
1868 1868 if set(rheads).issubset(set(common)):
1869 1869 fetch = []
1870 1870 pullop.common = common
1871 1871 pullop.fetch = fetch
1872 1872 pullop.rheads = rheads
1873 1873
1874 1874
1875 1875 def _pullbundle2(pullop):
1876 1876 """pull data using bundle2
1877 1877
1878 1878 For now, the only supported data is the changegroup."""
1879 1879 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1880 1880
1881 1881 # make ui easier to access
1882 1882 ui = pullop.repo.ui
1883 1883
1884 1884 # At the moment we don't do stream clones over bundle2. If that is
1885 1885 # implemented then here's where the check for that will go.
1886 1886 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1887 1887
1888 1888 # declare pull perimeters
1889 1889 kwargs[b'common'] = pullop.common
1890 1890 kwargs[b'heads'] = pullop.heads or pullop.rheads
1891 1891
1892 1892 # check whether the server supports narrow; if so, add includepats and excludepats
1893 1893 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1894 1894 if servernarrow and pullop.includepats:
1895 1895 kwargs[b'includepats'] = pullop.includepats
1896 1896 if servernarrow and pullop.excludepats:
1897 1897 kwargs[b'excludepats'] = pullop.excludepats
1898 1898
1899 1899 if streaming:
1900 1900 kwargs[b'cg'] = False
1901 1901 kwargs[b'stream'] = True
1902 1902 pullop.stepsdone.add(b'changegroup')
1903 1903 pullop.stepsdone.add(b'phases')
1904 1904
1905 1905 else:
1906 1906 # pulling changegroup
1907 1907 pullop.stepsdone.add(b'changegroup')
1908 1908
1909 1909 kwargs[b'cg'] = pullop.fetch
1910 1910
1911 1911 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1912 1912 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1913 1913 if not legacyphase and hasbinaryphase:
1914 1914 kwargs[b'phases'] = True
1915 1915 pullop.stepsdone.add(b'phases')
1916 1916
1917 1917 if b'listkeys' in pullop.remotebundle2caps:
1918 1918 if b'phases' not in pullop.stepsdone:
1919 1919 kwargs[b'listkeys'] = [b'phases']
1920 1920
1921 1921 bookmarksrequested = False
1922 1922 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1923 1923 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1924 1924
1925 1925 if pullop.remotebookmarks is not None:
1926 1926 pullop.stepsdone.add(b'request-bookmarks')
1927 1927
1928 1928 if (
1929 1929 b'request-bookmarks' not in pullop.stepsdone
1930 1930 and pullop.remotebookmarks is None
1931 1931 and not legacybookmark
1932 1932 and hasbinarybook
1933 1933 ):
1934 1934 kwargs[b'bookmarks'] = True
1935 1935 bookmarksrequested = True
1936 1936
1937 1937 if b'listkeys' in pullop.remotebundle2caps:
1938 1938 if b'request-bookmarks' not in pullop.stepsdone:
1939 1939 # make sure to always include bookmark data when migrating
1940 1940 # `hg incoming --bundle` to using this function.
1941 1941 pullop.stepsdone.add(b'request-bookmarks')
1942 1942 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1943 1943
1944 1944 # If this is a full pull / clone and the server supports the clone bundles
1945 1945 # feature, tell the server whether we attempted a clone bundle. The
1946 1946 # presence of this flag indicates the client supports clone bundles. This
1947 1947 # will enable the server to treat clients that support clone bundles
1948 1948 # differently from those that don't.
1949 1949 if (
1950 1950 pullop.remote.capable(b'clonebundles')
1951 1951 and pullop.heads is None
1952 1952 and list(pullop.common) == [nullid]
1953 1953 ):
1954 1954 kwargs[b'cbattempted'] = pullop.clonebundleattempted
1955 1955
1956 1956 if streaming:
1957 1957 pullop.repo.ui.status(_(b'streaming all changes\n'))
1958 1958 elif not pullop.fetch:
1959 1959 pullop.repo.ui.status(_(b"no changes found\n"))
1960 1960 pullop.cgresult = 0
1961 1961 else:
1962 1962 if pullop.heads is None and list(pullop.common) == [nullid]:
1963 1963 pullop.repo.ui.status(_(b"requesting all changes\n"))
1964 1964 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1965 1965 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1966 1966 if obsolete.commonversion(remoteversions) is not None:
1967 1967 kwargs[b'obsmarkers'] = True
1968 1968 pullop.stepsdone.add(b'obsmarkers')
1969 1969 _pullbundle2extraprepare(pullop, kwargs)
1970 1970
1971 1971 with pullop.remote.commandexecutor() as e:
1972 1972 args = dict(kwargs)
1973 1973 args[b'source'] = b'pull'
1974 1974 bundle = e.callcommand(b'getbundle', args).result()
1975 1975
1976 1976 try:
1977 1977 op = bundle2.bundleoperation(
1978 1978 pullop.repo, pullop.gettransaction, source=b'pull'
1979 1979 )
1980 1980 op.modes[b'bookmarks'] = b'records'
1981 1981 bundle2.processbundle(pullop.repo, bundle, op=op)
1982 1982 except bundle2.AbortFromPart as exc:
1983 1983 pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
1984 1984 raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
1985 1985 except error.BundleValueError as exc:
1986 1986 raise error.Abort(_(b'missing support for %s') % exc)
1987 1987
1988 1988 if pullop.fetch:
1989 1989 pullop.cgresult = bundle2.combinechangegroupresults(op)
1990 1990
1991 1991 # processing phases change
1992 1992 for namespace, value in op.records[b'listkeys']:
1993 1993 if namespace == b'phases':
1994 1994 _pullapplyphases(pullop, value)
1995 1995
1996 1996 # processing bookmark update
1997 1997 if bookmarksrequested:
1998 1998 books = {}
1999 1999 for record in op.records[b'bookmarks']:
2000 2000 books[record[b'bookmark']] = record[b"node"]
2001 2001 pullop.remotebookmarks = books
2002 2002 else:
2003 2003 for namespace, value in op.records[b'listkeys']:
2004 2004 if namespace == b'bookmarks':
2005 2005 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
2006 2006
2007 2007 # bookmark data were either already there or pulled in the bundle
2008 2008 if pullop.remotebookmarks is not None:
2009 2009 _pullbookmarks(pullop)
2010 2010
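# Editor's note: for orientation, the ``getbundle`` argument set assembled
# above for a plain bundle2 pull might look like this (values hypothetical):
#
#     {b'bundlecaps': {b'HG20', b'bundle2=...'},
#      b'common': [...], b'heads': [...],
#      b'cg': True, b'phases': True, b'bookmarks': True,
#      b'source': b'pull'}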
2011 2011
2012 2012 def _pullbundle2extraprepare(pullop, kwargs):
2013 2013 """hook function so that extensions can extend the getbundle call"""
2014 2014
2015 2015
2016 2016 def _pullchangeset(pullop):
2017 2017 """pull changeset from unbundle into the local repo"""
2018 2018 # We delay opening the transaction as late as possible so we
2019 2019 # don't open a transaction for nothing and don't break a future useful
2020 2020 # rollback call
2021 2021 if b'changegroup' in pullop.stepsdone:
2022 2022 return
2023 2023 pullop.stepsdone.add(b'changegroup')
2024 2024 if not pullop.fetch:
2025 2025 pullop.repo.ui.status(_(b"no changes found\n"))
2026 2026 pullop.cgresult = 0
2027 2027 return
2028 2028 tr = pullop.gettransaction()
2029 2029 if pullop.heads is None and list(pullop.common) == [nullid]:
2030 2030 pullop.repo.ui.status(_(b"requesting all changes\n"))
2031 2031 elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
2032 2032 # issue1320, avoid a race if remote changed after discovery
2033 2033 pullop.heads = pullop.rheads
2034 2034
2035 2035 if pullop.remote.capable(b'getbundle'):
2036 2036 # TODO: get bundlecaps from remote
2037 2037 cg = pullop.remote.getbundle(
2038 2038 b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
2039 2039 )
2040 2040 elif pullop.heads is None:
2041 2041 with pullop.remote.commandexecutor() as e:
2042 2042 cg = e.callcommand(
2043 2043 b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',}
2044 2044 ).result()
2045 2045
2046 2046 elif not pullop.remote.capable(b'changegroupsubset'):
2047 2047 raise error.Abort(
2048 2048 _(
2049 2049 b"partial pull cannot be done because "
2050 2050 b"other repository doesn't support "
2051 2051 b"changegroupsubset."
2052 2052 )
2053 2053 )
2054 2054 else:
2055 2055 with pullop.remote.commandexecutor() as e:
2056 2056 cg = e.callcommand(
2057 2057 b'changegroupsubset',
2058 2058 {
2059 2059 b'bases': pullop.fetch,
2060 2060 b'heads': pullop.heads,
2061 2061 b'source': b'pull',
2062 2062 },
2063 2063 ).result()
2064 2064
2065 2065 bundleop = bundle2.applybundle(
2066 2066 pullop.repo, cg, tr, b'pull', pullop.remote.url()
2067 2067 )
2068 2068 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2069 2069
2070 2070
2071 2071 def _pullphase(pullop):
2072 2072 # Get remote phases data from remote
2073 2073 if b'phases' in pullop.stepsdone:
2074 2074 return
2075 2075 remotephases = listkeys(pullop.remote, b'phases')
2076 2076 _pullapplyphases(pullop, remotephases)
2077 2077
2078 2078
2079 2079 def _pullapplyphases(pullop, remotephases):
2080 2080 """apply phase movement from observed remote state"""
2081 2081 if b'phases' in pullop.stepsdone:
2082 2082 return
2083 2083 pullop.stepsdone.add(b'phases')
2084 2084 publishing = bool(remotephases.get(b'publishing', False))
2085 2085 if remotephases and not publishing:
2086 2086 # remote is new and non-publishing
2087 2087 pheads, _dr = phases.analyzeremotephases(
2088 2088 pullop.repo, pullop.pulledsubset, remotephases
2089 2089 )
2090 2090 dheads = pullop.pulledsubset
2091 2091 else:
2092 2092 # Remote is old or publishing; all common changesets
2093 2093 # should be seen as public
2094 2094 pheads = pullop.pulledsubset
2095 2095 dheads = []
2096 2096 unfi = pullop.repo.unfiltered()
2097 2097 phase = unfi._phasecache.phase
2098 2098 rev = unfi.changelog.nodemap.get
2099 2099 public = phases.public
2100 2100 draft = phases.draft
2101 2101
2102 2102 # exclude changesets already public locally and update the others
2103 2103 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
2104 2104 if pheads:
2105 2105 tr = pullop.gettransaction()
2106 2106 phases.advanceboundary(pullop.repo, tr, public, pheads)
2107 2107
2108 2108 # exclude changesets already draft locally and update the others
2109 2109 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
2110 2110 if dheads:
2111 2111 tr = pullop.gettransaction()
2112 2112 phases.advanceboundary(pullop.repo, tr, draft, dheads)
2113 2113
2114 2114
2115 2115 def _pullbookmarks(pullop):
2116 2116 """process the remote bookmark information to update the local one"""
2117 2117 if b'bookmarks' in pullop.stepsdone:
2118 2118 return
2119 2119 pullop.stepsdone.add(b'bookmarks')
2120 2120 repo = pullop.repo
2121 2121 remotebookmarks = pullop.remotebookmarks
2122 2122 bookmod.updatefromremote(
2123 2123 repo.ui,
2124 2124 repo,
2125 2125 remotebookmarks,
2126 2126 pullop.remote.url(),
2127 2127 pullop.gettransaction,
2128 2128 explicit=pullop.explicitbookmarks,
2129 2129 )
2130 2130
2131 2131
2132 2132 def _pullobsolete(pullop):
2133 2133 """utility function to pull obsolete markers from a remote
2134 2134
2135 2135 `gettransaction` is a function that returns the pull transaction, creating
2136 2136 one if necessary. We return the transaction to inform the calling code that
2137 2137 a new transaction has been created (when applicable).
2138 2138
2139 2139 Exists mostly to allow overriding for experimentation purposes."""
2140 2140 if b'obsmarkers' in pullop.stepsdone:
2141 2141 return
2142 2142 pullop.stepsdone.add(b'obsmarkers')
2143 2143 tr = None
2144 2144 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2145 2145 pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
2146 2146 remoteobs = listkeys(pullop.remote, b'obsolete')
2147 2147 if b'dump0' in remoteobs:
2148 2148 tr = pullop.gettransaction()
2149 2149 markers = []
2150 2150 for key in sorted(remoteobs, reverse=True):
2151 2151 if key.startswith(b'dump'):
2152 2152 data = util.b85decode(remoteobs[key])
2153 2153 version, newmarks = obsolete._readmarkers(data)
2154 2154 markers += newmarks
2155 2155 if markers:
2156 2156 pullop.repo.obsstore.add(tr, markers)
2157 2157 pullop.repo.invalidatevolatilesets()
2158 2158 return tr
2159 2159
2160 2160
2161 2161 def applynarrowacl(repo, kwargs):
2162 2162 """Apply narrow fetch access control.
2163 2163
2164 2164 This massages the named arguments for getbundle wire protocol commands
2165 2165 so requested data is filtered through access control rules.
2166 2166 """
2167 2167 ui = repo.ui
2168 2168 # TODO this assumes existence of HTTP and is a layering violation.
2169 2169 username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
2170 2170 user_includes = ui.configlist(
2171 2171 _NARROWACL_SECTION,
2172 2172 username + b'.includes',
2173 2173 ui.configlist(_NARROWACL_SECTION, b'default.includes'),
2174 2174 )
2175 2175 user_excludes = ui.configlist(
2176 2176 _NARROWACL_SECTION,
2177 2177 username + b'.excludes',
2178 2178 ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
2179 2179 )
2180 2180 if not user_includes:
2181 2181 raise error.Abort(
2182 2182 _(b"{} configuration for user {} is empty").format(
2183 2183 _NARROWACL_SECTION, username
2184 2184 )
2185 2185 )
2186 2186
2187 2187 user_includes = [
2188 2188 b'path:.' if p == b'*' else b'path:' + p for p in user_includes
2189 2189 ]
2190 2190 user_excludes = [
2191 2191 b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
2192 2192 ]
2193 2193
2194 2194 req_includes = set(kwargs.get(r'includepats', []))
2195 2195 req_excludes = set(kwargs.get(r'excludepats', []))
2196 2196
2197 2197 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
2198 2198 req_includes, req_excludes, user_includes, user_excludes
2199 2199 )
2200 2200
2201 2201 if invalid_includes:
2202 2202 raise error.Abort(
2203 2203 _(b"The following includes are not accessible for {}: {}").format(
2204 2204 username, invalid_includes
2205 2205 )
2206 2206 )
2207 2207
2208 2208 new_args = {}
2209 2209 new_args.update(kwargs)
2210 2210 new_args[r'narrow'] = True
2211 2211 new_args[r'narrow_acl'] = True
2212 2212 new_args[r'includepats'] = req_includes
2213 2213 if req_excludes:
2214 2214 new_args[r'excludepats'] = req_excludes
2215 2215
2216 2216 return new_args
2217 2217
2218 2218
2219 2219 def _computeellipsis(repo, common, heads, known, match, depth=None):
2220 2220 """Compute the shape of a narrowed DAG.
2221 2221
2222 2222 Args:
2223 2223 repo: The repository we're transferring.
2224 2224 common: The roots of the DAG range we're transferring.
2225 2225 May be just [nullid], which means all ancestors of heads.
2226 2226 heads: The heads of the DAG range we're transferring.
2227 2227 match: The narrowmatcher that allows us to identify relevant changes.
2228 2228 depth: If not None, only consider nodes to be full nodes if they are at
2229 2229 most depth changesets away from one of heads.
2230 2230
2231 2231 Returns:
2232 2232 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
2233 2233
2234 2234 visitnodes: The list of nodes (either full or ellipsis) which
2235 2235 need to be sent to the client.
2236 2236 relevant_nodes: The set of changelog nodes which change a file inside
2237 2237 the narrowspec. The client needs these as non-ellipsis nodes.
2238 2238 ellipsisroots: A dict of {rev: parents} that is used in
2239 2239 narrowchangegroup to produce ellipsis nodes with the
2240 2240 correct parents.
2241 2241 """
2242 2242 cl = repo.changelog
2243 2243 mfl = repo.manifestlog
2244 2244
2245 2245 clrev = cl.rev
2246 2246
2247 2247 commonrevs = {clrev(n) for n in common} | {nullrev}
2248 2248 headsrevs = {clrev(n) for n in heads}
2249 2249
2250 2250 if depth:
2251 2251 revdepth = {h: 0 for h in headsrevs}
2252 2252
2253 2253 ellipsisheads = collections.defaultdict(set)
2254 2254 ellipsisroots = collections.defaultdict(set)
2255 2255
2256 2256 def addroot(head, curchange):
2257 2257 """Add a root to an ellipsis head, splitting heads with 3 roots."""
2258 2258 ellipsisroots[head].add(curchange)
2259 2259 # Recursively split ellipsis heads with 3 roots by finding the
2260 2260 # roots' youngest common descendant which is an elided merge commit.
2261 2261 # That descendant takes 2 of the 3 roots as its own, and becomes a
2262 2262 # root of the head.
2263 2263 while len(ellipsisroots[head]) > 2:
2264 2264 child, roots = splithead(head)
2265 2265 splitroots(head, child, roots)
2266 2266 head = child # Recurse in case we just added a 3rd root
2267 2267
2268 2268 def splitroots(head, child, roots):
2269 2269 ellipsisroots[head].difference_update(roots)
2270 2270 ellipsisroots[head].add(child)
2271 2271 ellipsisroots[child].update(roots)
2272 2272 ellipsisroots[child].discard(child)
2273 2273
2274 2274 def splithead(head):
2275 2275 r1, r2, r3 = sorted(ellipsisroots[head])
2276 2276 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
2277 2277 mid = repo.revs(
2278 2278 b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
2279 2279 )
2280 2280 for j in mid:
2281 2281 if j == nr2:
2282 2282 return nr2, (nr1, nr2)
2283 2283 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
2284 2284 return j, (nr1, nr2)
2285 2285 raise error.Abort(
2286 2286 _(
2287 2287 b'Failed to split up ellipsis node! head: %d, '
2288 2288 b'roots: %d %d %d'
2289 2289 )
2290 2290 % (head, r1, r2, r3)
2291 2291 )
2292 2292
2293 2293 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
2294 2294 visit = reversed(missing)
2295 2295 relevant_nodes = set()
2296 2296 visitnodes = [cl.node(m) for m in missing]
2297 2297 required = set(headsrevs) | known
2298 2298 for rev in visit:
2299 2299 clrev = cl.changelogrevision(rev)
2300 2300 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
2301 2301 if depth is not None:
2302 2302 curdepth = revdepth[rev]
2303 2303 for p in ps:
2304 2304 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
2305 2305 needed = False
2306 2306 shallow_enough = depth is None or revdepth[rev] <= depth
2307 2307 if shallow_enough:
2308 2308 curmf = mfl[clrev.manifest].read()
2309 2309 if ps:
2310 2310 # We choose to not trust the changed files list in
2311 2311 # changesets because it's not always correct. TODO: could
2312 2312 # we trust it for the non-merge case?
2313 2313 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
2314 2314 needed = bool(curmf.diff(p1mf, match))
2315 2315 if not needed and len(ps) > 1:
2316 2316 # For merge changes, the list of changed files is not
2317 2317 # helpful, since we need to emit the merge if a file
2318 2318 # in the narrow spec has changed on either side of the
2319 2319 # merge. As a result, we do a manifest diff to check.
2320 2320 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2321 2321 needed = bool(curmf.diff(p2mf, match))
2322 2322 else:
2323 2323 # For a root node, we need to include the node if any
2324 2324 # files in the node match the narrowspec.
2325 2325 needed = any(curmf.walk(match))
2326 2326
2327 2327 if needed:
2328 2328 for head in ellipsisheads[rev]:
2329 2329 addroot(head, rev)
2330 2330 for p in ps:
2331 2331 required.add(p)
2332 2332 relevant_nodes.add(cl.node(rev))
2333 2333 else:
2334 2334 if not ps:
2335 2335 ps = [nullrev]
2336 2336 if rev in required:
2337 2337 for head in ellipsisheads[rev]:
2338 2338 addroot(head, rev)
2339 2339 for p in ps:
2340 2340 ellipsisheads[p].add(rev)
2341 2341 else:
2342 2342 for p in ps:
2343 2343 ellipsisheads[p] |= ellipsisheads[rev]
2344 2344
2345 2345 # add common changesets as roots of their reachable ellipsis heads
2346 2346 for c in commonrevs:
2347 2347 for head in ellipsisheads[c]:
2348 2348 addroot(head, c)
2349 2349 return visitnodes, relevant_nodes, ellipsisroots
2350 2350
2351 2351
2352 2352 def caps20to10(repo, role):
2353 2353 """return a set with appropriate options to use bundle20 during getbundle"""
2354 2354 caps = {b'HG20'}
2355 2355 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2356 2356 caps.add(b'bundle2=' + urlreq.quote(capsblob))
2357 2357 return caps
2358 2358
2359 2359
2360 2360 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2361 2361 getbundle2partsorder = []
2362 2362
2363 2363 # Mapping between step name and function
2364 2364 #
2365 2365 # This exists to help extensions wrap steps if necessary
2366 2366 getbundle2partsmapping = {}
2367 2367
2368 2368
2369 2369 def getbundle2partsgenerator(stepname, idx=None):
2370 2370 """decorator for function generating bundle2 part for getbundle
2371 2371
2372 2372 The function is added to the step -> function mapping and appended to the
2373 2373 list of steps. Beware that decorated functions will be added in order
2374 2374 (this may matter).
2375 2375
2376 2376 You can only use this decorator for new steps; if you want to wrap a step
2377 2377 from an extension, change the getbundle2partsmapping dictionary directly."""
2378 2378
2379 2379 def dec(func):
2380 2380 assert stepname not in getbundle2partsmapping
2381 2381 getbundle2partsmapping[stepname] = func
2382 2382 if idx is None:
2383 2383 getbundle2partsorder.append(stepname)
2384 2384 else:
2385 2385 getbundle2partsorder.insert(idx, stepname)
2386 2386 return func
2387 2387
2388 2388 return dec
2389 2389
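# Editor's note: an illustrative sketch (not part of upstream exchange.py)
# of registering a bundle2 part generator with the decorator above; the
# part name is hypothetical, and passing ``idx=0`` would instead splice
# the step at the front of the existing order.
@getbundle2partsgenerator(b'example:empty')
def _getbundleexamplepart(bundler, repo, source, **kwargs):
    # a real generator would call bundler.newpart(...) here
    pass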
2390 2390
2391 2391 def bundle2requested(bundlecaps):
2392 2392 if bundlecaps is not None:
2393 2393 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2394 2394 return False
2395 2395
2396 2396
2397 2397 def getbundlechunks(
2398 2398 repo, source, heads=None, common=None, bundlecaps=None, **kwargs
2399 2399 ):
2400 2400 """Return chunks constituting a bundle's raw data.
2401 2401
2402 2402 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2403 2403 passed.
2404 2404
2405 2405 Returns a 2-tuple of a dict with metadata about the generated bundle
2406 2406 and an iterator over raw chunks (of varying sizes).
2407 2407 """
2408 2408 kwargs = pycompat.byteskwargs(kwargs)
2409 2409 info = {}
2410 2410 usebundle2 = bundle2requested(bundlecaps)
2411 2411 # bundle10 case
2412 2412 if not usebundle2:
2413 2413 if bundlecaps and not kwargs.get(b'cg', True):
2414 2414 raise ValueError(
2415 2415 _(b'request for bundle10 must include changegroup')
2416 2416 )
2417 2417
2418 2418 if kwargs:
2419 2419 raise ValueError(
2420 2420 _(b'unsupported getbundle arguments: %s')
2421 2421 % b', '.join(sorted(kwargs.keys()))
2422 2422 )
2423 2423 outgoing = _computeoutgoing(repo, heads, common)
2424 2424 info[b'bundleversion'] = 1
2425 2425 return (
2426 2426 info,
2427 2427 changegroup.makestream(
2428 2428 repo, outgoing, b'01', source, bundlecaps=bundlecaps
2429 2429 ),
2430 2430 )
2431 2431
2432 2432 # bundle20 case
2433 2433 info[b'bundleversion'] = 2
2434 2434 b2caps = {}
2435 2435 for bcaps in bundlecaps:
2436 2436 if bcaps.startswith(b'bundle2='):
2437 2437 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2438 2438 b2caps.update(bundle2.decodecaps(blob))
2439 2439 bundler = bundle2.bundle20(repo.ui, b2caps)
2440 2440
2441 2441 kwargs[b'heads'] = heads
2442 2442 kwargs[b'common'] = common
2443 2443
2444 2444 for name in getbundle2partsorder:
2445 2445 func = getbundle2partsmapping[name]
2446 2446 func(
2447 2447 bundler,
2448 2448 repo,
2449 2449 source,
2450 2450 bundlecaps=bundlecaps,
2451 2451 b2caps=b2caps,
2452 2452 **pycompat.strkwargs(kwargs)
2453 2453 )
2454 2454
2455 2455 info[b'prefercompressed'] = bundler.prefercompressed
2456 2456
2457 2457 return info, bundler.getchunks()
2458 2458
2459 2459
2460 2460 @getbundle2partsgenerator(b'stream2')
2461 2461 def _getbundlestream2(bundler, repo, *args, **kwargs):
2462 2462 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2463 2463
2464 2464
2465 2465 @getbundle2partsgenerator(b'changegroup')
2466 2466 def _getbundlechangegrouppart(
2467 2467 bundler,
2468 2468 repo,
2469 2469 source,
2470 2470 bundlecaps=None,
2471 2471 b2caps=None,
2472 2472 heads=None,
2473 2473 common=None,
2474 2474 **kwargs
2475 2475 ):
2476 2476 """add a changegroup part to the requested bundle"""
2477 2477 if not kwargs.get(r'cg', True):
2478 2478 return
2479 2479
2480 2480 version = b'01'
2481 2481 cgversions = b2caps.get(b'changegroup')
2482 2482 if cgversions: # 3.1 and 3.2 ship with an empty value
2483 2483 cgversions = [
2484 2484 v
2485 2485 for v in cgversions
2486 2486 if v in changegroup.supportedoutgoingversions(repo)
2487 2487 ]
2488 2488 if not cgversions:
2489 2489 raise error.Abort(_(b'no common changegroup version'))
2490 2490 version = max(cgversions)
2491 2491
2492 2492 outgoing = _computeoutgoing(repo, heads, common)
2493 2493 if not outgoing.missing:
2494 2494 return
2495 2495
2496 2496 if kwargs.get(r'narrow', False):
2497 2497 include = sorted(filter(bool, kwargs.get(r'includepats', [])))
2498 2498 exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
2499 2499 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2500 2500 else:
2501 2501 matcher = None
2502 2502
2503 2503 cgstream = changegroup.makestream(
2504 2504 repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
2505 2505 )
2506 2506
2507 2507 part = bundler.newpart(b'changegroup', data=cgstream)
2508 2508 if cgversions:
2509 2509 part.addparam(b'version', version)
2510 2510
2511 2511 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2512 2512
2513 2513 if b'treemanifest' in repo.requirements:
2514 2514 part.addparam(b'treemanifest', b'1')
2515 2515
2516 2516 if b'exp-sidedata-flag' in repo.requirements:
2517 2517 part.addparam(b'exp-sidedata', b'1')
2518 2518
2519 2519 if (
2520 2520 kwargs.get(r'narrow', False)
2521 2521 and kwargs.get(r'narrow_acl', False)
2522 2522 and (include or exclude)
2523 2523 ):
2524 2524 # this is mandatory because otherwise ACL clients won't work
2525 2525 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2526 2526 narrowspecpart.data = b'%s\0%s' % (
2527 2527 b'\n'.join(include),
2528 2528 b'\n'.join(exclude),
2529 2529 )
2530 2530
2531 2531
2532 2532 @getbundle2partsgenerator(b'bookmarks')
2533 2533 def _getbundlebookmarkpart(
2534 2534 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2535 2535 ):
2536 2536 """add a bookmark part to the requested bundle"""
2537 2537 if not kwargs.get(r'bookmarks', False):
2538 2538 return
2539 2539 if b'bookmarks' not in b2caps:
2540 2540 raise error.Abort(_(b'no common bookmarks exchange method'))
2541 2541 books = bookmod.listbinbookmarks(repo)
2542 2542 data = bookmod.binaryencode(books)
2543 2543 if data:
2544 2544 bundler.newpart(b'bookmarks', data=data)
2545 2545
2546 2546
2547 2547 @getbundle2partsgenerator(b'listkeys')
2548 2548 def _getbundlelistkeysparts(
2549 2549 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2550 2550 ):
2551 2551 """add parts containing listkeys namespaces to the requested bundle"""
2552 2552 listkeys = kwargs.get(r'listkeys', ())
2553 2553 for namespace in listkeys:
2554 2554 part = bundler.newpart(b'listkeys')
2555 2555 part.addparam(b'namespace', namespace)
2556 2556 keys = repo.listkeys(namespace).items()
2557 2557 part.data = pushkey.encodekeys(keys)
2558 2558
2559 2559
2560 2560 @getbundle2partsgenerator(b'obsmarkers')
2561 2561 def _getbundleobsmarkerpart(
2562 2562 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2563 2563 ):
2564 2564 """add an obsolescence markers part to the requested bundle"""
2565 2565 if kwargs.get(r'obsmarkers', False):
2566 2566 if heads is None:
2567 2567 heads = repo.heads()
2568 2568 subset = [c.node() for c in repo.set(b'::%ln', heads)]
2569 2569 markers = repo.obsstore.relevantmarkers(subset)
2570 markers = sorted(markers)
2570 # last item of marker tuple ('parents') may be None or a tuple
2571 markers = sorted(markers, key=lambda m: m[:-1] + (m[-1] or (),))
2571 2572 bundle2.buildobsmarkerspart(bundler, markers)
2572 2573
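# Editor's note: a minimal sketch (not part of upstream exchange.py) of why
# the sort key above is needed on Python 3. The last element of a marker
# tuple ('parents') is either None or a tuple of parent nodes, and py3
# refuses to order None against a tuple, so the key substitutes () for
# None. The marker values below are hypothetical stand-ins.
def _example_markersort():
    markers = [
        (b'prec', (b'succ',), 0, (), (0.0, 0), (b'parent',)),
        (b'prec', (b'succ',), 0, (), (0.0, 0), None),
    ]
    # plain sorted(markers) raises TypeError on Python 3 (None vs tuple)
    return sorted(markers, key=lambda m: m[:-1] + (m[-1] or (),))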
2573 2574
2574 2575 @getbundle2partsgenerator(b'phases')
2575 2576 def _getbundlephasespart(
2576 2577 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2577 2578 ):
2578 2579 """add phase heads part to the requested bundle"""
2579 2580 if kwargs.get(r'phases', False):
2580 2581 if not b'heads' in b2caps.get(b'phases'):
2581 2582 raise error.Abort(_(b'no common phases exchange method'))
2582 2583 if heads is None:
2583 2584 heads = repo.heads()
2584 2585
2585 2586 headsbyphase = collections.defaultdict(set)
2586 2587 if repo.publishing():
2587 2588 headsbyphase[phases.public] = heads
2588 2589 else:
2589 2590 # find the appropriate heads to move
2590 2591
2591 2592 phase = repo._phasecache.phase
2592 2593 node = repo.changelog.node
2593 2594 rev = repo.changelog.rev
2594 2595 for h in heads:
2595 2596 headsbyphase[phase(repo, rev(h))].add(h)
2596 2597 seenphases = list(headsbyphase.keys())
2597 2598
2599 2600 # We do not handle anything but public and draft phases for now
2599 2600 if seenphases:
2600 2601 assert max(seenphases) <= phases.draft
2601 2602
2602 2603 # if client is pulling non-public changesets, we need to find
2603 2604 # intermediate public heads.
2604 2605 draftheads = headsbyphase.get(phases.draft, set())
2605 2606 if draftheads:
2606 2607 publicheads = headsbyphase.get(phases.public, set())
2607 2608
2608 2609 revset = b'heads(only(%ln, %ln) and public())'
2609 2610 extraheads = repo.revs(revset, draftheads, publicheads)
2610 2611 for r in extraheads:
2611 2612 headsbyphase[phases.public].add(node(r))
2612 2613
2613 2614 # transform data in a format used by the encoding function
2614 2615 phasemapping = []
2615 2616 for phase in phases.allphases:
2616 2617 phasemapping.append(sorted(headsbyphase[phase]))
2617 2618
2618 2619 # generate the actual part
2619 2620 phasedata = phases.binaryencode(phasemapping)
2620 2621 bundler.newpart(b'phase-heads', data=phasedata)
2621 2622
2622 2623
2623 2624 @getbundle2partsgenerator(b'hgtagsfnodes')
2624 2625 def _getbundletagsfnodes(
2625 2626 bundler,
2626 2627 repo,
2627 2628 source,
2628 2629 bundlecaps=None,
2629 2630 b2caps=None,
2630 2631 heads=None,
2631 2632 common=None,
2632 2633 **kwargs
2633 2634 ):
2634 2635 """Transfer the .hgtags filenodes mapping.
2635 2636
2636 2637 Only values for heads in this bundle will be transferred.
2637 2638
2638 2639 The part data consists of pairs of 20 byte changeset node and .hgtags
2639 2640 filenodes raw values.
2640 2641 """
2641 2642 # Don't send unless:
2642 2643 # - changeset are being exchanged,
2643 2644 # - the client supports it.
2644 2645 if not (kwargs.get(r'cg', True) and b'hgtagsfnodes' in b2caps):
2645 2646 return
2646 2647
2647 2648 outgoing = _computeoutgoing(repo, heads, common)
2648 2649 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2649 2650
2650 2651
2651 2652 @getbundle2partsgenerator(b'cache:rev-branch-cache')
2652 2653 def _getbundlerevbranchcache(
2653 2654 bundler,
2654 2655 repo,
2655 2656 source,
2656 2657 bundlecaps=None,
2657 2658 b2caps=None,
2658 2659 heads=None,
2659 2660 common=None,
2660 2661 **kwargs
2661 2662 ):
2662 2663 """Transfer the rev-branch-cache mapping
2663 2664
2664 2665 The payload is a series of data related to each branch
2665 2666
2666 2667 1) branch name length
2667 2668 2) number of open heads
2668 2669 3) number of closed heads
2669 2670 4) open heads nodes
2670 2671 5) closed heads nodes
2671 2672 """
2672 2673 # Don't send unless:
2673 2674 # - changeset are being exchanged,
2674 2675 # - the client supports it.
2675 2676 # - narrow bundle isn't in play (not currently compatible).
2676 2677 if (
2677 2678 not kwargs.get(r'cg', True)
2678 2679 or b'rev-branch-cache' not in b2caps
2679 2680 or kwargs.get(r'narrow', False)
2680 2681 or repo.ui.has_section(_NARROWACL_SECTION)
2681 2682 ):
2682 2683 return
2683 2684
2684 2685 outgoing = _computeoutgoing(repo, heads, common)
2685 2686 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2686 2687
2687 2688
2688 2689 def check_heads(repo, their_heads, context):
2689 2690 """check if the heads of a repo have been modified
2690 2691
2691 2692 Used by peer for unbundling.
2692 2693 """
2693 2694 heads = repo.heads()
2694 2695 heads_hash = hashlib.sha1(b''.join(sorted(heads))).digest()
2695 2696 if not (
2696 2697 their_heads == [b'force']
2697 2698 or their_heads == heads
2698 2699 or their_heads == [b'hashed', heads_hash]
2699 2700 ):
2700 2701 # someone else committed/pushed/unbundled while we
2701 2702 # were transferring data
2702 2703 raise error.PushRaced(
2703 2704 b'repository changed while %s - please try again' % context
2704 2705 )
2705 2706
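# Editor's note: a small sketch (not part of upstream exchange.py) of the
# b'hashed' form accepted above: the client sends sha1 over the sorted,
# concatenated binary head nodes. The nodes below are hypothetical.
def _example_headshash():
    import hashlib
    heads = [b'\x01' * 20, b'\x02' * 20]
    digest = hashlib.sha1(b''.join(sorted(heads))).digest()
    return [b'hashed', digest]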
2706 2707
2707 2708 def unbundle(repo, cg, heads, source, url):
2708 2709 """Apply a bundle to a repo.
2709 2710
2710 2711 This function makes sure the repo is locked during the application and has
2711 2712 a mechanism to check that no push race occurred between the creation of the
2712 2713 bundle and its application.
2713 2714
2714 2715 If the push was raced, a PushRaced exception is raised."""
2715 2716 r = 0
2716 2717 # need a transaction when processing a bundle2 stream
2717 2718 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2718 2719 lockandtr = [None, None, None]
2719 2720 recordout = None
2720 2721 # quick fix for output mismatch with bundle2 in 3.4
2721 2722 captureoutput = repo.ui.configbool(
2722 2723 b'experimental', b'bundle2-output-capture'
2723 2724 )
2724 2725 if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
2725 2726 captureoutput = True
2726 2727 try:
2727 2728 # note: outside bundle1, 'heads' is expected to be empty and this
2728 2729 # 'check_heads' call will be a no-op
2729 2730 check_heads(repo, heads, b'uploading changes')
2730 2731 # push can proceed
2731 2732 if not isinstance(cg, bundle2.unbundle20):
2732 2733 # legacy case: bundle1 (changegroup 01)
2733 2734 txnname = b"\n".join([source, util.hidepassword(url)])
2734 2735 with repo.lock(), repo.transaction(txnname) as tr:
2735 2736 op = bundle2.applybundle(repo, cg, tr, source, url)
2736 2737 r = bundle2.combinechangegroupresults(op)
2737 2738 else:
2738 2739 r = None
2739 2740 try:
2740 2741
2741 2742 def gettransaction():
2742 2743 if not lockandtr[2]:
2743 2744 if not bookmod.bookmarksinstore(repo):
2744 2745 lockandtr[0] = repo.wlock()
2745 2746 lockandtr[1] = repo.lock()
2746 2747 lockandtr[2] = repo.transaction(source)
2747 2748 lockandtr[2].hookargs[b'source'] = source
2748 2749 lockandtr[2].hookargs[b'url'] = url
2749 2750 lockandtr[2].hookargs[b'bundle2'] = b'1'
2750 2751 return lockandtr[2]
2751 2752
2752 2753 # Do greedy locking by default until we're satisfied with lazy
2753 2754 # locking.
2754 2755 if not repo.ui.configbool(
2755 2756 b'experimental', b'bundle2lazylocking'
2756 2757 ):
2757 2758 gettransaction()
2758 2759
2759 2760 op = bundle2.bundleoperation(
2760 2761 repo,
2761 2762 gettransaction,
2762 2763 captureoutput=captureoutput,
2763 2764 source=b'push',
2764 2765 )
2765 2766 try:
2766 2767 op = bundle2.processbundle(repo, cg, op=op)
2767 2768 finally:
2768 2769 r = op.reply
2769 2770 if captureoutput and r is not None:
2770 2771 repo.ui.pushbuffer(error=True, subproc=True)
2771 2772
2772 2773 def recordout(output):
2773 2774 r.newpart(b'output', data=output, mandatory=False)
2774 2775
2775 2776 if lockandtr[2] is not None:
2776 2777 lockandtr[2].close()
2777 2778 except BaseException as exc:
2778 2779 exc.duringunbundle2 = True
2779 2780 if captureoutput and r is not None:
2780 2781 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2781 2782
2782 2783 def recordout(output):
2783 2784 part = bundle2.bundlepart(
2784 2785 b'output', data=output, mandatory=False
2785 2786 )
2786 2787 parts.append(part)
2787 2788
2788 2789 raise
2789 2790 finally:
2790 2791 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2791 2792 if recordout is not None:
2792 2793 recordout(repo.ui.popbuffer())
2793 2794 return r
2794 2795
2795 2796
2796 2797 def _maybeapplyclonebundle(pullop):
2797 2798 """Apply a clone bundle from a remote, if possible."""
2798 2799
2799 2800 repo = pullop.repo
2800 2801 remote = pullop.remote
2801 2802
2802 2803 if not repo.ui.configbool(b'ui', b'clonebundles'):
2803 2804 return
2804 2805
2805 2806 # Only run if local repo is empty.
2806 2807 if len(repo):
2807 2808 return
2808 2809
2809 2810 if pullop.heads:
2810 2811 return
2811 2812
2812 2813 if not remote.capable(b'clonebundles'):
2813 2814 return
2814 2815
2815 2816 with remote.commandexecutor() as e:
2816 2817 res = e.callcommand(b'clonebundles', {}).result()
2817 2818
2818 2819 # If we call the wire protocol command, that's good enough to record the
2819 2820 # attempt.
2820 2821 pullop.clonebundleattempted = True
2821 2822
2822 2823 entries = parseclonebundlesmanifest(repo, res)
2823 2824 if not entries:
2824 2825 repo.ui.note(
2825 2826 _(
2826 2827 b'no clone bundles available on remote; '
2827 2828 b'falling back to regular clone\n'
2828 2829 )
2829 2830 )
2830 2831 return
2831 2832
2832 2833 entries = filterclonebundleentries(
2833 2834 repo, entries, streamclonerequested=pullop.streamclonerequested
2834 2835 )
2835 2836
2836 2837 if not entries:
2837 2838 # There is a thundering herd concern here. However, if a server
2838 2839 # operator doesn't advertise bundles appropriate for its clients,
2839 2840 # they deserve what's coming. Furthermore, from a client's
2840 2841 # perspective, no automatic fallback would mean not being able to
2841 2842 # clone!
2842 2843 repo.ui.warn(
2843 2844 _(
2844 2845 b'no compatible clone bundles available on server; '
2845 2846 b'falling back to regular clone\n'
2846 2847 )
2847 2848 )
2848 2849 repo.ui.warn(
2849 2850 _(b'(you may want to report this to the server operator)\n')
2850 2851 )
2851 2852 return
2852 2853
2853 2854 entries = sortclonebundleentries(repo.ui, entries)
2854 2855
2855 2856 url = entries[0][b'URL']
2856 2857 repo.ui.status(_(b'applying clone bundle from %s\n') % url)
2857 2858 if trypullbundlefromurl(repo.ui, repo, url):
2858 2859 repo.ui.status(_(b'finished applying clone bundle\n'))
2859 2860 # Bundle failed.
2860 2861 #
2861 2862 # We abort by default to avoid the thundering herd of
2862 2863 # clients flooding a server that was expecting expensive
2863 2864 # clone load to be offloaded.
2864 2865 elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
2865 2866 repo.ui.warn(_(b'falling back to normal clone\n'))
2866 2867 else:
2867 2868 raise error.Abort(
2868 2869 _(b'error applying bundle'),
2869 2870 hint=_(
2870 2871 b'if this error persists, consider contacting '
2871 2872 b'the server operator or disable clone '
2872 2873 b'bundles via '
2873 2874 b'"--config ui.clonebundles=false"'
2874 2875 ),
2875 2876 )
2876 2877
2877 2878
2878 2879 def parseclonebundlesmanifest(repo, s):
2879 2880 """Parses the raw text of a clone bundles manifest.
2880 2881
2881 2882 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2882 2883 to the URL; the other keys are the attributes for the entry.
2883 2884 """
2884 2885 m = []
2885 2886 for line in s.splitlines():
2886 2887 fields = line.split()
2887 2888 if not fields:
2888 2889 continue
2889 2890 attrs = {b'URL': fields[0]}
2890 2891 for rawattr in fields[1:]:
2891 2892 key, value = rawattr.split(b'=', 1)
2892 2893 key = urlreq.unquote(key)
2893 2894 value = urlreq.unquote(value)
2894 2895 attrs[key] = value
2895 2896
2896 2897 # Parse BUNDLESPEC into components. This makes client-side
2897 2898 # preferences easier to specify since you can prefer a single
2898 2899 # component of the BUNDLESPEC.
2899 2900 if key == b'BUNDLESPEC':
2900 2901 try:
2901 2902 bundlespec = parsebundlespec(repo, value)
2902 2903 attrs[b'COMPRESSION'] = bundlespec.compression
2903 2904 attrs[b'VERSION'] = bundlespec.version
2904 2905 except error.InvalidBundleSpecification:
2905 2906 pass
2906 2907 except error.UnsupportedBundleSpecification:
2907 2908 pass
2908 2909
2909 2910 m.append(attrs)
2910 2911
2911 2912 return m
2912 2913
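# Editor's note: a hypothetical two-line manifest and what the parser above
# produces for it (URLs and attribute values are illustrative only):
#
#     https://cdn.example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#     https://cdn.example.com/partial.hg BUNDLESPEC=none-packed1
#
# -> [{b'URL': b'https://cdn.example.com/full.hg',
#      b'BUNDLESPEC': b'gzip-v2', b'REQUIRESNI': b'true',
#      b'COMPRESSION': b'gzip', b'VERSION': b'v2'},
#     {b'URL': b'https://cdn.example.com/partial.hg',
#      b'BUNDLESPEC': b'none-packed1',
#      b'COMPRESSION': b'none', b'VERSION': b'packed1'}]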
2913 2914
2914 2915 def isstreamclonespec(bundlespec):
2915 2916 # Stream clone v1
2916 2917 if bundlespec.wirecompression == b'UN' and bundlespec.wireversion == b's1':
2917 2918 return True
2918 2919
2919 2920 # Stream clone v2
2920 2921 if (
2921 2922 bundlespec.wirecompression == b'UN'
2922 2923 and bundlespec.wireversion == b'02'
2923 2924 and bundlespec.contentopts.get(b'streamv2')
2924 2925 ):
2925 2926 return True
2926 2927
2927 2928 return False
2928 2929
2929 2930
2930 2931 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2931 2932 """Remove incompatible clone bundle manifest entries.
2932 2933
2933 2934 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2934 2935 and returns a new list consisting of only the entries that this client
2935 2936 should be able to apply.
2936 2937
2937 2938 There is no guarantee we'll be able to apply all returned entries because
2938 2939 the metadata we use to filter on may be missing or wrong.
2939 2940 """
2940 2941 newentries = []
2941 2942 for entry in entries:
2942 2943 spec = entry.get(b'BUNDLESPEC')
2943 2944 if spec:
2944 2945 try:
2945 2946 bundlespec = parsebundlespec(repo, spec, strict=True)
2946 2947
2947 2948 # If a stream clone was requested, filter out non-streamclone
2948 2949 # entries.
2949 2950 if streamclonerequested and not isstreamclonespec(bundlespec):
2950 2951 repo.ui.debug(
2951 2952 b'filtering %s because not a stream clone\n'
2952 2953 % entry[b'URL']
2953 2954 )
2954 2955 continue
2955 2956
2956 2957 except error.InvalidBundleSpecification as e:
2957 2958 repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
2958 2959 continue
2959 2960 except error.UnsupportedBundleSpecification as e:
2960 2961 repo.ui.debug(
2961 2962 b'filtering %s because unsupported bundle '
2962 2963 b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
2963 2964 )
2964 2965 continue
2965 2966 # If we don't have a spec and requested a stream clone, we don't know
2966 2967 # what the entry is so don't attempt to apply it.
2967 2968 elif streamclonerequested:
2968 2969 repo.ui.debug(
2969 2970 b'filtering %s because cannot determine if a stream '
2970 2971 b'clone bundle\n' % entry[b'URL']
2971 2972 )
2972 2973 continue
2973 2974
2974 2975 if b'REQUIRESNI' in entry and not sslutil.hassni:
2975 2976 repo.ui.debug(
2976 2977 b'filtering %s because SNI not supported\n' % entry[b'URL']
2977 2978 )
2978 2979 continue
2979 2980
2980 2981 newentries.append(entry)
2981 2982
2982 2983 return newentries
2983 2984
2984 2985
2985 2986 class clonebundleentry(object):
2986 2987 """Represents an item in a clone bundles manifest.
2987 2988
2988 2989 This rich class is needed to support sorting since sorted() in Python 3
2989 2990 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2990 2991 won't work.
2991 2992 """
2992 2993
2993 2994 def __init__(self, value, prefers):
2994 2995 self.value = value
2995 2996 self.prefers = prefers
2996 2997
2997 2998 def _cmp(self, other):
2998 2999 for prefkey, prefvalue in self.prefers:
2999 3000 avalue = self.value.get(prefkey)
3000 3001 bvalue = other.value.get(prefkey)
3001 3002
3002 3003 # Special case: b is missing the attribute and a matches exactly.
3003 3004 if avalue is not None and bvalue is None and avalue == prefvalue:
3004 3005 return -1
3005 3006
3006 3007 # Special case: a is missing the attribute and b matches exactly.
3007 3008 if bvalue is not None and avalue is None and bvalue == prefvalue:
3008 3009 return 1
3009 3010
3010 3011 # We can't compare unless attribute present on both.
3011 3012 if avalue is None or bvalue is None:
3012 3013 continue
3013 3014
3014 3015 # Same values should fall back to next attribute.
3015 3016 if avalue == bvalue:
3016 3017 continue
3017 3018
3018 3019 # Exact matches come first.
3019 3020 if avalue == prefvalue:
3020 3021 return -1
3021 3022 if bvalue == prefvalue:
3022 3023 return 1
3023 3024
3024 3025 # Fall back to next attribute.
3025 3026 continue
3026 3027
3027 3028 # If we got here we couldn't sort by attributes and prefers. Fall
3028 3029 # back to index order.
3029 3030 return 0
3030 3031
3031 3032 def __lt__(self, other):
3032 3033 return self._cmp(other) < 0
3033 3034
3034 3035 def __gt__(self, other):
3035 3036 return self._cmp(other) > 0
3036 3037
3037 3038 def __eq__(self, other):
3038 3039 return self._cmp(other) == 0
3039 3040
3040 3041 def __le__(self, other):
3041 3042 return self._cmp(other) <= 0
3042 3043
3043 3044 def __ge__(self, other):
3044 3045 return self._cmp(other) >= 0
3045 3046
3046 3047 def __ne__(self, other):
3047 3048 return self._cmp(other) != 0
3048 3049
3049 3050
3050 3051 def sortclonebundleentries(ui, entries):
3051 3052 prefers = ui.configlist(b'ui', b'clonebundleprefers')
3052 3053 if not prefers:
3053 3054 return list(entries)
3054 3055
3055 3056 prefers = [p.split(b'=', 1) for p in prefers]
3056 3057
3057 3058 items = sorted(clonebundleentry(v, prefers) for v in entries)
3058 3059 return [i.value for i in items]
3059 3060
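# Editor's note: an illustrative sketch (not part of upstream exchange.py)
# of the preference sorting above. With ``ui.clonebundleprefers`` set to
# COMPRESSION=zstd, the zstd entry sorts first; ties fall through to the
# next preference and finally to manifest order. Entries are hypothetical.
def _example_sortprefs():
    entries = [
        {b'URL': b'a', b'COMPRESSION': b'gzip'},
        {b'URL': b'b', b'COMPRESSION': b'zstd'},
    ]
    prefers = [(b'COMPRESSION', b'zstd')]
    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value[b'URL'] for i in items]  # -> [b'b', b'a']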
3060 3061
3061 3062 def trypullbundlefromurl(ui, repo, url):
3062 3063 """Attempt to apply a bundle from a URL."""
3063 3064 with repo.lock(), repo.transaction(b'bundleurl') as tr:
3064 3065 try:
3065 3066 fh = urlmod.open(ui, url)
3066 3067 cg = readbundle(ui, fh, b'stream')
3067 3068
3068 3069 if isinstance(cg, streamclone.streamcloneapplier):
3069 3070 cg.apply(repo)
3070 3071 else:
3071 3072 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
3072 3073 return True
3073 3074 except urlerr.httperror as e:
3074 3075 ui.warn(
3075 3076 _(b'HTTP error fetching bundle: %s\n')
3076 3077 % stringutil.forcebytestr(e)
3077 3078 )
3078 3079 except urlerr.urlerror as e:
3079 3080 ui.warn(
3080 3081 _(b'error fetching bundle: %s\n')
3081 3082 % stringutil.forcebytestr(e.reason)
3082 3083 )
3083 3084
3084 3085 return False