exchange: extract a function to sort obsolete markers...
Denis Laxalde
r43571:48b9fbfb default
@@ -1,3085 +1,3090
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullid,
17 17 nullrev,
18 18 )
19 19 from .thirdparty import attr
20 20 from . import (
21 21 bookmarks as bookmod,
22 22 bundle2,
23 23 changegroup,
24 24 discovery,
25 25 error,
26 26 exchangev2,
27 27 lock as lockmod,
28 28 logexchange,
29 29 narrowspec,
30 30 obsolete,
31 31 phases,
32 32 pushkey,
33 33 pycompat,
34 34 scmutil,
35 35 sslutil,
36 36 streamclone,
37 37 url as urlmod,
38 38 util,
39 39 wireprototypes,
40 40 )
41 41 from .interfaces import repository
42 42 from .utils import stringutil
43 43
44 44 urlerr = util.urlerr
45 45 urlreq = util.urlreq
46 46
47 47 _NARROWACL_SECTION = b'narrowacl'
48 48
49 49 # Maps bundle version human names to changegroup versions.
50 50 _bundlespeccgversions = {
51 51 b'v1': b'01',
52 52 b'v2': b'02',
53 53 b'packed1': b's1',
54 54 b'bundle2': b'02', # legacy
55 55 }
56 56
57 57 # Maps bundle version with content opts to choose which part to bundle
58 58 _bundlespeccontentopts = {
59 59 b'v1': {
60 60 b'changegroup': True,
61 61 b'cg.version': b'01',
62 62 b'obsolescence': False,
63 63 b'phases': False,
64 64 b'tagsfnodescache': False,
65 65 b'revbranchcache': False,
66 66 },
67 67 b'v2': {
68 68 b'changegroup': True,
69 69 b'cg.version': b'02',
70 70 b'obsolescence': False,
71 71 b'phases': False,
72 72 b'tagsfnodescache': True,
73 73 b'revbranchcache': True,
74 74 },
75 75 b'packed1': {b'cg.version': b's1'},
76 76 }
77 77 _bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']
78 78
79 79 _bundlespecvariants = {
80 80 b"streamv2": {
81 81 b"changegroup": False,
82 82 b"streamv2": True,
83 83 b"tagsfnodescache": False,
84 84 b"revbranchcache": False,
85 85 }
86 86 }
87 87
88 88 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
89 89 _bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
90 90
91 91
92 92 @attr.s
93 93 class bundlespec(object):
94 94 compression = attr.ib()
95 95 wirecompression = attr.ib()
96 96 version = attr.ib()
97 97 wireversion = attr.ib()
98 98 params = attr.ib()
99 99 contentopts = attr.ib()
100 100
101 101
102 def _sortedmarkers(markers):
103 # last item of marker tuple ('parents') may be None or a tuple
104 return sorted(markers, key=lambda m: m[:-1] + (m[-1] or (),))
105
106
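The helper exists because sorted() on Python 3 raises TypeError when it has to compare None against a tuple, and a marker's last item ('parents') may be None. A minimal sketch of the difference (marker values are hypothetical and shortened; only the None-vs-tuple last item matters):

    markers = [(b'a', (), 0, (), (0.0, 0), None),
               (b'a', (), 0, (), (0.0, 0), (b'p',))]
    # sorted(markers)         -> TypeError on Python 3 ('<' on NoneType/tuple)
    # _sortedmarkers(markers) -> OK; the key substitutes () for a None last item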
102 107 def parsebundlespec(repo, spec, strict=True):
103 108 """Parse a bundle string specification into parts.
104 109
105 110 Bundle specifications denote a well-defined bundle/exchange format.
106 111 The content of a given specification should not change over time in
107 112 order to ensure that bundles produced by a newer version of Mercurial are
108 113 readable from an older version.
109 114
110 115 The string currently has the form:
111 116
112 117 <compression>-<type>[;<parameter0>[;<parameter1>]]
113 118
114 119 Where <compression> is one of the supported compression formats
115 120 and <type> is (currently) a version string. A ";" can follow the type and
116 121 all text afterwards is interpreted as URI encoded, ";" delimited key=value
117 122 pairs.
118 123
119 124 If ``strict`` is True (the default) <compression> is required. Otherwise,
120 125 it is optional.
121 126
122 127 Returns a bundlespec object of (compression, version, parameters).
123 128 Compression will be ``None`` if not in strict mode and a compression isn't
124 129 defined.
125 130
126 131 An ``InvalidBundleSpecification`` is raised when the specification is
127 132 not syntactically well formed.
128 133
129 134 An ``UnsupportedBundleSpecification`` is raised when the compression or
130 135 bundle type/version is not recognized.
131 136
132 137 Note: this function will likely eventually return a more complex data
133 138 structure, including bundle2 part information.
134 139 """
135 140
136 141 def parseparams(s):
137 142 if b';' not in s:
138 143 return s, {}
139 144
140 145 params = {}
141 146 version, paramstr = s.split(b';', 1)
142 147
143 148 for p in paramstr.split(b';'):
144 149 if b'=' not in p:
145 150 raise error.InvalidBundleSpecification(
146 151 _(
147 152 b'invalid bundle specification: '
148 153 b'missing "=" in parameter: %s'
149 154 )
150 155 % p
151 156 )
152 157
153 158 key, value = p.split(b'=', 1)
154 159 key = urlreq.unquote(key)
155 160 value = urlreq.unquote(value)
156 161 params[key] = value
157 162
158 163 return version, params
159 164
160 165 if strict and b'-' not in spec:
161 166 raise error.InvalidBundleSpecification(
162 167 _(
163 168 b'invalid bundle specification; '
164 169 b'must be prefixed with compression: %s'
165 170 )
166 171 % spec
167 172 )
168 173
169 174 if b'-' in spec:
170 175 compression, version = spec.split(b'-', 1)
171 176
172 177 if compression not in util.compengines.supportedbundlenames:
173 178 raise error.UnsupportedBundleSpecification(
174 179 _(b'%s compression is not supported') % compression
175 180 )
176 181
177 182 version, params = parseparams(version)
178 183
179 184 if version not in _bundlespeccgversions:
180 185 raise error.UnsupportedBundleSpecification(
181 186 _(b'%s is not a recognized bundle version') % version
182 187 )
183 188 else:
184 189 # Value could be just the compression or just the version, in which
185 190 # case some defaults are assumed (but only when not in strict mode).
186 191 assert not strict
187 192
188 193 spec, params = parseparams(spec)
189 194
190 195 if spec in util.compengines.supportedbundlenames:
191 196 compression = spec
192 197 version = b'v1'
193 198 # Generaldelta repos require v2.
194 199 if b'generaldelta' in repo.requirements:
195 200 version = b'v2'
196 201 # Modern compression engines require v2.
197 202 if compression not in _bundlespecv1compengines:
198 203 version = b'v2'
199 204 elif spec in _bundlespeccgversions:
200 205 if spec == b'packed1':
201 206 compression = b'none'
202 207 else:
203 208 compression = b'bzip2'
204 209 version = spec
205 210 else:
206 211 raise error.UnsupportedBundleSpecification(
207 212 _(b'%s is not a recognized bundle specification') % spec
208 213 )
209 214
210 215 # Bundle version 1 only supports a known set of compression engines.
211 216 if version == b'v1' and compression not in _bundlespecv1compengines:
212 217 raise error.UnsupportedBundleSpecification(
213 218 _(b'compression engine %s is not supported on v1 bundles')
214 219 % compression
215 220 )
216 221
217 222 # The specification for packed1 can optionally declare the data formats
218 223 # required to apply it. If we see this metadata, compare against what the
219 224 # repo supports and error if the bundle isn't compatible.
220 225 if version == b'packed1' and b'requirements' in params:
221 226 requirements = set(params[b'requirements'].split(b','))
222 227 missingreqs = requirements - repo.supportedformats
223 228 if missingreqs:
224 229 raise error.UnsupportedBundleSpecification(
225 230 _(b'missing support for repository features: %s')
226 231 % b', '.join(sorted(missingreqs))
227 232 )
228 233
229 234 # Compute contentopts based on the version
230 235 contentopts = _bundlespeccontentopts.get(version, {}).copy()
231 236
232 237 # Process the variants
233 238 if b"stream" in params and params[b"stream"] == b"v2":
234 239 variant = _bundlespecvariants[b"streamv2"]
235 240 contentopts.update(variant)
236 241
237 242 engine = util.compengines.forbundlename(compression)
238 243 compression, wirecompression = engine.bundletype()
239 244 wireversion = _bundlespeccgversions[version]
240 245
241 246 return bundlespec(
242 247 compression, wirecompression, version, wireversion, params, contentopts
243 248 )
244 249
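An illustrative call (hypothetical; field values follow the mappings above, contentopts abbreviated):

    spec = parsebundlespec(repo, b'gzip-v2;stream=v2')
    # spec.compression == b'gzip'    spec.wirecompression == b'GZ'
    # spec.version     == b'v2'      spec.wireversion     == b'02'
    # spec.params      == {b'stream': b'v2'}
    # spec.contentopts == v2 defaults updated with the streamv2 variant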
245 250
246 251 def readbundle(ui, fh, fname, vfs=None):
247 252 header = changegroup.readexactly(fh, 4)
248 253
249 254 alg = None
250 255 if not fname:
251 256 fname = b"stream"
252 257 if not header.startswith(b'HG') and header.startswith(b'\0'):
253 258 fh = changegroup.headerlessfixup(fh, header)
254 259 header = b"HG10"
255 260 alg = b'UN'
256 261 elif vfs:
257 262 fname = vfs.join(fname)
258 263
259 264 magic, version = header[0:2], header[2:4]
260 265
261 266 if magic != b'HG':
262 267 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
263 268 if version == b'10':
264 269 if alg is None:
265 270 alg = changegroup.readexactly(fh, 2)
266 271 return changegroup.cg1unpacker(fh, alg)
267 272 elif version.startswith(b'2'):
268 273 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
269 274 elif version == b'S1':
270 275 return streamclone.streamcloneapplier(fh)
271 276 else:
272 277 raise error.Abort(
273 278 _(b'%s: unknown bundle version %s') % (fname, version)
274 279 )
275 280
276 281
277 282 def getbundlespec(ui, fh):
278 283 """Infer the bundlespec from a bundle file handle.
279 284
280 285 The input file handle is seeked and the original seek position is not
281 286 restored.
282 287 """
283 288
284 289 def speccompression(alg):
285 290 try:
286 291 return util.compengines.forbundletype(alg).bundletype()[0]
287 292 except KeyError:
288 293 return None
289 294
290 295 b = readbundle(ui, fh, None)
291 296 if isinstance(b, changegroup.cg1unpacker):
292 297 alg = b._type
293 298 if alg == b'_truncatedBZ':
294 299 alg = b'BZ'
295 300 comp = speccompression(alg)
296 301 if not comp:
297 302 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
298 303 return b'%s-v1' % comp
299 304 elif isinstance(b, bundle2.unbundle20):
300 305 if b'Compression' in b.params:
301 306 comp = speccompression(b.params[b'Compression'])
302 307 if not comp:
303 308 raise error.Abort(
304 309 _(b'unknown compression algorithm: %s') % comp
305 310 )
306 311 else:
307 312 comp = b'none'
308 313
309 314 version = None
310 315 for part in b.iterparts():
311 316 if part.type == b'changegroup':
312 317 version = part.params[b'version']
313 318 if version in (b'01', b'02'):
314 319 version = b'v2'
315 320 else:
316 321 raise error.Abort(
317 322 _(
318 323 b'changegroup version %s does not have '
319 324 b'a known bundlespec'
320 325 )
321 326 % version,
322 327 hint=_(b'try upgrading your Mercurial client'),
323 328 )
324 329 elif part.type == b'stream2' and version is None:
325 330 # A stream2 part must be part of a v2 bundle
326 331 requirements = urlreq.unquote(part.params[b'requirements'])
327 332 splitted = requirements.split()
328 333 params = bundle2._formatrequirementsparams(splitted)
329 334 return b'none-v2;stream=v2;%s' % params
330 335
331 336 if not version:
332 337 raise error.Abort(
333 338 _(b'could not identify changegroup version in bundle')
334 339 )
335 340
336 341 return b'%s-%s' % (comp, version)
337 342 elif isinstance(b, streamclone.streamcloneapplier):
338 343 requirements = streamclone.readbundle1header(fh)[2]
339 344 formatted = bundle2._formatrequirementsparams(requirements)
340 345 return b'none-packed1;%s' % formatted
341 346 else:
342 347 raise error.Abort(_(b'unknown bundle type: %s') % b)
343 348
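The return values mirror the branches above, e.g. (illustrative):

    b'bzip2-v1'               # cg1 bundle compressed with bzip2
    b'none-v2;stream=v2;...'  # bundle2 carrying a stream2 part
    b'none-packed1;...'       # stream clone bundle with its requirements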
344 349
345 350 def _computeoutgoing(repo, heads, common):
346 351 """Computes which revs are outgoing given a set of common
347 352 and a set of heads.
348 353
349 354 This is a separate function so extensions can have access to
350 355 the logic.
351 356
352 357 Returns a discovery.outgoing object.
353 358 """
354 359 cl = repo.changelog
355 360 if common:
356 361 hasnode = cl.hasnode
357 362 common = [n for n in common if hasnode(n)]
358 363 else:
359 364 common = [nullid]
360 365 if not heads:
361 366 heads = cl.heads()
362 367 return discovery.outgoing(repo, common, heads)
363 368
364 369
365 370 def _checkpublish(pushop):
366 371 repo = pushop.repo
367 372 ui = repo.ui
368 373 behavior = ui.config(b'experimental', b'auto-publish')
369 374 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
370 375 return
371 376 remotephases = listkeys(pushop.remote, b'phases')
372 377 if not remotephases.get(b'publishing', False):
373 378 return
374 379
375 380 if pushop.revs is None:
376 381 published = repo.filtered(b'served').revs(b'not public()')
377 382 else:
378 383 published = repo.revs(b'::%ln - public()', pushop.revs)
379 384 if published:
380 385 if behavior == b'warn':
381 386 ui.warn(
382 387 _(b'%i changesets about to be published\n') % len(published)
383 388 )
384 389 elif behavior == b'confirm':
385 390 if ui.promptchoice(
386 391 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
387 392 % len(published)
388 393 ):
389 394 raise error.Abort(_(b'user quit'))
390 395 elif behavior == b'abort':
391 396 msg = _(b'push would publish %i changesets') % len(published)
392 397 hint = _(
393 398 b"use --publish or adjust 'experimental.auto-publish'"
394 399 b" config"
395 400 )
396 401 raise error.Abort(msg, hint=hint)
397 402
398 403
399 404 def _forcebundle1(op):
400 405 """return true if a pull/push must use bundle1
401 406
402 407 This function is used to allow testing of the older bundle version"""
403 408 ui = op.repo.ui
404 409 # The goal of this config is to allow developers to choose the bundle
405 410 # version used during exchange. This is especially handy during tests.
406 411 # Value is a list of bundle versions to pick from; the highest version
407 412 # should be used.
408 413 #
409 414 # developer config: devel.legacy.exchange
410 415 exchange = ui.configlist(b'devel', b'legacy.exchange')
411 416 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
412 417 return forcebundle1 or not op.remote.capable(b'bundle2')
413 418
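The knob it reads looks like this in an hgrc (listing only b'bundle1' forces the legacy path even against a bundle2-capable peer):

    [devel]
    legacy.exchange = bundle1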
414 419
415 420 class pushoperation(object):
416 421 """A object that represent a single push operation
417 422
418 423 Its purpose is to carry push related state and very common operations.
419 424
420 425 A new pushoperation should be created at the beginning of each push and
421 426 discarded afterward.
422 427 """
423 428
424 429 def __init__(
425 430 self,
426 431 repo,
427 432 remote,
428 433 force=False,
429 434 revs=None,
430 435 newbranch=False,
431 436 bookmarks=(),
432 437 publish=False,
433 438 pushvars=None,
434 439 ):
435 440 # repo we push from
436 441 self.repo = repo
437 442 self.ui = repo.ui
438 443 # repo we push to
439 444 self.remote = remote
440 445 # force option provided
441 446 self.force = force
442 447 # revs to be pushed (None is "all")
443 448 self.revs = revs
444 449 # bookmark explicitly pushed
445 450 self.bookmarks = bookmarks
446 451 # allow push of new branch
447 452 self.newbranch = newbranch
448 453 # step already performed
449 454 # (used to check what steps have been already performed through bundle2)
450 455 self.stepsdone = set()
451 456 # Integer version of the changegroup push result
452 457 # - None means nothing to push
453 458 # - 0 means HTTP error
454 459 # - 1 means we pushed and remote head count is unchanged *or*
455 460 # we have outgoing changesets but refused to push
456 461 # - other values as described by addchangegroup()
457 462 self.cgresult = None
458 463 # Boolean value for the bookmark push
459 464 self.bkresult = None
460 465 # discover.outgoing object (contains common and outgoing data)
461 466 self.outgoing = None
462 467 # all remote topological heads before the push
463 468 self.remoteheads = None
464 469 # Details of the remote branch pre and post push
465 470 #
466 471 # mapping: {'branch': ([remoteheads],
467 472 # [newheads],
468 473 # [unsyncedheads],
469 474 # [discardedheads])}
470 475 # - branch: the branch name
471 476 # - remoteheads: the list of remote heads known locally
472 477 # None if the branch is new
473 478 # - newheads: the new remote heads (known locally) with outgoing pushed
474 479 # - unsyncedheads: the list of remote heads unknown locally.
475 480 # - discardedheads: the list of remote heads made obsolete by the push
476 481 self.pushbranchmap = None
477 482 # testable as a boolean indicating if any nodes are missing locally.
478 483 self.incoming = None
479 484 # summary of the remote phase situation
480 485 self.remotephases = None
481 486 # phases changes that must be pushed along side the changesets
482 487 self.outdatedphases = None
483 488 # phases changes that must be pushed if changeset push fails
484 489 self.fallbackoutdatedphases = None
485 490 # outgoing obsmarkers
486 491 self.outobsmarkers = set()
487 492 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
488 493 self.outbookmarks = []
489 494 # transaction manager
490 495 self.trmanager = None
491 496 # map { pushkey partid -> callback handling failure}
492 497 # used to handle exception from mandatory pushkey part failure
493 498 self.pkfailcb = {}
494 499 # an iterable of pushvars or None
495 500 self.pushvars = pushvars
496 501 # publish pushed changesets
497 502 self.publish = publish
498 503
499 504 @util.propertycache
500 505 def futureheads(self):
501 506 """future remote heads if the changeset push succeeds"""
502 507 return self.outgoing.missingheads
503 508
504 509 @util.propertycache
505 510 def fallbackheads(self):
506 511 """future remote heads if the changeset push fails"""
507 512 if self.revs is None:
508 513 # no target to push, all common heads are relevant
509 514 return self.outgoing.commonheads
510 515 unfi = self.repo.unfiltered()
511 516 # I want cheads = heads(::missingheads and ::commonheads)
512 517 # (missingheads is revs with secret changeset filtered out)
513 518 #
514 519 # This can be expressed as:
515 520 # cheads = ( (missingheads and ::commonheads)
516 521 # + (commonheads and ::missingheads))"
517 522 # )
518 523 #
519 524 # while trying to push we already computed the following:
520 525 # common = (::commonheads)
521 526 # missing = ((commonheads::missingheads) - commonheads)
522 527 #
523 528 # We can pick:
524 529 # * missingheads part of common (::commonheads)
525 530 common = self.outgoing.common
526 531 nm = self.repo.changelog.nodemap
527 532 cheads = [node for node in self.revs if nm[node] in common]
528 533 # and
529 534 # * commonheads parents on missing
530 535 revset = unfi.set(
531 536 b'%ln and parents(roots(%ln))',
532 537 self.outgoing.commonheads,
533 538 self.outgoing.missing,
534 539 )
535 540 cheads.extend(c.node() for c in revset)
536 541 return cheads
537 542
538 543 @property
539 544 def commonheads(self):
540 545 """set of all common heads after changeset bundle push"""
541 546 if self.cgresult:
542 547 return self.futureheads
543 548 else:
544 549 return self.fallbackheads
545 550
546 551
547 552 # mapping of message used when pushing bookmark
548 553 bookmsgmap = {
549 554 b'update': (
550 555 _(b"updating bookmark %s\n"),
551 556 _(b'updating bookmark %s failed!\n'),
552 557 ),
553 558 b'export': (
554 559 _(b"exporting bookmark %s\n"),
555 560 _(b'exporting bookmark %s failed!\n'),
556 561 ),
557 562 b'delete': (
558 563 _(b"deleting remote bookmark %s\n"),
559 564 _(b'deleting remote bookmark %s failed!\n'),
560 565 ),
561 566 }
562 567
563 568
564 569 def push(
565 570 repo,
566 571 remote,
567 572 force=False,
568 573 revs=None,
569 574 newbranch=False,
570 575 bookmarks=(),
571 576 publish=False,
572 577 opargs=None,
573 578 ):
574 579 '''Push outgoing changesets (limited by revs) from a local
575 580 repository to remote. Return the pushoperation object; its cgresult is:
576 581 - None means nothing to push
577 582 - 0 means HTTP error
578 583 - 1 means we pushed and remote head count is unchanged *or*
579 584 we have outgoing changesets but refused to push
580 585 - other values as described by addchangegroup()
581 586 '''
582 587 if opargs is None:
583 588 opargs = {}
584 589 pushop = pushoperation(
585 590 repo,
586 591 remote,
587 592 force,
588 593 revs,
589 594 newbranch,
590 595 bookmarks,
591 596 publish,
592 597 **pycompat.strkwargs(opargs)
593 598 )
594 599 if pushop.remote.local():
595 600 missing = (
596 601 set(pushop.repo.requirements) - pushop.remote.local().supported
597 602 )
598 603 if missing:
599 604 msg = _(
600 605 b"required features are not"
601 606 b" supported in the destination:"
602 607 b" %s"
603 608 ) % (b', '.join(sorted(missing)))
604 609 raise error.Abort(msg)
605 610
606 611 if not pushop.remote.canpush():
607 612 raise error.Abort(_(b"destination does not support push"))
608 613
609 614 if not pushop.remote.capable(b'unbundle'):
610 615 raise error.Abort(
611 616 _(
612 617 b'cannot push: destination does not support the '
613 618 b'unbundle wire protocol command'
614 619 )
615 620 )
616 621
617 622 # get lock as we might write phase data
618 623 wlock = lock = None
619 624 try:
620 625 # bundle2 push may receive a reply bundle touching bookmarks
621 626 # requiring the wlock. Take it now to ensure proper ordering.
622 627 maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
623 628 if (
624 629 (not _forcebundle1(pushop))
625 630 and maypushback
626 631 and not bookmod.bookmarksinstore(repo)
627 632 ):
628 633 wlock = pushop.repo.wlock()
629 634 lock = pushop.repo.lock()
630 635 pushop.trmanager = transactionmanager(
631 636 pushop.repo, b'push-response', pushop.remote.url()
632 637 )
633 638 except error.LockUnavailable as err:
634 639 # source repo cannot be locked.
635 640 # We do not abort the push, but just disable the local phase
636 641 # synchronisation.
637 642 msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
638 643 err
639 644 )
640 645 pushop.ui.debug(msg)
641 646
642 647 with wlock or util.nullcontextmanager():
643 648 with lock or util.nullcontextmanager():
644 649 with pushop.trmanager or util.nullcontextmanager():
645 650 pushop.repo.checkpush(pushop)
646 651 _checkpublish(pushop)
647 652 _pushdiscovery(pushop)
648 653 if not _forcebundle1(pushop):
649 654 _pushbundle2(pushop)
650 655 _pushchangeset(pushop)
651 656 _pushsyncphase(pushop)
652 657 _pushobsolete(pushop)
653 658 _pushbookmark(pushop)
654 659
655 660 if repo.ui.configbool(b'experimental', b'remotenames'):
656 661 logexchange.pullremotenames(repo, remote)
657 662
658 663 return pushop
659 664
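A minimal caller sketch (``repo`` and ``dest`` are hypothetical names for a local repository and a peer):

    pushop = push(repo, dest, revs=None, bookmarks=())
    if pushop.cgresult == 0:
        repo.ui.warn(b'push failed\n')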
660 665
661 666 # list of steps to perform discovery before push
662 667 pushdiscoveryorder = []
663 668
664 669 # Mapping between step name and function
665 670 #
666 671 # This exists to help extensions wrap steps if necessary
667 672 pushdiscoverymapping = {}
668 673
669 674
670 675 def pushdiscovery(stepname):
671 676 """decorator for function performing discovery before push
672 677
673 678 The function is added to the step -> function mapping and appended to the
673 678 list of steps. Beware that decorated functions will be added in order (this
675 680 may matter).
676 681
677 682 You can only use this decorator for a new step; if you want to wrap a step
678 683 from an extension, change the pushdiscoverymapping dictionary directly."""
679 684
680 685 def dec(func):
681 686 assert stepname not in pushdiscoverymapping
682 687 pushdiscoverymapping[stepname] = func
683 688 pushdiscoveryorder.append(stepname)
684 689 return func
685 690
686 691 return dec
687 692
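An extension could register an additional step like so (step name is hypothetical; it runs after the built-in steps since registration order is preserved):

    @pushdiscovery(b'my-extra-step')
    def _pushdiscoveryextra(pushop):
        pushop.ui.debug(b'running extra discovery\n')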
688 693
689 694 def _pushdiscovery(pushop):
690 695 """Run all discovery steps"""
691 696 for stepname in pushdiscoveryorder:
692 697 step = pushdiscoverymapping[stepname]
693 698 step(pushop)
694 699
695 700
696 701 @pushdiscovery(b'changeset')
697 702 def _pushdiscoverychangeset(pushop):
698 703 """discover the changeset that need to be pushed"""
699 704 fci = discovery.findcommonincoming
700 705 if pushop.revs:
701 706 commoninc = fci(
702 707 pushop.repo,
703 708 pushop.remote,
704 709 force=pushop.force,
705 710 ancestorsof=pushop.revs,
706 711 )
707 712 else:
708 713 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
709 714 common, inc, remoteheads = commoninc
710 715 fco = discovery.findcommonoutgoing
711 716 outgoing = fco(
712 717 pushop.repo,
713 718 pushop.remote,
714 719 onlyheads=pushop.revs,
715 720 commoninc=commoninc,
716 721 force=pushop.force,
717 722 )
718 723 pushop.outgoing = outgoing
719 724 pushop.remoteheads = remoteheads
720 725 pushop.incoming = inc
721 726
722 727
723 728 @pushdiscovery(b'phase')
724 729 def _pushdiscoveryphase(pushop):
725 730 """discover the phase that needs to be pushed
726 731
727 732 (computed for both success and failure case for changesets push)"""
728 733 outgoing = pushop.outgoing
729 734 unfi = pushop.repo.unfiltered()
730 735 remotephases = listkeys(pushop.remote, b'phases')
731 736
732 737 if (
733 738 pushop.ui.configbool(b'ui', b'_usedassubrepo')
734 739 and remotephases # server supports phases
735 740 and not pushop.outgoing.missing # no changesets to be pushed
736 741 and remotephases.get(b'publishing', False)
737 742 ):
738 743 # When:
739 744 # - this is a subrepo push
740 745 # - and remote supports phases
741 746 # - and no changesets are to be pushed
742 747 # - and remote is publishing
743 748 # We may be in issue 3871 case!
744 749 # We drop the phase synchronisation normally done as a
745 750 # courtesy, which would publish changesets that are locally
746 751 # draft but present on the remote.
747 752 pushop.outdatedphases = []
748 753 pushop.fallbackoutdatedphases = []
749 754 return
750 755
751 756 pushop.remotephases = phases.remotephasessummary(
752 757 pushop.repo, pushop.fallbackheads, remotephases
753 758 )
754 759 droots = pushop.remotephases.draftroots
755 760
756 761 extracond = b''
757 762 if not pushop.remotephases.publishing:
758 763 extracond = b' and public()'
759 764 revset = b'heads((%%ln::%%ln) %s)' % extracond
760 765 # Get the list of all revs draft on remote but public here.
761 766 # XXX Beware that the revset breaks if droots is not strictly
762 767 # XXX roots; we may want to ensure it is, but that is costly
763 768 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
764 769 if not pushop.remotephases.publishing and pushop.publish:
765 770 future = list(
766 771 unfi.set(
767 772 b'%ln and (not public() or %ln::)', pushop.futureheads, droots
768 773 )
769 774 )
770 775 elif not outgoing.missing:
771 776 future = fallback
772 777 else:
773 778 # adds changeset we are going to push as draft
774 779 #
775 780 # should not be necessary for publishing server, but because of an
776 781 # issue fixed in xxxxx we have to do it anyway.
777 782 fdroots = list(
778 783 unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
779 784 )
780 785 fdroots = [f.node() for f in fdroots]
781 786 future = list(unfi.set(revset, fdroots, pushop.futureheads))
782 787 pushop.outdatedphases = future
783 788 pushop.fallbackoutdatedphases = fallback
784 789
785 790
786 791 @pushdiscovery(b'obsmarker')
787 792 def _pushdiscoveryobsmarkers(pushop):
788 793 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
789 794 return
790 795
791 796 if not pushop.repo.obsstore:
792 797 return
793 798
794 799 if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
795 800 return
796 801
797 802 repo = pushop.repo
798 803 # very naive computation, that can be quite expensive on big repo.
799 804 # However: evolution is currently slow on them anyway.
800 805 nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
801 806 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
802 807
803 808
804 809 @pushdiscovery(b'bookmarks')
805 810 def _pushdiscoverybookmarks(pushop):
806 811 ui = pushop.ui
807 812 repo = pushop.repo.unfiltered()
808 813 remote = pushop.remote
809 814 ui.debug(b"checking for updated bookmarks\n")
810 815 ancestors = ()
811 816 if pushop.revs:
812 817 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
813 818 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
814 819
815 820 remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
816 821
817 822 explicit = {
818 823 repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
819 824 }
820 825
821 826 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
822 827 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
823 828
824 829
825 830 def _processcompared(pushop, pushed, explicit, remotebms, comp):
826 831 """take decision on bookmarks to push to the remote repo
827 832
828 833 Exists to help extensions alter this behavior.
829 834 """
830 835 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
831 836
832 837 repo = pushop.repo
833 838
834 839 for b, scid, dcid in advsrc:
835 840 if b in explicit:
836 841 explicit.remove(b)
837 842 if not pushed or repo[scid].rev() in pushed:
838 843 pushop.outbookmarks.append((b, dcid, scid))
839 844 # search added bookmark
840 845 for b, scid, dcid in addsrc:
841 846 if b in explicit:
842 847 explicit.remove(b)
843 848 pushop.outbookmarks.append((b, b'', scid))
844 849 # search for overwritten bookmark
845 850 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
846 851 if b in explicit:
847 852 explicit.remove(b)
848 853 pushop.outbookmarks.append((b, dcid, scid))
849 854 # search for bookmark to delete
850 855 for b, scid, dcid in adddst:
851 856 if b in explicit:
852 857 explicit.remove(b)
853 858 # treat as "deleted locally"
854 859 pushop.outbookmarks.append((b, dcid, b''))
855 860 # identical bookmarks shouldn't get reported
856 861 for b, scid, dcid in same:
857 862 if b in explicit:
858 863 explicit.remove(b)
859 864
860 865 if explicit:
861 866 explicit = sorted(explicit)
862 867 # we should probably list all of them
863 868 pushop.ui.warn(
864 869 _(
865 870 b'bookmark %s does not exist on the local '
866 871 b'or remote repository!\n'
867 872 )
868 873 % explicit[0]
869 874 )
870 875 pushop.bkresult = 2
871 876
872 877 pushop.outbookmarks.sort()
873 878
874 879
875 880 def _pushcheckoutgoing(pushop):
876 881 outgoing = pushop.outgoing
877 882 unfi = pushop.repo.unfiltered()
878 883 if not outgoing.missing:
879 884 # nothing to push
880 885 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
881 886 return False
882 887 # something to push
883 888 if not pushop.force:
884 889 # if repo.obsstore is falsy --> no obsolete markers,
885 890 # so we can skip the iteration
886 891 if unfi.obsstore:
887 892 # these messages are here for 80 char limit reasons
888 893 mso = _(b"push includes obsolete changeset: %s!")
889 894 mspd = _(b"push includes phase-divergent changeset: %s!")
890 895 mscd = _(b"push includes content-divergent changeset: %s!")
891 896 mst = {
892 897 b"orphan": _(b"push includes orphan changeset: %s!"),
893 898 b"phase-divergent": mspd,
894 899 b"content-divergent": mscd,
895 900 }
896 901 # If we are pushing and there is at least one
897 902 # obsolete or unstable changeset in missing, at
898 903 # least one of the missing heads will be obsolete or
899 904 # unstable. So checking heads only is ok
900 905 for node in outgoing.missingheads:
901 906 ctx = unfi[node]
902 907 if ctx.obsolete():
903 908 raise error.Abort(mso % ctx)
904 909 elif ctx.isunstable():
905 910 # TODO print more than one instability in the abort
906 911 # message
907 912 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
908 913
909 914 discovery.checkheads(pushop)
910 915 return True
911 916
912 917
913 918 # List of names of steps to perform for an outgoing bundle2, order matters.
914 919 b2partsgenorder = []
915 920
916 921 # Mapping between step name and function
917 922 #
918 923 # This exists to help extensions wrap steps if necessary
919 924 b2partsgenmapping = {}
920 925
921 926
922 927 def b2partsgenerator(stepname, idx=None):
923 928 """decorator for function generating bundle2 part
924 929
925 930 The function is added to the step -> function mapping and appended to the
926 931 list of steps. Beware that decorated functions will be added in order
927 932 (this may matter).
928 933
929 934 You can only use this decorator for new steps; if you want to wrap a step
930 935 from an extension, change the b2partsgenmapping dictionary directly."""
931 936
932 937 def dec(func):
933 938 assert stepname not in b2partsgenmapping
934 939 b2partsgenmapping[stepname] = func
935 940 if idx is None:
936 941 b2partsgenorder.append(stepname)
937 942 else:
938 943 b2partsgenorder.insert(idx, stepname)
939 944 return func
940 945
941 946 return dec
942 947
943 948
944 949 def _pushb2ctxcheckheads(pushop, bundler):
945 950 """Generate race condition checking parts
946 951
947 952 Exists as an independent function to aid extensions
948 953 """
949 954 # * 'force' does not check for push race,
950 955 # * if we don't push anything, there is nothing to check.
951 956 if not pushop.force and pushop.outgoing.missingheads:
952 957 allowunrelated = b'related' in bundler.capabilities.get(
953 958 b'checkheads', ()
954 959 )
955 960 emptyremote = pushop.pushbranchmap is None
956 961 if not allowunrelated or emptyremote:
957 962 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
958 963 else:
959 964 affected = set()
960 965 for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
961 966 remoteheads, newheads, unsyncedheads, discardedheads = heads
962 967 if remoteheads is not None:
963 968 remote = set(remoteheads)
964 969 affected |= set(discardedheads) & remote
965 970 affected |= remote - set(newheads)
966 971 if affected:
967 972 data = iter(sorted(affected))
968 973 bundler.newpart(b'check:updated-heads', data=data)
969 974
970 975
971 976 def _pushing(pushop):
972 977 """return True if we are pushing anything"""
973 978 return bool(
974 979 pushop.outgoing.missing
975 980 or pushop.outdatedphases
976 981 or pushop.outobsmarkers
977 982 or pushop.outbookmarks
978 983 )
979 984
980 985
981 986 @b2partsgenerator(b'check-bookmarks')
982 987 def _pushb2checkbookmarks(pushop, bundler):
983 988 """insert bookmark move checking"""
984 989 if not _pushing(pushop) or pushop.force:
985 990 return
986 991 b2caps = bundle2.bundle2caps(pushop.remote)
987 992 hasbookmarkcheck = b'bookmarks' in b2caps
988 993 if not (pushop.outbookmarks and hasbookmarkcheck):
989 994 return
990 995 data = []
991 996 for book, old, new in pushop.outbookmarks:
992 997 data.append((book, old))
993 998 checkdata = bookmod.binaryencode(data)
994 999 bundler.newpart(b'check:bookmarks', data=checkdata)
995 1000
996 1001
997 1002 @b2partsgenerator(b'check-phases')
998 1003 def _pushb2checkphases(pushop, bundler):
999 1004 """insert phase move checking"""
1000 1005 if not _pushing(pushop) or pushop.force:
1001 1006 return
1002 1007 b2caps = bundle2.bundle2caps(pushop.remote)
1003 1008 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1004 1009 if pushop.remotephases is not None and hasphaseheads:
1005 1010 # check that the remote phase has not changed
1006 1011 checks = [[] for p in phases.allphases]
1007 1012 checks[phases.public].extend(pushop.remotephases.publicheads)
1008 1013 checks[phases.draft].extend(pushop.remotephases.draftroots)
1009 1014 if any(checks):
1010 1015 for nodes in checks:
1011 1016 nodes.sort()
1012 1017 checkdata = phases.binaryencode(checks)
1013 1018 bundler.newpart(b'check:phases', data=checkdata)
1014 1019
1015 1020
1016 1021 @b2partsgenerator(b'changeset')
1017 1022 def _pushb2ctx(pushop, bundler):
1018 1023 """handle changegroup push through bundle2
1019 1024
1020 1025 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
1021 1026 """
1022 1027 if b'changesets' in pushop.stepsdone:
1023 1028 return
1024 1029 pushop.stepsdone.add(b'changesets')
1025 1030 # Send known heads to the server for race detection.
1026 1031 if not _pushcheckoutgoing(pushop):
1027 1032 return
1028 1033 pushop.repo.prepushoutgoinghooks(pushop)
1029 1034
1030 1035 _pushb2ctxcheckheads(pushop, bundler)
1031 1036
1032 1037 b2caps = bundle2.bundle2caps(pushop.remote)
1033 1038 version = b'01'
1034 1039 cgversions = b2caps.get(b'changegroup')
1035 1040 if cgversions: # 3.1 and 3.2 ship with an empty value
1036 1041 cgversions = [
1037 1042 v
1038 1043 for v in cgversions
1039 1044 if v in changegroup.supportedoutgoingversions(pushop.repo)
1040 1045 ]
1041 1046 if not cgversions:
1042 1047 raise error.Abort(_(b'no common changegroup version'))
1043 1048 version = max(cgversions)
1044 1049 cgstream = changegroup.makestream(
1045 1050 pushop.repo, pushop.outgoing, version, b'push'
1046 1051 )
1047 1052 cgpart = bundler.newpart(b'changegroup', data=cgstream)
1048 1053 if cgversions:
1049 1054 cgpart.addparam(b'version', version)
1050 1055 if b'treemanifest' in pushop.repo.requirements:
1051 1056 cgpart.addparam(b'treemanifest', b'1')
1052 1057 if b'exp-sidedata-flag' in pushop.repo.requirements:
1053 1058 cgpart.addparam(b'exp-sidedata', b'1')
1054 1059
1055 1060 def handlereply(op):
1056 1061 """extract addchangegroup returns from server reply"""
1057 1062 cgreplies = op.records.getreplies(cgpart.id)
1058 1063 assert len(cgreplies[b'changegroup']) == 1
1059 1064 pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
1060 1065
1061 1066 return handlereply
1062 1067
1063 1068
1064 1069 @b2partsgenerator(b'phase')
1065 1070 def _pushb2phases(pushop, bundler):
1066 1071 """handle phase push through bundle2"""
1067 1072 if b'phases' in pushop.stepsdone:
1068 1073 return
1069 1074 b2caps = bundle2.bundle2caps(pushop.remote)
1070 1075 ui = pushop.repo.ui
1071 1076
1072 1077 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1073 1078 haspushkey = b'pushkey' in b2caps
1074 1079 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1075 1080
1076 1081 if hasphaseheads and not legacyphase:
1077 1082 return _pushb2phaseheads(pushop, bundler)
1078 1083 elif haspushkey:
1079 1084 return _pushb2phasespushkey(pushop, bundler)
1080 1085
1081 1086
1082 1087 def _pushb2phaseheads(pushop, bundler):
1083 1088 """push phase information through a bundle2 - binary part"""
1084 1089 pushop.stepsdone.add(b'phases')
1085 1090 if pushop.outdatedphases:
1086 1091 updates = [[] for p in phases.allphases]
1087 1092 updates[0].extend(h.node() for h in pushop.outdatedphases)
1088 1093 phasedata = phases.binaryencode(updates)
1089 1094 bundler.newpart(b'phase-heads', data=phasedata)
1090 1095
1091 1096
1092 1097 def _pushb2phasespushkey(pushop, bundler):
1093 1098 """push phase information through a bundle2 - pushkey part"""
1094 1099 pushop.stepsdone.add(b'phases')
1095 1100 part2node = []
1096 1101
1097 1102 def handlefailure(pushop, exc):
1098 1103 targetid = int(exc.partid)
1099 1104 for partid, node in part2node:
1100 1105 if partid == targetid:
1101 1106 raise error.Abort(_(b'updating %s to public failed') % node)
1102 1107
1103 1108 enc = pushkey.encode
1104 1109 for newremotehead in pushop.outdatedphases:
1105 1110 part = bundler.newpart(b'pushkey')
1106 1111 part.addparam(b'namespace', enc(b'phases'))
1107 1112 part.addparam(b'key', enc(newremotehead.hex()))
1108 1113 part.addparam(b'old', enc(b'%d' % phases.draft))
1109 1114 part.addparam(b'new', enc(b'%d' % phases.public))
1110 1115 part2node.append((part.id, newremotehead))
1111 1116 pushop.pkfailcb[part.id] = handlefailure
1112 1117
1113 1118 def handlereply(op):
1114 1119 for partid, node in part2node:
1115 1120 partrep = op.records.getreplies(partid)
1116 1121 results = partrep[b'pushkey']
1117 1122 assert len(results) <= 1
1118 1123 msg = None
1119 1124 if not results:
1120 1125 msg = _(b'server ignored update of %s to public!\n') % node
1121 1126 elif not int(results[0][b'return']):
1122 1127 msg = _(b'updating %s to public failed!\n') % node
1123 1128 if msg is not None:
1124 1129 pushop.ui.warn(msg)
1125 1130
1126 1131 return handlereply
1127 1132
1128 1133
1129 1134 @b2partsgenerator(b'obsmarkers')
1130 1135 def _pushb2obsmarkers(pushop, bundler):
1131 1136 if b'obsmarkers' in pushop.stepsdone:
1132 1137 return
1133 1138 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
1134 1139 if obsolete.commonversion(remoteversions) is None:
1135 1140 return
1136 1141 pushop.stepsdone.add(b'obsmarkers')
1137 1142 if pushop.outobsmarkers:
1138 1143 markers = sorted(pushop.outobsmarkers)
1139 1144 bundle2.buildobsmarkerspart(bundler, markers)
1140 1145
1141 1146
1142 1147 @b2partsgenerator(b'bookmarks')
1143 1148 def _pushb2bookmarks(pushop, bundler):
1144 1149 """handle bookmark push through bundle2"""
1145 1150 if b'bookmarks' in pushop.stepsdone:
1146 1151 return
1147 1152 b2caps = bundle2.bundle2caps(pushop.remote)
1148 1153
1149 1154 legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
1150 1155 legacybooks = b'bookmarks' in legacy
1151 1156
1152 1157 if not legacybooks and b'bookmarks' in b2caps:
1153 1158 return _pushb2bookmarkspart(pushop, bundler)
1154 1159 elif b'pushkey' in b2caps:
1155 1160 return _pushb2bookmarkspushkey(pushop, bundler)
1156 1161
1157 1162
1158 1163 def _bmaction(old, new):
1159 1164 """small utility for bookmark pushing"""
1160 1165 if not old:
1161 1166 return b'export'
1162 1167 elif not new:
1163 1168 return b'delete'
1164 1169 return b'update'
1165 1170
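The mapping reads as a small table (old/new are bookmark nodes, b'' meaning absent on that side):

    old    new    action
    b''    node   b'export'
    node   b''    b'delete'
    node   node   b'update'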
1166 1171
1167 1172 def _abortonsecretctx(pushop, node, b):
1168 1173 """abort if a given bookmark points to a secret changeset"""
1169 1174 if node and pushop.repo[node].phase() == phases.secret:
1170 1175 raise error.Abort(
1171 1176 _(b'cannot push bookmark %s as it points to a secret changeset') % b
1172 1177 )
1173 1178
1174 1179
1175 1180 def _pushb2bookmarkspart(pushop, bundler):
1176 1181 pushop.stepsdone.add(b'bookmarks')
1177 1182 if not pushop.outbookmarks:
1178 1183 return
1179 1184
1180 1185 allactions = []
1181 1186 data = []
1182 1187 for book, old, new in pushop.outbookmarks:
1183 1188 _abortonsecretctx(pushop, new, book)
1184 1189 data.append((book, new))
1185 1190 allactions.append((book, _bmaction(old, new)))
1186 1191 checkdata = bookmod.binaryencode(data)
1187 1192 bundler.newpart(b'bookmarks', data=checkdata)
1188 1193
1189 1194 def handlereply(op):
1190 1195 ui = pushop.ui
1191 1196 # if success
1192 1197 for book, action in allactions:
1193 1198 ui.status(bookmsgmap[action][0] % book)
1194 1199
1195 1200 return handlereply
1196 1201
1197 1202
1198 1203 def _pushb2bookmarkspushkey(pushop, bundler):
1199 1204 pushop.stepsdone.add(b'bookmarks')
1200 1205 part2book = []
1201 1206 enc = pushkey.encode
1202 1207
1203 1208 def handlefailure(pushop, exc):
1204 1209 targetid = int(exc.partid)
1205 1210 for partid, book, action in part2book:
1206 1211 if partid == targetid:
1207 1212 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1208 1213 # we should not be called for parts we did not generate
1209 1214 assert False
1210 1215
1211 1216 for book, old, new in pushop.outbookmarks:
1212 1217 _abortonsecretctx(pushop, new, book)
1213 1218 part = bundler.newpart(b'pushkey')
1214 1219 part.addparam(b'namespace', enc(b'bookmarks'))
1215 1220 part.addparam(b'key', enc(book))
1216 1221 part.addparam(b'old', enc(hex(old)))
1217 1222 part.addparam(b'new', enc(hex(new)))
1218 1223 action = b'update'
1219 1224 if not old:
1220 1225 action = b'export'
1221 1226 elif not new:
1222 1227 action = b'delete'
1223 1228 part2book.append((part.id, book, action))
1224 1229 pushop.pkfailcb[part.id] = handlefailure
1225 1230
1226 1231 def handlereply(op):
1227 1232 ui = pushop.ui
1228 1233 for partid, book, action in part2book:
1229 1234 partrep = op.records.getreplies(partid)
1230 1235 results = partrep[b'pushkey']
1231 1236 assert len(results) <= 1
1232 1237 if not results:
1233 1238 pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
1234 1239 else:
1235 1240 ret = int(results[0][b'return'])
1236 1241 if ret:
1237 1242 ui.status(bookmsgmap[action][0] % book)
1238 1243 else:
1239 1244 ui.warn(bookmsgmap[action][1] % book)
1240 1245 if pushop.bkresult is not None:
1241 1246 pushop.bkresult = 1
1242 1247
1243 1248 return handlereply
1244 1249
1245 1250
1246 1251 @b2partsgenerator(b'pushvars', idx=0)
1247 1252 def _getbundlesendvars(pushop, bundler):
1248 1253 '''send shellvars via bundle2'''
1249 1254 pushvars = pushop.pushvars
1250 1255 if pushvars:
1251 1256 shellvars = {}
1252 1257 for raw in pushvars:
1253 1258 if b'=' not in raw:
1254 1259 msg = (
1255 1260 b"unable to parse variable '%s', should follow "
1256 1261 b"'KEY=VALUE' or 'KEY=' format"
1257 1262 )
1258 1263 raise error.Abort(msg % raw)
1259 1264 k, v = raw.split(b'=', 1)
1260 1265 shellvars[k] = v
1261 1266
1262 1267 part = bundler.newpart(b'pushvars')
1263 1268
1264 1269 for key, value in pycompat.iteritems(shellvars):
1265 1270 part.addparam(key, value, mandatory=False)
1266 1271
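This is the part that carries client-side push variables, e.g. (assuming the server has enabled pushvars, hooks then see HG_USERVAR_DEBUG):

    hg push --pushvars "DEBUG=1"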
1267 1272
1268 1273 def _pushbundle2(pushop):
1269 1274 """push data to the remote using bundle2
1270 1275
1271 1276 The only currently supported type of data is changegroup but this will
1272 1277 evolve in the future."""
1273 1278 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1274 1279 pushback = pushop.trmanager and pushop.ui.configbool(
1275 1280 b'experimental', b'bundle2.pushback'
1276 1281 )
1277 1282
1278 1283 # create reply capability
1279 1284 capsblob = bundle2.encodecaps(
1280 1285 bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
1281 1286 )
1282 1287 bundler.newpart(b'replycaps', data=capsblob)
1283 1288 replyhandlers = []
1284 1289 for partgenname in b2partsgenorder:
1285 1290 partgen = b2partsgenmapping[partgenname]
1286 1291 ret = partgen(pushop, bundler)
1287 1292 if callable(ret):
1288 1293 replyhandlers.append(ret)
1289 1294 # do not push if nothing to push
1290 1295 if bundler.nbparts <= 1:
1291 1296 return
1292 1297 stream = util.chunkbuffer(bundler.getchunks())
1293 1298 try:
1294 1299 try:
1295 1300 with pushop.remote.commandexecutor() as e:
1296 1301 reply = e.callcommand(
1297 1302 b'unbundle',
1298 1303 {
1299 1304 b'bundle': stream,
1300 1305 b'heads': [b'force'],
1301 1306 b'url': pushop.remote.url(),
1302 1307 },
1303 1308 ).result()
1304 1309 except error.BundleValueError as exc:
1305 1310 raise error.Abort(_(b'missing support for %s') % exc)
1306 1311 try:
1307 1312 trgetter = None
1308 1313 if pushback:
1309 1314 trgetter = pushop.trmanager.transaction
1310 1315 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1311 1316 except error.BundleValueError as exc:
1312 1317 raise error.Abort(_(b'missing support for %s') % exc)
1313 1318 except bundle2.AbortFromPart as exc:
1314 1319 pushop.ui.status(_(b'remote: %s\n') % exc)
1315 1320 if exc.hint is not None:
1316 1321 pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1317 1322 raise error.Abort(_(b'push failed on remote'))
1318 1323 except error.PushkeyFailed as exc:
1319 1324 partid = int(exc.partid)
1320 1325 if partid not in pushop.pkfailcb:
1321 1326 raise
1322 1327 pushop.pkfailcb[partid](pushop, exc)
1323 1328 for rephand in replyhandlers:
1324 1329 rephand(op)
1325 1330
1326 1331
1327 1332 def _pushchangeset(pushop):
1328 1333 """Make the actual push of changeset bundle to remote repo"""
1329 1334 if b'changesets' in pushop.stepsdone:
1330 1335 return
1331 1336 pushop.stepsdone.add(b'changesets')
1332 1337 if not _pushcheckoutgoing(pushop):
1333 1338 return
1334 1339
1335 1340 # Should have verified this in push().
1336 1341 assert pushop.remote.capable(b'unbundle')
1337 1342
1338 1343 pushop.repo.prepushoutgoinghooks(pushop)
1339 1344 outgoing = pushop.outgoing
1340 1345 # TODO: get bundlecaps from remote
1341 1346 bundlecaps = None
1342 1347 # create a changegroup from local
1343 1348 if pushop.revs is None and not (
1344 1349 outgoing.excluded or pushop.repo.changelog.filteredrevs
1345 1350 ):
1346 1351 # push everything,
1347 1352 # use the fast path, no race possible on push
1348 1353 cg = changegroup.makechangegroup(
1349 1354 pushop.repo,
1350 1355 outgoing,
1351 1356 b'01',
1352 1357 b'push',
1353 1358 fastpath=True,
1354 1359 bundlecaps=bundlecaps,
1355 1360 )
1356 1361 else:
1357 1362 cg = changegroup.makechangegroup(
1358 1363 pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
1359 1364 )
1360 1365
1361 1366 # apply changegroup to remote
1362 1367 # local repo finds heads on server, finds out what
1363 1368 # revs it must push. once revs transferred, if server
1364 1369 # finds it has different heads (someone else won
1365 1370 # commit/push race), server aborts.
1366 1371 if pushop.force:
1367 1372 remoteheads = [b'force']
1368 1373 else:
1369 1374 remoteheads = pushop.remoteheads
1370 1375 # ssh: return remote's addchangegroup()
1371 1376 # http: return remote's addchangegroup() or 0 for error
1372 1377 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1373 1378
1374 1379
1375 1380 def _pushsyncphase(pushop):
1376 1381 """synchronise phase information locally and remotely"""
1377 1382 cheads = pushop.commonheads
1378 1383 # even when we don't push, exchanging phase data is useful
1379 1384 remotephases = listkeys(pushop.remote, b'phases')
1380 1385 if (
1381 1386 pushop.ui.configbool(b'ui', b'_usedassubrepo')
1382 1387 and remotephases # server supports phases
1383 1388 and pushop.cgresult is None # nothing was pushed
1384 1389 and remotephases.get(b'publishing', False)
1385 1390 ):
1386 1391 # When:
1387 1392 # - this is a subrepo push
1388 1393 # - and remote supports phases
1389 1394 # - and no changesets were pushed
1390 1395 # - and remote is publishing
1391 1396 # We may be in issue 3871 case!
1392 1397 # We drop the phase synchronisation normally done as a
1393 1398 # courtesy, which would publish changesets that are locally
1394 1399 # draft but present on the remote.
1395 1400 remotephases = {b'publishing': b'True'}
1396 1401 if not remotephases: # old server or public only reply from non-publishing
1397 1402 _localphasemove(pushop, cheads)
1398 1403 # don't push any phase data as there is nothing to push
1399 1404 else:
1400 1405 ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
1401 1406 pheads, droots = ana
1402 1407 ### Apply remote phase on local
1403 1408 if remotephases.get(b'publishing', False):
1404 1409 _localphasemove(pushop, cheads)
1405 1410 else: # publish = False
1406 1411 _localphasemove(pushop, pheads)
1407 1412 _localphasemove(pushop, cheads, phases.draft)
1408 1413 ### Apply local phase on remote
1409 1414
1410 1415 if pushop.cgresult:
1411 1416 if b'phases' in pushop.stepsdone:
1412 1417 # phases already pushed though bundle2
1413 1418 return
1414 1419 outdated = pushop.outdatedphases
1415 1420 else:
1416 1421 outdated = pushop.fallbackoutdatedphases
1417 1422
1418 1423 pushop.stepsdone.add(b'phases')
1419 1424
1420 1425 # filter heads already turned public by the push
1421 1426 outdated = [c for c in outdated if c.node() not in pheads]
1422 1427 # fallback to independent pushkey command
1423 1428 for newremotehead in outdated:
1424 1429 with pushop.remote.commandexecutor() as e:
1425 1430 r = e.callcommand(
1426 1431 b'pushkey',
1427 1432 {
1428 1433 b'namespace': b'phases',
1429 1434 b'key': newremotehead.hex(),
1430 1435 b'old': b'%d' % phases.draft,
1431 1436 b'new': b'%d' % phases.public,
1432 1437 },
1433 1438 ).result()
1434 1439
1435 1440 if not r:
1436 1441 pushop.ui.warn(
1437 1442 _(b'updating %s to public failed!\n') % newremotehead
1438 1443 )
1439 1444
1440 1445
1441 1446 def _localphasemove(pushop, nodes, phase=phases.public):
1442 1447 """move <nodes> to <phase> in the local source repo"""
1443 1448 if pushop.trmanager:
1444 1449 phases.advanceboundary(
1445 1450 pushop.repo, pushop.trmanager.transaction(), phase, nodes
1446 1451 )
1447 1452 else:
1448 1453 # repo is not locked, do not change any phases!
1449 1454 # Informs the user that phases should have been moved when
1450 1455 # applicable.
1451 1456 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1452 1457 phasestr = phases.phasenames[phase]
1453 1458 if actualmoves:
1454 1459 pushop.ui.status(
1455 1460 _(
1456 1461 b'cannot lock source repo, skipping '
1457 1462 b'local %s phase update\n'
1458 1463 )
1459 1464 % phasestr
1460 1465 )
1461 1466
1462 1467
1463 1468 def _pushobsolete(pushop):
1464 1469 """utility function to push obsolete markers to a remote"""
1465 1470 if b'obsmarkers' in pushop.stepsdone:
1466 1471 return
1467 1472 repo = pushop.repo
1468 1473 remote = pushop.remote
1469 1474 pushop.stepsdone.add(b'obsmarkers')
1470 1475 if pushop.outobsmarkers:
1471 1476 pushop.ui.debug(b'try to push obsolete markers to remote\n')
1472 1477 rslts = []
1473 1478 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1474 1479 for key in sorted(remotedata, reverse=True):
1475 1480 # reverse sort to ensure we end with dump0
1476 1481 data = remotedata[key]
1477 1482 rslts.append(remote.pushkey(b'obsolete', key, b'', data))
1478 1483 if [r for r in rslts if not r]:
1479 1484 msg = _(b'failed to push some obsolete markers!\n')
1480 1485 repo.ui.warn(msg)
1481 1486
1482 1487
1483 1488 def _pushbookmark(pushop):
1484 1489 """Update bookmark position on remote"""
1485 1490 if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
1486 1491 return
1487 1492 pushop.stepsdone.add(b'bookmarks')
1488 1493 ui = pushop.ui
1489 1494 remote = pushop.remote
1490 1495
1491 1496 for b, old, new in pushop.outbookmarks:
1492 1497 action = b'update'
1493 1498 if not old:
1494 1499 action = b'export'
1495 1500 elif not new:
1496 1501 action = b'delete'
1497 1502
1498 1503 with remote.commandexecutor() as e:
1499 1504 r = e.callcommand(
1500 1505 b'pushkey',
1501 1506 {
1502 1507 b'namespace': b'bookmarks',
1503 1508 b'key': b,
1504 1509 b'old': hex(old),
1505 1510 b'new': hex(new),
1506 1511 },
1507 1512 ).result()
1508 1513
1509 1514 if r:
1510 1515 ui.status(bookmsgmap[action][0] % b)
1511 1516 else:
1512 1517 ui.warn(bookmsgmap[action][1] % b)
1513 1518 # discovery can have set the value from an invalid entry
1514 1519 if pushop.bkresult is not None:
1515 1520 pushop.bkresult = 1
1516 1521
1517 1522
1518 1523 class pulloperation(object):
1519 1524 """A object that represent a single pull operation
1520 1525
1521 1526 It purpose is to carry pull related state and very common operation.
1522 1527
1523 1528 A new should be created at the beginning of each pull and discarded
1524 1529 afterward.
1525 1530 """
1526 1531
1527 1532 def __init__(
1528 1533 self,
1529 1534 repo,
1530 1535 remote,
1531 1536 heads=None,
1532 1537 force=False,
1533 1538 bookmarks=(),
1534 1539 remotebookmarks=None,
1535 1540 streamclonerequested=None,
1536 1541 includepats=None,
1537 1542 excludepats=None,
1538 1543 depth=None,
1539 1544 ):
1540 1545 # repo we pull into
1541 1546 self.repo = repo
1542 1547 # repo we pull from
1543 1548 self.remote = remote
1544 1549 # revision we try to pull (None is "all")
1545 1550 self.heads = heads
1546 1551 # bookmark pulled explicitly
1547 1552 self.explicitbookmarks = [
1548 1553 repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
1549 1554 ]
1550 1555 # do we force pull?
1551 1556 self.force = force
1552 1557 # whether a streaming clone was requested
1553 1558 self.streamclonerequested = streamclonerequested
1554 1559 # transaction manager
1555 1560 self.trmanager = None
1556 1561 # set of common changesets between local and remote before pull
1557 1562 self.common = None
1558 1563 # set of pulled heads
1559 1564 self.rheads = None
1560 1565 # list of missing changesets to fetch remotely
1561 1566 self.fetch = None
1562 1567 # remote bookmarks data
1563 1568 self.remotebookmarks = remotebookmarks
1564 1569 # result of changegroup pulling (used as return code by pull)
1565 1570 self.cgresult = None
1566 1571 # list of step already done
1567 1572 self.stepsdone = set()
1568 1573 # Whether we attempted a clone from pre-generated bundles.
1569 1574 self.clonebundleattempted = False
1570 1575 # Set of file patterns to include.
1571 1576 self.includepats = includepats
1572 1577 # Set of file patterns to exclude.
1573 1578 self.excludepats = excludepats
1574 1579 # Number of ancestor changesets to pull from each pulled head.
1575 1580 self.depth = depth
1576 1581
1577 1582 @util.propertycache
1578 1583 def pulledsubset(self):
1579 1584 """heads of the set of changeset target by the pull"""
1580 1585 # compute target subset
1581 1586 if self.heads is None:
1582 1587 # We pulled everything possible
1583 1588 # sync on everything common
1584 1589 c = set(self.common)
1585 1590 ret = list(self.common)
1586 1591 for n in self.rheads:
1587 1592 if n not in c:
1588 1593 ret.append(n)
1589 1594 return ret
1590 1595 else:
1591 1596 # We pulled a specific subset
1592 1597 # sync on this subset
1593 1598 return self.heads
1594 1599
1595 1600 @util.propertycache
1596 1601 def canusebundle2(self):
1597 1602 return not _forcebundle1(self)
1598 1603
1599 1604 @util.propertycache
1600 1605 def remotebundle2caps(self):
1601 1606 return bundle2.bundle2caps(self.remote)
1602 1607
1603 1608 def gettransaction(self):
1604 1609 # deprecated; talk to trmanager directly
1605 1610 return self.trmanager.transaction()
1606 1611
1607 1612
1608 1613 class transactionmanager(util.transactional):
1609 1614 """An object to manage the life cycle of a transaction
1610 1615
1611 1616 It creates the transaction on demand and calls the appropriate hooks when
1612 1617 closing the transaction."""
1613 1618
1614 1619 def __init__(self, repo, source, url):
1615 1620 self.repo = repo
1616 1621 self.source = source
1617 1622 self.url = url
1618 1623 self._tr = None
1619 1624
1620 1625 def transaction(self):
1621 1626 """Return an open transaction object, constructing if necessary"""
1622 1627 if not self._tr:
1623 1628 trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
1624 1629 self._tr = self.repo.transaction(trname)
1625 1630 self._tr.hookargs[b'source'] = self.source
1626 1631 self._tr.hookargs[b'url'] = self.url
1627 1632 return self._tr
1628 1633
1629 1634 def close(self):
1630 1635 """close transaction if created"""
1631 1636 if self._tr is not None:
1632 1637 self._tr.close()
1633 1638
1634 1639 def release(self):
1635 1640 """release transaction if created"""
1636 1641 if self._tr is not None:
1637 1642 self._tr.release()
1638 1643
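# Editor's sketch: the create-on-demand pattern transactionmanager uses,
# reduced to a standalone toy. All names below are invented for
# illustration; this is not Mercurial API.
class _lazytxnsketch(object):
    def __init__(self, factory):
        self._factory = factory  # callable that builds the real transaction
        self._tr = None

    def transaction(self):
        if self._tr is None:  # materialize only on first use
            self._tr = self._factory()
        return self._tr

    def close(self):  # commit, but only if a transaction was created
        if self._tr is not None:
            self._tr.close()

    def release(self):  # abort/rollback, but only if one was created
        if self._tr is not None:
            self._tr.release()
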
1639 1644
1640 1645 def listkeys(remote, namespace):
1641 1646 with remote.commandexecutor() as e:
1642 1647 return e.callcommand(b'listkeys', {b'namespace': namespace}).result()
1643 1648
1644 1649
1645 1650 def _fullpullbundle2(repo, pullop):
1646 1651 # The server may send a partial reply, i.e. when inlining
1647 1652 # pre-computed bundles. In that case, update the common
1648 1653 # set based on the results and pull another bundle.
1649 1654 #
1650 1655 # There are two indicators that the process is finished:
1651 1656 # - no changeset has been added, or
1652 1657 # - all remote heads are known locally.
1653 1658 # The head check must use the unfiltered view, as obsolescence
1654 1659 # markers can hide heads.
1655 1660 unfi = repo.unfiltered()
1656 1661 unficl = unfi.changelog
1657 1662
1658 1663 def headsofdiff(h1, h2):
1659 1664 """Returns heads(h1 % h2)"""
1660 1665 res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
1661 1666 return set(ctx.node() for ctx in res)
1662 1667
1663 1668 def headsofunion(h1, h2):
1664 1669 """Returns heads((h1 + h2) - null)"""
1665 1670 res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
1666 1671 return set(ctx.node() for ctx in res)
1667 1672
1668 1673 while True:
1669 1674 old_heads = unficl.heads()
1670 1675 clstart = len(unficl)
1671 1676 _pullbundle2(pullop)
1672 1677 if repository.NARROW_REQUIREMENT in repo.requirements:
1673 1678 # XXX narrow clones filter the heads on the server side during
1674 1679 # XXX getbundle and result in partial replies as well.
1675 1680 # XXX Disable pull bundles in this case as band aid to avoid
1676 1681 # XXX extra round trips.
1677 1682 break
1678 1683 if clstart == len(unficl):
1679 1684 break
1680 1685 if all(unficl.hasnode(n) for n in pullop.rheads):
1681 1686 break
1682 1687 new_heads = headsofdiff(unficl.heads(), old_heads)
1683 1688 pullop.common = headsofunion(new_heads, pullop.common)
1684 1689 pullop.rheads = set(pullop.rheads) - pullop.common
1685 1690
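# Editor's sketch: the termination logic of _fullpullbundle2 above, as a
# toy loop with invented names (not Mercurial API). Pulling repeats while
# the last round trip added changesets and some remote heads remain
# unknown locally.
def _pulluntilcompletesketch(pull_once, local_has, remote_heads):
    while True:
        added = pull_once()  # one _pullbundle2-style round trip
        if not added:
            break  # indicator 1: no changeset was added
        if all(local_has(h) for h in remote_heads):
            break  # indicator 2: all remote heads are known locally
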
1686 1691
1687 1692 def pull(
1688 1693 repo,
1689 1694 remote,
1690 1695 heads=None,
1691 1696 force=False,
1692 1697 bookmarks=(),
1693 1698 opargs=None,
1694 1699 streamclonerequested=None,
1695 1700 includepats=None,
1696 1701 excludepats=None,
1697 1702 depth=None,
1698 1703 ):
1699 1704 """Fetch repository data from a remote.
1700 1705
1701 1706 This is the main function used to retrieve data from a remote repository.
1702 1707
1703 1708 ``repo`` is the local repository to clone into.
1704 1709 ``remote`` is a peer instance.
1705 1710 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1706 1711 default) means to pull everything from the remote.
1707 1712 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1708 1713 default, all remote bookmarks are pulled.
1709 1714 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1710 1715 initialization.
1711 1716 ``streamclonerequested`` is a boolean indicating whether a "streaming
1712 1717 clone" is requested. A "streaming clone" is essentially a raw file copy
1713 1718 of revlogs from the server. This only works when the local repository is
1714 1719 empty. The default value of ``None`` means to respect the server
1715 1720 configuration for preferring stream clones.
1716 1721 ``includepats`` and ``excludepats`` define explicit file patterns to
1717 1722 include and exclude in storage, respectively. If not defined, narrow
1718 1723 patterns from the repo instance are used, if available.
1719 1724 ``depth`` is an integer indicating the DAG depth of history we're
1720 1725 interested in. If defined, for each revision specified in ``heads``, we
1721 1726 will fetch up to this many of its ancestors and data associated with them.
1722 1727
1723 1728 Returns the ``pulloperation`` created for this pull.
1724 1729 """
1725 1730 if opargs is None:
1726 1731 opargs = {}
1727 1732
1728 1733 # We allow the narrow patterns to be passed in explicitly to provide more
1729 1734 # flexibility for API consumers.
1730 1735 if includepats or excludepats:
1731 1736 includepats = includepats or set()
1732 1737 excludepats = excludepats or set()
1733 1738 else:
1734 1739 includepats, excludepats = repo.narrowpats
1735 1740
1736 1741 narrowspec.validatepatterns(includepats)
1737 1742 narrowspec.validatepatterns(excludepats)
1738 1743
1739 1744 pullop = pulloperation(
1740 1745 repo,
1741 1746 remote,
1742 1747 heads,
1743 1748 force,
1744 1749 bookmarks=bookmarks,
1745 1750 streamclonerequested=streamclonerequested,
1746 1751 includepats=includepats,
1747 1752 excludepats=excludepats,
1748 1753 depth=depth,
1749 1754 **pycompat.strkwargs(opargs)
1750 1755 )
1751 1756
1752 1757 peerlocal = pullop.remote.local()
1753 1758 if peerlocal:
1754 1759 missing = set(peerlocal.requirements) - pullop.repo.supported
1755 1760 if missing:
1756 1761 msg = _(
1757 1762 b"required features are not"
1758 1763 b" supported in the destination:"
1759 1764 b" %s"
1760 1765 ) % (b', '.join(sorted(missing)))
1761 1766 raise error.Abort(msg)
1762 1767
1763 1768 pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
1764 1769 wlock = util.nullcontextmanager()
1765 1770 if not bookmod.bookmarksinstore(repo):
1766 1771 wlock = repo.wlock()
1767 1772 with wlock, repo.lock(), pullop.trmanager:
1768 1773 # Use the modern wire protocol, if available.
1769 1774 if remote.capable(b'command-changesetdata'):
1770 1775 exchangev2.pull(pullop)
1771 1776 else:
1772 1777 # This should ideally be in _pullbundle2(). However, it needs to run
1773 1778 # before discovery to avoid extra work.
1774 1779 _maybeapplyclonebundle(pullop)
1775 1780 streamclone.maybeperformlegacystreamclone(pullop)
1776 1781 _pulldiscovery(pullop)
1777 1782 if pullop.canusebundle2:
1778 1783 _fullpullbundle2(repo, pullop)
1779 1784 _pullchangeset(pullop)
1780 1785 _pullphase(pullop)
1781 1786 _pullbookmarks(pullop)
1782 1787 _pullobsolete(pullop)
1783 1788
1784 1789 # storing remotenames
1785 1790 if repo.ui.configbool(b'experimental', b'remotenames'):
1786 1791 logexchange.pullremotenames(repo, remote)
1787 1792
1788 1793 return pullop
1789 1794
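# Editor's sketch: roughly how a caller drives pull(), mirroring what
# commands.pull does minus option handling. The repository path and URL
# below are made up for illustration; the function is never invoked here.
def _examplepullsketch(ui):
    from mercurial import hg, exchange  # deferred imports, sketch only
    repo = hg.repository(ui, b'/path/to/local/repo')
    other = hg.peer(repo, {}, b'https://example.org/repo')
    pullop = exchange.pull(repo, other, heads=None, bookmarks=())
    return pullop.cgresult  # pull's return code comes from here
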
1790 1795
1791 1796 # list of steps to perform discovery before pull
1792 1797 pulldiscoveryorder = []
1793 1798
1794 1799 # Mapping between step name and function
1795 1800 #
1796 1801 # This exists to help extensions wrap steps if necessary
1797 1802 pulldiscoverymapping = {}
1798 1803
1799 1804
1800 1805 def pulldiscovery(stepname):
1801 1806 """decorator for function performing discovery before pull
1802 1807
1803 1808 The function is added to the step -> function mapping and appended to the
1804 1809 list of steps. Beware that decorated functions will be added in order (this
1805 1810 may matter).
1806 1811
1807 1812 You can only use this decorator for a new step; if you want to wrap a step
1808 1813 from an extension, change the pulldiscoverymapping dictionary directly."""
1809 1814
1810 1815 def dec(func):
1811 1816 assert stepname not in pulldiscoverymapping
1812 1817 pulldiscoverymapping[stepname] = func
1813 1818 pulldiscoveryorder.append(stepname)
1814 1819 return func
1815 1820
1816 1821 return dec
1817 1822
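# Editor's sketch: how an extension would register an extra discovery step
# with the decorator above (the step name and body are hypothetical). Shown
# commented out because actually registering it here would change the real
# step order, which matters.
#
#     @pulldiscovery(b'example:noop')
#     def _pulldiscoverynoop(pullop):
#         pullop.repo.ui.debug(b'example discovery step\n')
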
1818 1823
1819 1824 def _pulldiscovery(pullop):
1820 1825 """Run all discovery steps"""
1821 1826 for stepname in pulldiscoveryorder:
1822 1827 step = pulldiscoverymapping[stepname]
1823 1828 step(pullop)
1824 1829
1825 1830
1826 1831 @pulldiscovery(b'b1:bookmarks')
1827 1832 def _pullbookmarkbundle1(pullop):
1828 1833 """fetch bookmark data in bundle1 case
1829 1834
1830 1835 If not using bundle2, we have to fetch bookmarks before changeset
1831 1836 discovery to reduce the chance and impact of race conditions."""
1832 1837 if pullop.remotebookmarks is not None:
1833 1838 return
1834 1839 if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
1835 1840 # all known bundle2 servers now support listkeys, but let's be nice with
1836 1841 # new implementations.
1837 1842 return
1838 1843 books = listkeys(pullop.remote, b'bookmarks')
1839 1844 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1840 1845
1841 1846
1842 1847 @pulldiscovery(b'changegroup')
1843 1848 def _pulldiscoverychangegroup(pullop):
1844 1849 """discovery phase for the pull
1845 1850
1846 1851 Currently handles changeset discovery only; will be changed to handle all
1847 1852 discovery at some point."""
1848 1853 tmp = discovery.findcommonincoming(
1849 1854 pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
1850 1855 )
1851 1856 common, fetch, rheads = tmp
1852 1857 nm = pullop.repo.unfiltered().changelog.nodemap
1853 1858 if fetch and rheads:
1854 1859 # If a remote head is filtered locally, put it back in common.
1855 1860 #
1856 1861 # This is a hackish solution to catch most "common but locally
1857 1862 # hidden" situations. We do not perform discovery on the unfiltered
1858 1863 # repository because it ends up doing a pathological number of round
1859 1864 # trips for a huge number of changesets we do not care about.
1860 1865 #
1861 1866 # If a set of such "common but filtered" changesets exists on the server
1862 1867 # but does not include a remote head, we'll not be able to detect it.
1863 1868 scommon = set(common)
1864 1869 for n in rheads:
1865 1870 if n in nm:
1866 1871 if n not in scommon:
1867 1872 common.append(n)
1868 1873 if set(rheads).issubset(set(common)):
1869 1874 fetch = []
1870 1875 pullop.common = common
1871 1876 pullop.fetch = fetch
1872 1877 pullop.rheads = rheads
1873 1878
1874 1879
1875 1880 def _pullbundle2(pullop):
1876 1881 """pull data using bundle2
1877 1882
1878 1883 For now, the only supported data is the changegroup."""
1879 1884 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1880 1885
1881 1886 # make ui easier to access
1882 1887 ui = pullop.repo.ui
1883 1888
1884 1889 # At the moment we don't do stream clones over bundle2. If that is
1885 1890 # implemented then here's where the check for that will go.
1886 1891 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1887 1892
1888 1893 # declare pull perimeters
1889 1894 kwargs[b'common'] = pullop.common
1890 1895 kwargs[b'heads'] = pullop.heads or pullop.rheads
1891 1896
1892 1897 # check whether the server supports narrow, then add includepats and excludepats
1893 1898 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1894 1899 if servernarrow and pullop.includepats:
1895 1900 kwargs[b'includepats'] = pullop.includepats
1896 1901 if servernarrow and pullop.excludepats:
1897 1902 kwargs[b'excludepats'] = pullop.excludepats
1898 1903
1899 1904 if streaming:
1900 1905 kwargs[b'cg'] = False
1901 1906 kwargs[b'stream'] = True
1902 1907 pullop.stepsdone.add(b'changegroup')
1903 1908 pullop.stepsdone.add(b'phases')
1904 1909
1905 1910 else:
1906 1911 # pulling changegroup
1907 1912 pullop.stepsdone.add(b'changegroup')
1908 1913
1909 1914 kwargs[b'cg'] = pullop.fetch
1910 1915
1911 1916 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1912 1917 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1913 1918 if not legacyphase and hasbinaryphase:
1914 1919 kwargs[b'phases'] = True
1915 1920 pullop.stepsdone.add(b'phases')
1916 1921
1917 1922 if b'listkeys' in pullop.remotebundle2caps:
1918 1923 if b'phases' not in pullop.stepsdone:
1919 1924 kwargs[b'listkeys'] = [b'phases']
1920 1925
1921 1926 bookmarksrequested = False
1922 1927 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1923 1928 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1924 1929
1925 1930 if pullop.remotebookmarks is not None:
1926 1931 pullop.stepsdone.add(b'request-bookmarks')
1927 1932
1928 1933 if (
1929 1934 b'request-bookmarks' not in pullop.stepsdone
1930 1935 and pullop.remotebookmarks is None
1931 1936 and not legacybookmark
1932 1937 and hasbinarybook
1933 1938 ):
1934 1939 kwargs[b'bookmarks'] = True
1935 1940 bookmarksrequested = True
1936 1941
1937 1942 if b'listkeys' in pullop.remotebundle2caps:
1938 1943 if b'request-bookmarks' not in pullop.stepsdone:
1939 1944 # make sure to always include bookmark data when migrating
1940 1945 # `hg incoming --bundle` to using this function.
1941 1946 pullop.stepsdone.add(b'request-bookmarks')
1942 1947 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1943 1948
1944 1949 # If this is a full pull / clone and the server supports the clone bundles
1945 1950 # feature, tell the server whether we attempted a clone bundle. The
1946 1951 # presence of this flag indicates the client supports clone bundles. This
1947 1952 # will enable the server to treat clients that support clone bundles
1948 1953 # differently from those that don't.
1949 1954 if (
1950 1955 pullop.remote.capable(b'clonebundles')
1951 1956 and pullop.heads is None
1952 1957 and list(pullop.common) == [nullid]
1953 1958 ):
1954 1959 kwargs[b'cbattempted'] = pullop.clonebundleattempted
1955 1960
1956 1961 if streaming:
1957 1962 pullop.repo.ui.status(_(b'streaming all changes\n'))
1958 1963 elif not pullop.fetch:
1959 1964 pullop.repo.ui.status(_(b"no changes found\n"))
1960 1965 pullop.cgresult = 0
1961 1966 else:
1962 1967 if pullop.heads is None and list(pullop.common) == [nullid]:
1963 1968 pullop.repo.ui.status(_(b"requesting all changes\n"))
1964 1969 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1965 1970 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1966 1971 if obsolete.commonversion(remoteversions) is not None:
1967 1972 kwargs[b'obsmarkers'] = True
1968 1973 pullop.stepsdone.add(b'obsmarkers')
1969 1974 _pullbundle2extraprepare(pullop, kwargs)
1970 1975
1971 1976 with pullop.remote.commandexecutor() as e:
1972 1977 args = dict(kwargs)
1973 1978 args[b'source'] = b'pull'
1974 1979 bundle = e.callcommand(b'getbundle', args).result()
1975 1980
1976 1981 try:
1977 1982 op = bundle2.bundleoperation(
1978 1983 pullop.repo, pullop.gettransaction, source=b'pull'
1979 1984 )
1980 1985 op.modes[b'bookmarks'] = b'records'
1981 1986 bundle2.processbundle(pullop.repo, bundle, op=op)
1982 1987 except bundle2.AbortFromPart as exc:
1983 1988 pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
1984 1989 raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
1985 1990 except error.BundleValueError as exc:
1986 1991 raise error.Abort(_(b'missing support for %s') % exc)
1987 1992
1988 1993 if pullop.fetch:
1989 1994 pullop.cgresult = bundle2.combinechangegroupresults(op)
1990 1995
1991 1996 # processing phases change
1992 1997 for namespace, value in op.records[b'listkeys']:
1993 1998 if namespace == b'phases':
1994 1999 _pullapplyphases(pullop, value)
1995 2000
1996 2001 # processing bookmark update
1997 2002 if bookmarksrequested:
1998 2003 books = {}
1999 2004 for record in op.records[b'bookmarks']:
2000 2005 books[record[b'bookmark']] = record[b"node"]
2001 2006 pullop.remotebookmarks = books
2002 2007 else:
2003 2008 for namespace, value in op.records[b'listkeys']:
2004 2009 if namespace == b'bookmarks':
2005 2010 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
2006 2011
2007 2012 # bookmark data were either already there or pulled in the bundle
2008 2013 if pullop.remotebookmarks is not None:
2009 2014 _pullbookmarks(pullop)
2010 2015
2011 2016
2012 2017 def _pullbundle2extraprepare(pullop, kwargs):
2013 2018 """hook function so that extensions can extend the getbundle call"""
2014 2019
2015 2020
2016 2021 def _pullchangeset(pullop):
2017 2022 """pull changeset from unbundle into the local repo"""
2018 2023 # We delay opening the transaction as long as possible so we
2019 2024 # don't open a transaction for nothing, which would break a future
2020 2025 # useful rollback call
2021 2026 if b'changegroup' in pullop.stepsdone:
2022 2027 return
2023 2028 pullop.stepsdone.add(b'changegroup')
2024 2029 if not pullop.fetch:
2025 2030 pullop.repo.ui.status(_(b"no changes found\n"))
2026 2031 pullop.cgresult = 0
2027 2032 return
2028 2033 tr = pullop.gettransaction()
2029 2034 if pullop.heads is None and list(pullop.common) == [nullid]:
2030 2035 pullop.repo.ui.status(_(b"requesting all changes\n"))
2031 2036 elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
2032 2037 # issue1320, avoid a race if remote changed after discovery
2033 2038 pullop.heads = pullop.rheads
2034 2039
2035 2040 if pullop.remote.capable(b'getbundle'):
2036 2041 # TODO: get bundlecaps from remote
2037 2042 cg = pullop.remote.getbundle(
2038 2043 b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
2039 2044 )
2040 2045 elif pullop.heads is None:
2041 2046 with pullop.remote.commandexecutor() as e:
2042 2047 cg = e.callcommand(
2043 2048 b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',}
2044 2049 ).result()
2045 2050
2046 2051 elif not pullop.remote.capable(b'changegroupsubset'):
2047 2052 raise error.Abort(
2048 2053 _(
2049 2054 b"partial pull cannot be done because "
2050 2055 b"other repository doesn't support "
2051 2056 b"changegroupsubset."
2052 2057 )
2053 2058 )
2054 2059 else:
2055 2060 with pullop.remote.commandexecutor() as e:
2056 2061 cg = e.callcommand(
2057 2062 b'changegroupsubset',
2058 2063 {
2059 2064 b'bases': pullop.fetch,
2060 2065 b'heads': pullop.heads,
2061 2066 b'source': b'pull',
2062 2067 },
2063 2068 ).result()
2064 2069
2065 2070 bundleop = bundle2.applybundle(
2066 2071 pullop.repo, cg, tr, b'pull', pullop.remote.url()
2067 2072 )
2068 2073 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2069 2074
2070 2075
2071 2076 def _pullphase(pullop):
2072 2077 # Get remote phases data from remote
2073 2078 if b'phases' in pullop.stepsdone:
2074 2079 return
2075 2080 remotephases = listkeys(pullop.remote, b'phases')
2076 2081 _pullapplyphases(pullop, remotephases)
2077 2082
2078 2083
2079 2084 def _pullapplyphases(pullop, remotephases):
2080 2085 """apply phase movement from observed remote state"""
2081 2086 if b'phases' in pullop.stepsdone:
2082 2087 return
2083 2088 pullop.stepsdone.add(b'phases')
2084 2089 publishing = bool(remotephases.get(b'publishing', False))
2085 2090 if remotephases and not publishing:
2086 2091 # remote is new and non-publishing
2087 2092 pheads, _dr = phases.analyzeremotephases(
2088 2093 pullop.repo, pullop.pulledsubset, remotephases
2089 2094 )
2090 2095 dheads = pullop.pulledsubset
2091 2096 else:
2092 2097 # Remote is old or publishing; all common changesets
2093 2098 # should be seen as public
2094 2099 pheads = pullop.pulledsubset
2095 2100 dheads = []
2096 2101 unfi = pullop.repo.unfiltered()
2097 2102 phase = unfi._phasecache.phase
2098 2103 rev = unfi.changelog.nodemap.get
2099 2104 public = phases.public
2100 2105 draft = phases.draft
2101 2106
2102 2107 # exclude changesets already public locally and update the others
2103 2108 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
2104 2109 if pheads:
2105 2110 tr = pullop.gettransaction()
2106 2111 phases.advanceboundary(pullop.repo, tr, public, pheads)
2107 2112
2108 2113 # exclude changesets already draft locally and update the others
2109 2114 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
2110 2115 if dheads:
2111 2116 tr = pullop.gettransaction()
2112 2117 phases.advanceboundary(pullop.repo, tr, draft, dheads)
2113 2118
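# Editor's sketch of the 'phases' listkeys replies handled above (node
# values are made up). A publishing server answers {b'publishing': b'True'};
# a non-publishing one maps hex nodes to phase numbers, e.g.
# {b'<hexnode>': b'1', ...}. Note that the check above treats any non-empty
# b'publishing' value as true:
def _ispublishingsketch(remotephases):
    # hypothetical helper mirroring the check in _pullapplyphases
    return bool(remotephases.get(b'publishing', False))
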
2114 2119
2115 2120 def _pullbookmarks(pullop):
2116 2121 """process the remote bookmark information to update the local one"""
2117 2122 if b'bookmarks' in pullop.stepsdone:
2118 2123 return
2119 2124 pullop.stepsdone.add(b'bookmarks')
2120 2125 repo = pullop.repo
2121 2126 remotebookmarks = pullop.remotebookmarks
2122 2127 bookmod.updatefromremote(
2123 2128 repo.ui,
2124 2129 repo,
2125 2130 remotebookmarks,
2126 2131 pullop.remote.url(),
2127 2132 pullop.gettransaction,
2128 2133 explicit=pullop.explicitbookmarks,
2129 2134 )
2130 2135
2131 2136
2132 2137 def _pullobsolete(pullop):
2133 2138 """utility function to pull obsolete markers from a remote
2134 2139
2135 2140 `gettransaction` is a function that returns the pull transaction, creating
2136 2141 one if necessary. We return the transaction to inform the calling code that
2137 2142 a new transaction has been created (when applicable).
2138 2143
2139 2144 Exists mostly to allow overriding for experimentation purposes"""
2140 2145 if b'obsmarkers' in pullop.stepsdone:
2141 2146 return
2142 2147 pullop.stepsdone.add(b'obsmarkers')
2143 2148 tr = None
2144 2149 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2145 2150 pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
2146 2151 remoteobs = listkeys(pullop.remote, b'obsolete')
2147 2152 if b'dump0' in remoteobs:
2148 2153 tr = pullop.gettransaction()
2149 2154 markers = []
2150 2155 for key in sorted(remoteobs, reverse=True):
2151 2156 if key.startswith(b'dump'):
2152 2157 data = util.b85decode(remoteobs[key])
2153 2158 version, newmarks = obsolete._readmarkers(data)
2154 2159 markers += newmarks
2155 2160 if markers:
2156 2161 pullop.repo.obsstore.add(tr, markers)
2157 2162 pullop.repo.invalidatevolatilesets()
2158 2163 return tr
2159 2164
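# Editor's sketch: the b'dumpN' values consumed above are base85-encoded
# marker blobs; a round trip looks like this (the payload bytes below are
# made up stand-ins for real _readmarkers input).
def _obsdumproundtripsketch():
    blob = b'\x00raw-marker-bytes'  # stand-in marker data
    wire = util.b85encode(blob)  # what the pushkey namespace transports
    assert util.b85decode(wire) == blob
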
2160 2165
2161 2166 def applynarrowacl(repo, kwargs):
2162 2167 """Apply narrow fetch access control.
2163 2168
2164 2169 This massages the named arguments for getbundle wire protocol commands
2165 2170 so requested data is filtered through access control rules.
2166 2171 """
2167 2172 ui = repo.ui
2168 2173 # TODO this assumes existence of HTTP and is a layering violation.
2169 2174 username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
2170 2175 user_includes = ui.configlist(
2171 2176 _NARROWACL_SECTION,
2172 2177 username + b'.includes',
2173 2178 ui.configlist(_NARROWACL_SECTION, b'default.includes'),
2174 2179 )
2175 2180 user_excludes = ui.configlist(
2176 2181 _NARROWACL_SECTION,
2177 2182 username + b'.excludes',
2178 2183 ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
2179 2184 )
2180 2185 if not user_includes:
2181 2186 raise error.Abort(
2182 2187 _(b"{} configuration for user {} is empty").format(
2183 2188 _NARROWACL_SECTION, username
2184 2189 )
2185 2190 )
2186 2191
2187 2192 user_includes = [
2188 2193 b'path:.' if p == b'*' else b'path:' + p for p in user_includes
2189 2194 ]
2190 2195 user_excludes = [
2191 2196 b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
2192 2197 ]
2193 2198
2194 2199 req_includes = set(kwargs.get(r'includepats', []))
2195 2200 req_excludes = set(kwargs.get(r'excludepats', []))
2196 2201
2197 2202 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
2198 2203 req_includes, req_excludes, user_includes, user_excludes
2199 2204 )
2200 2205
2201 2206 if invalid_includes:
2202 2207 raise error.Abort(
2203 2208 _(b"The following includes are not accessible for {}: {}").format(
2204 2209 username, invalid_includes
2205 2210 )
2206 2211 )
2207 2212
2208 2213 new_args = {}
2209 2214 new_args.update(kwargs)
2210 2215 new_args[r'narrow'] = True
2211 2216 new_args[r'narrow_acl'] = True
2212 2217 new_args[r'includepats'] = req_includes
2213 2218 if req_excludes:
2214 2219 new_args[r'excludepats'] = req_excludes
2215 2220
2216 2221 return new_args
2217 2222
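# Editor's sketch of the hgrc shape this function reads (the section name
# is real, user names and paths are made up):
#
#     [narrowacl]
#     default.includes = *
#     alice.includes = src/lib, docs
#     alice.excludes = src/lib/secret
#
# A bare '*' is normalized to the match-everything pattern, i.e.
# [b'*', b'src'] becomes [b'path:.', b'path:src'].
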
2218 2223
2219 2224 def _computeellipsis(repo, common, heads, known, match, depth=None):
2220 2225 """Compute the shape of a narrowed DAG.
2221 2226
2222 2227 Args:
2223 2228 repo: The repository we're transferring.
2224 2229 common: The roots of the DAG range we're transferring.
2225 2230 May be just [nullid], which means all ancestors of heads.
2226 2231 heads: The heads of the DAG range we're transferring.
2227 2232 match: The narrowmatcher that allows us to identify relevant changes.
2228 2233 depth: If not None, only consider nodes to be full nodes if they are at
2229 2234 most depth changesets away from one of heads.
2230 2235
2231 2236 Returns:
2232 2237 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
2233 2238
2234 2239 visitnodes: The list of nodes (either full or ellipsis) which
2235 2240 need to be sent to the client.
2236 2241 relevant_nodes: The set of changelog nodes which change a file inside
2237 2242 the narrowspec. The client needs these as non-ellipsis nodes.
2238 2243 ellipsisroots: A dict of {rev: parents} that is used in
2239 2244 narrowchangegroup to produce ellipsis nodes with the
2240 2245 correct parents.
2241 2246 """
2242 2247 cl = repo.changelog
2243 2248 mfl = repo.manifestlog
2244 2249
2245 2250 clrev = cl.rev
2246 2251
2247 2252 commonrevs = {clrev(n) for n in common} | {nullrev}
2248 2253 headsrevs = {clrev(n) for n in heads}
2249 2254
2250 2255 if depth:
2251 2256 revdepth = {h: 0 for h in headsrevs}
2252 2257
2253 2258 ellipsisheads = collections.defaultdict(set)
2254 2259 ellipsisroots = collections.defaultdict(set)
2255 2260
2256 2261 def addroot(head, curchange):
2257 2262 """Add a root to an ellipsis head, splitting heads with 3 roots."""
2258 2263 ellipsisroots[head].add(curchange)
2259 2264 # Recursively split ellipsis heads with 3 roots by finding the
2260 2265 # roots' youngest common descendant which is an elided merge commit.
2261 2266 # That descendant takes 2 of the 3 roots as its own, and becomes a
2262 2267 # root of the head.
2263 2268 while len(ellipsisroots[head]) > 2:
2264 2269 child, roots = splithead(head)
2265 2270 splitroots(head, child, roots)
2266 2271 head = child # Recurse in case we just added a 3rd root
2267 2272
2268 2273 def splitroots(head, child, roots):
2269 2274 ellipsisroots[head].difference_update(roots)
2270 2275 ellipsisroots[head].add(child)
2271 2276 ellipsisroots[child].update(roots)
2272 2277 ellipsisroots[child].discard(child)
2273 2278
2274 2279 def splithead(head):
2275 2280 r1, r2, r3 = sorted(ellipsisroots[head])
2276 2281 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
2277 2282 mid = repo.revs(
2278 2283 b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
2279 2284 )
2280 2285 for j in mid:
2281 2286 if j == nr2:
2282 2287 return nr2, (nr1, nr2)
2283 2288 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
2284 2289 return j, (nr1, nr2)
2285 2290 raise error.Abort(
2286 2291 _(
2287 2292 b'Failed to split up ellipsis node! head: %d, '
2288 2293 b'roots: %d %d %d'
2289 2294 )
2290 2295 % (head, r1, r2, r3)
2291 2296 )
2292 2297
2293 2298 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
2294 2299 visit = reversed(missing)
2295 2300 relevant_nodes = set()
2296 2301 visitnodes = [cl.node(m) for m in missing]
2297 2302 required = set(headsrevs) | known
2298 2303 for rev in visit:
2299 2304 clrev = cl.changelogrevision(rev)
2300 2305 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
2301 2306 if depth is not None:
2302 2307 curdepth = revdepth[rev]
2303 2308 for p in ps:
2304 2309 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
2305 2310 needed = False
2306 2311 shallow_enough = depth is None or revdepth[rev] <= depth
2307 2312 if shallow_enough:
2308 2313 curmf = mfl[clrev.manifest].read()
2309 2314 if ps:
2310 2315 # We choose to not trust the changed files list in
2311 2316 # changesets because it's not always correct. TODO: could
2312 2317 # we trust it for the non-merge case?
2313 2318 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
2314 2319 needed = bool(curmf.diff(p1mf, match))
2315 2320 if not needed and len(ps) > 1:
2316 2321 # For merge changes, the list of changed files is not
2317 2322 # helpful, since we need to emit the merge if a file
2318 2323 # in the narrow spec has changed on either side of the
2319 2324 # merge. As a result, we do a manifest diff to check.
2320 2325 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2321 2326 needed = bool(curmf.diff(p2mf, match))
2322 2327 else:
2323 2328 # For a root node, we need to include the node if any
2324 2329 # files in the node match the narrowspec.
2325 2330 needed = any(curmf.walk(match))
2326 2331
2327 2332 if needed:
2328 2333 for head in ellipsisheads[rev]:
2329 2334 addroot(head, rev)
2330 2335 for p in ps:
2331 2336 required.add(p)
2332 2337 relevant_nodes.add(cl.node(rev))
2333 2338 else:
2334 2339 if not ps:
2335 2340 ps = [nullrev]
2336 2341 if rev in required:
2337 2342 for head in ellipsisheads[rev]:
2338 2343 addroot(head, rev)
2339 2344 for p in ps:
2340 2345 ellipsisheads[p].add(rev)
2341 2346 else:
2342 2347 for p in ps:
2343 2348 ellipsisheads[p] |= ellipsisheads[rev]
2344 2349
2345 2350 # add common changesets as roots of their reachable ellipsis heads
2346 2351 for c in commonrevs:
2347 2352 for head in ellipsisheads[c]:
2348 2353 addroot(head, c)
2349 2354 return visitnodes, relevant_nodes, ellipsisroots
2350 2355
2351 2356
2352 2357 def caps20to10(repo, role):
2353 2358 """return a set with appropriate options to use bundle20 during getbundle"""
2354 2359 caps = {b'HG20'}
2355 2360 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2356 2361 caps.add(b'bundle2=' + urlreq.quote(capsblob))
2357 2362 return caps
2358 2363
2359 2364
2360 2365 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2361 2366 getbundle2partsorder = []
2362 2367
2363 2368 # Mapping between step name and function
2364 2369 #
2365 2370 # This exists to help extensions wrap steps if necessary
2366 2371 getbundle2partsmapping = {}
2367 2372
2368 2373
2369 2374 def getbundle2partsgenerator(stepname, idx=None):
2370 2375 """decorator for function generating bundle2 part for getbundle
2371 2376
2372 2377 The function is added to the step -> function mapping and appended to the
2373 2378 list of steps. Beware that decorated functions will be added in order
2374 2379 (this may matter).
2375 2380
2376 2381 You can only use this decorator for new steps; if you want to wrap a step
2377 2382 from an extension, change the getbundle2partsmapping dictionary directly."""
2378 2383
2379 2384 def dec(func):
2380 2385 assert stepname not in getbundle2partsmapping
2381 2386 getbundle2partsmapping[stepname] = func
2382 2387 if idx is None:
2383 2388 getbundle2partsorder.append(stepname)
2384 2389 else:
2385 2390 getbundle2partsorder.insert(idx, stepname)
2386 2391 return func
2387 2392
2388 2393 return dec
2389 2394
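# Editor's sketch: how an extension would add a bundle2 part generator
# (the step name, part type and payload are hypothetical). Passing idx=0
# would make the part come first. Shown commented out so the real part
# order is unchanged.
#
#     @getbundle2partsgenerator(b'example:marker')
#     def _getbundleexamplepart(
#         bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
#     ):
#         bundler.newpart(b'example:marker', data=b'', mandatory=False)
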
2390 2395
2391 2396 def bundle2requested(bundlecaps):
2392 2397 if bundlecaps is not None:
2393 2398 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2394 2399 return False
2395 2400
2396 2401
2397 2402 def getbundlechunks(
2398 2403 repo, source, heads=None, common=None, bundlecaps=None, **kwargs
2399 2404 ):
2400 2405 """Return chunks constituting a bundle's raw data.
2401 2406
2402 2407 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2403 2408 passed.
2404 2409
2405 2410 Returns a 2-tuple of a dict with metadata about the generated bundle
2406 2411 and an iterator over raw chunks (of varying sizes).
2407 2412 """
2408 2413 kwargs = pycompat.byteskwargs(kwargs)
2409 2414 info = {}
2410 2415 usebundle2 = bundle2requested(bundlecaps)
2411 2416 # bundle10 case
2412 2417 if not usebundle2:
2413 2418 if bundlecaps and not kwargs.get(b'cg', True):
2414 2419 raise ValueError(
2415 2420 _(b'request for bundle10 must include changegroup')
2416 2421 )
2417 2422
2418 2423 if kwargs:
2419 2424 raise ValueError(
2420 2425 _(b'unsupported getbundle arguments: %s')
2421 2426 % b', '.join(sorted(kwargs.keys()))
2422 2427 )
2423 2428 outgoing = _computeoutgoing(repo, heads, common)
2424 2429 info[b'bundleversion'] = 1
2425 2430 return (
2426 2431 info,
2427 2432 changegroup.makestream(
2428 2433 repo, outgoing, b'01', source, bundlecaps=bundlecaps
2429 2434 ),
2430 2435 )
2431 2436
2432 2437 # bundle20 case
2433 2438 info[b'bundleversion'] = 2
2434 2439 b2caps = {}
2435 2440 for bcaps in bundlecaps:
2436 2441 if bcaps.startswith(b'bundle2='):
2437 2442 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2438 2443 b2caps.update(bundle2.decodecaps(blob))
2439 2444 bundler = bundle2.bundle20(repo.ui, b2caps)
2440 2445
2441 2446 kwargs[b'heads'] = heads
2442 2447 kwargs[b'common'] = common
2443 2448
2444 2449 for name in getbundle2partsorder:
2445 2450 func = getbundle2partsmapping[name]
2446 2451 func(
2447 2452 bundler,
2448 2453 repo,
2449 2454 source,
2450 2455 bundlecaps=bundlecaps,
2451 2456 b2caps=b2caps,
2452 2457 **pycompat.strkwargs(kwargs)
2453 2458 )
2454 2459
2455 2460 info[b'prefercompressed'] = bundler.prefercompressed
2456 2461
2457 2462 return info, bundler.getchunks()
2458 2463
2459 2464
2460 2465 @getbundle2partsgenerator(b'stream2')
2461 2466 def _getbundlestream2(bundler, repo, *args, **kwargs):
2462 2467 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2463 2468
2464 2469
2465 2470 @getbundle2partsgenerator(b'changegroup')
2466 2471 def _getbundlechangegrouppart(
2467 2472 bundler,
2468 2473 repo,
2469 2474 source,
2470 2475 bundlecaps=None,
2471 2476 b2caps=None,
2472 2477 heads=None,
2473 2478 common=None,
2474 2479 **kwargs
2475 2480 ):
2476 2481 """add a changegroup part to the requested bundle"""
2477 2482 if not kwargs.get(r'cg', True):
2478 2483 return
2479 2484
2480 2485 version = b'01'
2481 2486 cgversions = b2caps.get(b'changegroup')
2482 2487 if cgversions: # 3.1 and 3.2 ship with an empty value
2483 2488 cgversions = [
2484 2489 v
2485 2490 for v in cgversions
2486 2491 if v in changegroup.supportedoutgoingversions(repo)
2487 2492 ]
2488 2493 if not cgversions:
2489 2494 raise error.Abort(_(b'no common changegroup version'))
2490 2495 version = max(cgversions)
2491 2496
2492 2497 outgoing = _computeoutgoing(repo, heads, common)
2493 2498 if not outgoing.missing:
2494 2499 return
2495 2500
2496 2501 if kwargs.get(r'narrow', False):
2497 2502 include = sorted(filter(bool, kwargs.get(r'includepats', [])))
2498 2503 exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
2499 2504 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2500 2505 else:
2501 2506 matcher = None
2502 2507
2503 2508 cgstream = changegroup.makestream(
2504 2509 repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
2505 2510 )
2506 2511
2507 2512 part = bundler.newpart(b'changegroup', data=cgstream)
2508 2513 if cgversions:
2509 2514 part.addparam(b'version', version)
2510 2515
2511 2516 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2512 2517
2513 2518 if b'treemanifest' in repo.requirements:
2514 2519 part.addparam(b'treemanifest', b'1')
2515 2520
2516 2521 if b'exp-sidedata-flag' in repo.requirements:
2517 2522 part.addparam(b'exp-sidedata', b'1')
2518 2523
2519 2524 if (
2520 2525 kwargs.get(r'narrow', False)
2521 2526 and kwargs.get(r'narrow_acl', False)
2522 2527 and (include or exclude)
2523 2528 ):
2524 2529 # this is mandatory because otherwise ACL clients won't work
2525 2530 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2526 2531 narrowspecpart.data = b'%s\0%s' % (
2527 2532 b'\n'.join(include),
2528 2533 b'\n'.join(exclude),
2529 2534 )
2530 2535
2531 2536
2532 2537 @getbundle2partsgenerator(b'bookmarks')
2533 2538 def _getbundlebookmarkpart(
2534 2539 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2535 2540 ):
2536 2541 """add a bookmark part to the requested bundle"""
2537 2542 if not kwargs.get(r'bookmarks', False):
2538 2543 return
2539 2544 if b'bookmarks' not in b2caps:
2540 2545 raise error.Abort(_(b'no common bookmarks exchange method'))
2541 2546 books = bookmod.listbinbookmarks(repo)
2542 2547 data = bookmod.binaryencode(books)
2543 2548 if data:
2544 2549 bundler.newpart(b'bookmarks', data=data)
2545 2550
2546 2551
2547 2552 @getbundle2partsgenerator(b'listkeys')
2548 2553 def _getbundlelistkeysparts(
2549 2554 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2550 2555 ):
2551 2556 """add parts containing listkeys namespaces to the requested bundle"""
2552 2557 listkeys = kwargs.get(r'listkeys', ())
2553 2558 for namespace in listkeys:
2554 2559 part = bundler.newpart(b'listkeys')
2555 2560 part.addparam(b'namespace', namespace)
2556 2561 keys = repo.listkeys(namespace).items()
2557 2562 part.data = pushkey.encodekeys(keys)
2558 2563
2559 2564
2560 2565 @getbundle2partsgenerator(b'obsmarkers')
2561 2566 def _getbundleobsmarkerpart(
2562 2567 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2563 2568 ):
2564 2569 """add an obsolescence markers part to the requested bundle"""
2565 2570 if kwargs.get(r'obsmarkers', False):
2566 2571 if heads is None:
2567 2572 heads = repo.heads()
2568 2573 subset = [c.node() for c in repo.set(b'::%ln', heads)]
2569 2574 markers = repo.obsstore.relevantmarkers(subset)
2570 # last item of marker tuple ('parents') may be None or a tuple
2571 markers = sorted(markers, key=lambda m: m[:-1] + (m[-1] or (),))
2575 markers = _sortedmarkers(markers)
2572 2576 bundle2.buildobsmarkerspart(bundler, markers)
2573 2577
2574 2578
2575 2579 @getbundle2partsgenerator(b'phases')
2576 2580 def _getbundlephasespart(
2577 2581 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2578 2582 ):
2579 2583 """add phase heads part to the requested bundle"""
2580 2584 if kwargs.get(r'phases', False):
2581 2585 if b'heads' not in b2caps.get(b'phases', ()):
2582 2586 raise error.Abort(_(b'no common phases exchange method'))
2583 2587 if heads is None:
2584 2588 heads = repo.heads()
2585 2589
2586 2590 headsbyphase = collections.defaultdict(set)
2587 2591 if repo.publishing():
2588 2592 headsbyphase[phases.public] = heads
2589 2593 else:
2590 2594 # find the appropriate heads to move
2591 2595
2592 2596 phase = repo._phasecache.phase
2593 2597 node = repo.changelog.node
2594 2598 rev = repo.changelog.rev
2595 2599 for h in heads:
2596 2600 headsbyphase[phase(repo, rev(h))].add(h)
2597 2601 seenphases = list(headsbyphase.keys())
2598 2602
2599 2603 # We do not handle anything but public and draft phases for now
2600 2604 if seenphases:
2601 2605 assert max(seenphases) <= phases.draft
2602 2606
2603 2607 # if client is pulling non-public changesets, we need to find
2604 2608 # intermediate public heads.
2605 2609 draftheads = headsbyphase.get(phases.draft, set())
2606 2610 if draftheads:
2607 2611 publicheads = headsbyphase.get(phases.public, set())
2608 2612
2609 2613 revset = b'heads(only(%ln, %ln) and public())'
2610 2614 extraheads = repo.revs(revset, draftheads, publicheads)
2611 2615 for r in extraheads:
2612 2616 headsbyphase[phases.public].add(node(r))
2613 2617
2614 2618 # transform data in a format used by the encoding function
2615 2619 phasemapping = []
2616 2620 for phase in phases.allphases:
2617 2621 phasemapping.append(sorted(headsbyphase[phase]))
2618 2622
2619 2623 # generate the actual part
2620 2624 phasedata = phases.binaryencode(phasemapping)
2621 2625 bundler.newpart(b'phase-heads', data=phasedata)
2622 2626
2623 2627
2624 2628 @getbundle2partsgenerator(b'hgtagsfnodes')
2625 2629 def _getbundletagsfnodes(
2626 2630 bundler,
2627 2631 repo,
2628 2632 source,
2629 2633 bundlecaps=None,
2630 2634 b2caps=None,
2631 2635 heads=None,
2632 2636 common=None,
2633 2637 **kwargs
2634 2638 ):
2635 2639 """Transfer the .hgtags filenodes mapping.
2636 2640
2637 2641 Only values for heads in this bundle will be transferred.
2638 2642
2639 2643 The part data consists of pairs of 20 byte changeset node and .hgtags
2640 2644 filenodes raw values.
2641 2645 """
2642 2646 # Don't send unless:
2643 2647 # - changesets are being exchanged,
2644 2648 # - the client supports it.
2645 2649 if not (kwargs.get(r'cg', True) and b'hgtagsfnodes' in b2caps):
2646 2650 return
2647 2651
2648 2652 outgoing = _computeoutgoing(repo, heads, common)
2649 2653 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2650 2654
2651 2655
2652 2656 @getbundle2partsgenerator(b'cache:rev-branch-cache')
2653 2657 def _getbundlerevbranchcache(
2654 2658 bundler,
2655 2659 repo,
2656 2660 source,
2657 2661 bundlecaps=None,
2658 2662 b2caps=None,
2659 2663 heads=None,
2660 2664 common=None,
2661 2665 **kwargs
2662 2666 ):
2663 2667 """Transfer the rev-branch-cache mapping
2664 2668
2665 2669 The payload is a series of records, one per branch, each containing:
2666 2670
2667 2671 1) branch name length
2668 2672 2) number of open heads
2669 2673 3) number of closed heads
2670 2674 4) open heads nodes
2671 2675 5) closed heads nodes
2672 2676 """
2673 2677 # Don't send unless:
2674 2678 # - changesets are being exchanged,
2675 2679 # - the client supports it, and
2676 2680 # - a narrow bundle isn't in play (not currently compatible).
2677 2681 if (
2678 2682 not kwargs.get(r'cg', True)
2679 2683 or b'rev-branch-cache' not in b2caps
2680 2684 or kwargs.get(r'narrow', False)
2681 2685 or repo.ui.has_section(_NARROWACL_SECTION)
2682 2686 ):
2683 2687 return
2684 2688
2685 2689 outgoing = _computeoutgoing(repo, heads, common)
2686 2690 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2687 2691
2688 2692
2689 2693 def check_heads(repo, their_heads, context):
2690 2694 """check if the heads of a repo have been modified
2691 2695
2692 2696 Used by peer for unbundling.
2693 2697 """
2694 2698 heads = repo.heads()
2695 2699 heads_hash = hashlib.sha1(b''.join(sorted(heads))).digest()
2696 2700 if not (
2697 2701 their_heads == [b'force']
2698 2702 or their_heads == heads
2699 2703 or their_heads == [b'hashed', heads_hash]
2700 2704 ):
2701 2705 # someone else committed/pushed/unbundled while we
2702 2706 # were transferring data
2703 2707 raise error.PushRaced(
2704 2708 b'repository changed while %s - please try again' % context
2705 2709 )
2706 2710
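# Editor's sketch: a client builds the [b'hashed', heads_hash] form checked
# above by hashing the sorted heads it observed at discovery time (hashlib
# is imported at the top of this module).
def _hashheadssketch(seen_heads):
    return hashlib.sha1(b''.join(sorted(seen_heads))).digest()
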
2707 2711
2708 2712 def unbundle(repo, cg, heads, source, url):
2709 2713 """Apply a bundle to a repo.
2710 2714
2711 2715 This function makes sure the repo is locked during the application and has
2712 2716 a mechanism to check that no push race occurred between the creation of the
2713 2717 bundle and its application.
2714 2718
2715 2719 If the push was raced, a PushRaced exception is raised."""
2716 2720 r = 0
2717 2721 # need a transaction when processing a bundle2 stream
2718 2722 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2719 2723 lockandtr = [None, None, None]
2720 2724 recordout = None
2721 2725 # quick fix for output mismatch with bundle2 in 3.4
2722 2726 captureoutput = repo.ui.configbool(
2723 2727 b'experimental', b'bundle2-output-capture'
2724 2728 )
2725 2729 if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
2726 2730 captureoutput = True
2727 2731 try:
2728 2732 # note: outside bundle1, 'heads' is expected to be empty and this
2729 2733 # 'check_heads' call will be a no-op
2730 2734 check_heads(repo, heads, b'uploading changes')
2731 2735 # push can proceed
2732 2736 if not isinstance(cg, bundle2.unbundle20):
2733 2737 # legacy case: bundle1 (changegroup 01)
2734 2738 txnname = b"\n".join([source, util.hidepassword(url)])
2735 2739 with repo.lock(), repo.transaction(txnname) as tr:
2736 2740 op = bundle2.applybundle(repo, cg, tr, source, url)
2737 2741 r = bundle2.combinechangegroupresults(op)
2738 2742 else:
2739 2743 r = None
2740 2744 try:
2741 2745
2742 2746 def gettransaction():
2743 2747 if not lockandtr[2]:
2744 2748 if not bookmod.bookmarksinstore(repo):
2745 2749 lockandtr[0] = repo.wlock()
2746 2750 lockandtr[1] = repo.lock()
2747 2751 lockandtr[2] = repo.transaction(source)
2748 2752 lockandtr[2].hookargs[b'source'] = source
2749 2753 lockandtr[2].hookargs[b'url'] = url
2750 2754 lockandtr[2].hookargs[b'bundle2'] = b'1'
2751 2755 return lockandtr[2]
2752 2756
2753 2757 # Do greedy locking by default until we're satisfied with lazy
2754 2758 # locking.
2755 2759 if not repo.ui.configbool(
2756 2760 b'experimental', b'bundle2lazylocking'
2757 2761 ):
2758 2762 gettransaction()
2759 2763
2760 2764 op = bundle2.bundleoperation(
2761 2765 repo,
2762 2766 gettransaction,
2763 2767 captureoutput=captureoutput,
2764 2768 source=b'push',
2765 2769 )
2766 2770 try:
2767 2771 op = bundle2.processbundle(repo, cg, op=op)
2768 2772 finally:
2769 2773 r = op.reply
2770 2774 if captureoutput and r is not None:
2771 2775 repo.ui.pushbuffer(error=True, subproc=True)
2772 2776
2773 2777 def recordout(output):
2774 2778 r.newpart(b'output', data=output, mandatory=False)
2775 2779
2776 2780 if lockandtr[2] is not None:
2777 2781 lockandtr[2].close()
2778 2782 except BaseException as exc:
2779 2783 exc.duringunbundle2 = True
2780 2784 if captureoutput and r is not None:
2781 2785 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2782 2786
2783 2787 def recordout(output):
2784 2788 part = bundle2.bundlepart(
2785 2789 b'output', data=output, mandatory=False
2786 2790 )
2787 2791 parts.append(part)
2788 2792
2789 2793 raise
2790 2794 finally:
2791 2795 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2792 2796 if recordout is not None:
2793 2797 recordout(repo.ui.popbuffer())
2794 2798 return r
2795 2799
2796 2800
2797 2801 def _maybeapplyclonebundle(pullop):
2798 2802 """Apply a clone bundle from a remote, if possible."""
2799 2803
2800 2804 repo = pullop.repo
2801 2805 remote = pullop.remote
2802 2806
2803 2807 if not repo.ui.configbool(b'ui', b'clonebundles'):
2804 2808 return
2805 2809
2806 2810 # Only run if local repo is empty.
2807 2811 if len(repo):
2808 2812 return
2809 2813
2810 2814 if pullop.heads:
2811 2815 return
2812 2816
2813 2817 if not remote.capable(b'clonebundles'):
2814 2818 return
2815 2819
2816 2820 with remote.commandexecutor() as e:
2817 2821 res = e.callcommand(b'clonebundles', {}).result()
2818 2822
2819 2823 # If we call the wire protocol command, that's good enough to record the
2820 2824 # attempt.
2821 2825 pullop.clonebundleattempted = True
2822 2826
2823 2827 entries = parseclonebundlesmanifest(repo, res)
2824 2828 if not entries:
2825 2829 repo.ui.note(
2826 2830 _(
2827 2831 b'no clone bundles available on remote; '
2828 2832 b'falling back to regular clone\n'
2829 2833 )
2830 2834 )
2831 2835 return
2832 2836
2833 2837 entries = filterclonebundleentries(
2834 2838 repo, entries, streamclonerequested=pullop.streamclonerequested
2835 2839 )
2836 2840
2837 2841 if not entries:
2838 2842 # There is a thundering herd concern here. However, if a server
2839 2843 # operator doesn't advertise bundles appropriate for its clients,
2840 2844 # they deserve what's coming. Furthermore, from a client's
2841 2845 # perspective, no automatic fallback would mean not being able to
2842 2846 # clone!
2843 2847 repo.ui.warn(
2844 2848 _(
2845 2849 b'no compatible clone bundles available on server; '
2846 2850 b'falling back to regular clone\n'
2847 2851 )
2848 2852 )
2849 2853 repo.ui.warn(
2850 2854 _(b'(you may want to report this to the server operator)\n')
2851 2855 )
2852 2856 return
2853 2857
2854 2858 entries = sortclonebundleentries(repo.ui, entries)
2855 2859
2856 2860 url = entries[0][b'URL']
2857 2861 repo.ui.status(_(b'applying clone bundle from %s\n') % url)
2858 2862 if trypullbundlefromurl(repo.ui, repo, url):
2859 2863 repo.ui.status(_(b'finished applying clone bundle\n'))
2860 2864 # Bundle failed.
2861 2865 #
2862 2866 # We abort by default to avoid the thundering herd of
2863 2867 # clients flooding a server that was expecting expensive
2864 2868 # clone load to be offloaded.
2865 2869 elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
2866 2870 repo.ui.warn(_(b'falling back to normal clone\n'))
2867 2871 else:
2868 2872 raise error.Abort(
2869 2873 _(b'error applying bundle'),
2870 2874 hint=_(
2871 2875 b'if this error persists, consider contacting '
2872 2876 b'the server operator or disable clone '
2873 2877 b'bundles via '
2874 2878 b'"--config ui.clonebundles=false"'
2875 2879 ),
2876 2880 )
2877 2881
2878 2882
2879 2883 def parseclonebundlesmanifest(repo, s):
2880 2884 """Parses the raw text of a clone bundles manifest.
2881 2885
2882 2886 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2883 2887 to the URL and other keys are the attributes for the entry.
2884 2888 """
2885 2889 m = []
2886 2890 for line in s.splitlines():
2887 2891 fields = line.split()
2888 2892 if not fields:
2889 2893 continue
2890 2894 attrs = {b'URL': fields[0]}
2891 2895 for rawattr in fields[1:]:
2892 2896 key, value = rawattr.split(b'=', 1)
2893 2897 key = urlreq.unquote(key)
2894 2898 value = urlreq.unquote(value)
2895 2899 attrs[key] = value
2896 2900
2897 2901 # Parse BUNDLESPEC into components. This makes client-side
2898 2902 # preferences easier to specify since you can prefer a single
2899 2903 # component of the BUNDLESPEC.
2900 2904 if key == b'BUNDLESPEC':
2901 2905 try:
2902 2906 bundlespec = parsebundlespec(repo, value)
2903 2907 attrs[b'COMPRESSION'] = bundlespec.compression
2904 2908 attrs[b'VERSION'] = bundlespec.version
2905 2909 except error.InvalidBundleSpecification:
2906 2910 pass
2907 2911 except error.UnsupportedBundleSpecification:
2908 2912 pass
2909 2913
2910 2914 m.append(attrs)
2911 2915
2912 2916 return m
2913 2917
2914 2918
2915 2919 def isstreamclonespec(bundlespec):
2916 2920 # Stream clone v1
2917 2921 if bundlespec.wirecompression == b'UN' and bundlespec.wireversion == b's1':
2918 2922 return True
2919 2923
2920 2924 # Stream clone v2
2921 2925 if (
2922 2926 bundlespec.wirecompression == b'UN'
2923 2927 and bundlespec.wireversion == b'02'
2924 2928 and bundlespec.contentopts.get(b'streamv2')
2925 2929 ):
2926 2930 return True
2927 2931
2928 2932 return False
2929 2933
2930 2934
2931 2935 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2932 2936 """Remove incompatible clone bundle manifest entries.
2933 2937
2934 2938 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2935 2939 and returns a new list consisting of only the entries that this client
2936 2940 should be able to apply.
2937 2941
2938 2942 There is no guarantee we'll be able to apply all returned entries because
2939 2943 the metadata we use to filter on may be missing or wrong.
2940 2944 """
2941 2945 newentries = []
2942 2946 for entry in entries:
2943 2947 spec = entry.get(b'BUNDLESPEC')
2944 2948 if spec:
2945 2949 try:
2946 2950 bundlespec = parsebundlespec(repo, spec, strict=True)
2947 2951
2948 2952 # If a stream clone was requested, filter out non-streamclone
2949 2953 # entries.
2950 2954 if streamclonerequested and not isstreamclonespec(bundlespec):
2951 2955 repo.ui.debug(
2952 2956 b'filtering %s because not a stream clone\n'
2953 2957 % entry[b'URL']
2954 2958 )
2955 2959 continue
2956 2960
2957 2961 except error.InvalidBundleSpecification as e:
2958 2962 repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
2959 2963 continue
2960 2964 except error.UnsupportedBundleSpecification as e:
2961 2965 repo.ui.debug(
2962 2966 b'filtering %s because unsupported bundle '
2963 2967 b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
2964 2968 )
2965 2969 continue
2966 2970 # If we don't have a spec and requested a stream clone, we don't know
2967 2971 # what the entry is so don't attempt to apply it.
2968 2972 elif streamclonerequested:
2969 2973 repo.ui.debug(
2970 2974 b'filtering %s because cannot determine if a stream '
2971 2975 b'clone bundle\n' % entry[b'URL']
2972 2976 )
2973 2977 continue
2974 2978
2975 2979 if b'REQUIRESNI' in entry and not sslutil.hassni:
2976 2980 repo.ui.debug(
2977 2981 b'filtering %s because SNI not supported\n' % entry[b'URL']
2978 2982 )
2979 2983 continue
2980 2984
2981 2985 newentries.append(entry)
2982 2986
2983 2987 return newentries
2984 2988
2985 2989
2986 2990 class clonebundleentry(object):
2987 2991 """Represents an item in a clone bundles manifest.
2988 2992
2989 2993 This rich class is needed to support sorting since sorted() in Python 3
2990 2994 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2991 2995 won't work.
2992 2996 """
2993 2997
2994 2998 def __init__(self, value, prefers):
2995 2999 self.value = value
2996 3000 self.prefers = prefers
2997 3001
2998 3002 def _cmp(self, other):
2999 3003 for prefkey, prefvalue in self.prefers:
3000 3004 avalue = self.value.get(prefkey)
3001 3005 bvalue = other.value.get(prefkey)
3002 3006
3003 3007 # Special case for b missing the attribute while a matches exactly.
3004 3008 if avalue is not None and bvalue is None and avalue == prefvalue:
3005 3009 return -1
3006 3010
3007 3011 # Special case for a missing the attribute while b matches exactly.
3008 3012 if bvalue is not None and avalue is None and bvalue == prefvalue:
3009 3013 return 1
3010 3014
3011 3015 # We can't compare unless attribute present on both.
3012 3016 if avalue is None or bvalue is None:
3013 3017 continue
3014 3018
3015 3019 # Same values should fall back to next attribute.
3016 3020 if avalue == bvalue:
3017 3021 continue
3018 3022
3019 3023 # Exact matches come first.
3020 3024 if avalue == prefvalue:
3021 3025 return -1
3022 3026 if bvalue == prefvalue:
3023 3027 return 1
3024 3028
3025 3029 # Fall back to next attribute.
3026 3030 continue
3027 3031
3028 3032 # If we got here we couldn't sort by attributes and prefers. Fall
3029 3033 # back to index order.
3030 3034 return 0
3031 3035
3032 3036 def __lt__(self, other):
3033 3037 return self._cmp(other) < 0
3034 3038
3035 3039 def __gt__(self, other):
3036 3040 return self._cmp(other) > 0
3037 3041
3038 3042 def __eq__(self, other):
3039 3043 return self._cmp(other) == 0
3040 3044
3041 3045 def __le__(self, other):
3042 3046 return self._cmp(other) <= 0
3043 3047
3044 3048 def __ge__(self, other):
3045 3049 return self._cmp(other) >= 0
3046 3050
3047 3051 def __ne__(self, other):
3048 3052 return self._cmp(other) != 0
3049 3053
3050 3054
3051 3055 def sortclonebundleentries(ui, entries):
3052 3056 prefers = ui.configlist(b'ui', b'clonebundleprefers')
3053 3057 if not prefers:
3054 3058 return list(entries)
3055 3059
3056 3060 prefers = [p.split(b'=', 1) for p in prefers]
3057 3061
3058 3062 items = sorted(clonebundleentry(v, prefers) for v in entries)
3059 3063 return [i.value for i in items]
3060 3064
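# Editor's sketch: with ui.clonebundleprefers set to COMPRESSION=zstd, an
# entry advertising zstd sorts ahead of one advertising gzip (URLs and
# attribute values are made up).
def _sortclonebundlessketch():
    entries = [
        {b'URL': b'https://example.com/gzip.hg', b'COMPRESSION': b'gzip'},
        {b'URL': b'https://example.com/zstd.hg', b'COMPRESSION': b'zstd'},
    ]
    prefers = [[b'COMPRESSION', b'zstd']]
    ranked = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value[b'URL'] for i in ranked]  # zstd URL first
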
3061 3065
3062 3066 def trypullbundlefromurl(ui, repo, url):
3063 3067 """Attempt to apply a bundle from a URL."""
3064 3068 with repo.lock(), repo.transaction(b'bundleurl') as tr:
3065 3069 try:
3066 3070 fh = urlmod.open(ui, url)
3067 3071 cg = readbundle(ui, fh, b'stream')
3068 3072
3069 3073 if isinstance(cg, streamclone.streamcloneapplier):
3070 3074 cg.apply(repo)
3071 3075 else:
3072 3076 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
3073 3077 return True
3074 3078 except urlerr.httperror as e:
3075 3079 ui.warn(
3076 3080 _(b'HTTP error fetching bundle: %s\n')
3077 3081 % stringutil.forcebytestr(e)
3078 3082 )
3079 3083 except urlerr.urlerror as e:
3080 3084 ui.warn(
3081 3085 _(b'error fetching bundle: %s\n')
3082 3086 % stringutil.forcebytestr(e.reason)
3083 3087 )
3084 3088
3085 3089 return False