clonebundles: optional memory-requirement attribution...
Joerg Sonnenberger - r45608:9c7ff887 default draft
@@ -1,3140 +1,3157 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import weakref
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullid,
17 17 nullrev,
18 18 )
19 19 from .thirdparty import attr
20 20 from . import (
21 21 bookmarks as bookmod,
22 22 bundle2,
23 23 changegroup,
24 24 discovery,
25 25 error,
26 26 exchangev2,
27 27 lock as lockmod,
28 28 logexchange,
29 29 narrowspec,
30 30 obsolete,
31 31 obsutil,
32 32 phases,
33 33 pushkey,
34 34 pycompat,
35 35 scmutil,
36 36 sslutil,
37 37 streamclone,
38 38 url as urlmod,
39 39 util,
40 40 wireprototypes,
41 41 )
42 42 from .interfaces import repository
43 43 from .utils import (
44 44 hashutil,
45 45 stringutil,
46 46 )
47 47
48 48 urlerr = util.urlerr
49 49 urlreq = util.urlreq
50 50
51 51 _NARROWACL_SECTION = b'narrowacl'
52 52
53 53 # Maps bundle version human names to changegroup versions.
54 54 _bundlespeccgversions = {
55 55 b'v1': b'01',
56 56 b'v2': b'02',
57 57 b'packed1': b's1',
58 58 b'bundle2': b'02', # legacy
59 59 }
60 60
61 61 # Maps bundle version with content opts to choose which part to bundle
62 62 _bundlespeccontentopts = {
63 63 b'v1': {
64 64 b'changegroup': True,
65 65 b'cg.version': b'01',
66 66 b'obsolescence': False,
67 67 b'phases': False,
68 68 b'tagsfnodescache': False,
69 69 b'revbranchcache': False,
70 70 },
71 71 b'v2': {
72 72 b'changegroup': True,
73 73 b'cg.version': b'02',
74 74 b'obsolescence': False,
75 75 b'phases': False,
76 76 b'tagsfnodescache': True,
77 77 b'revbranchcache': True,
78 78 },
79 79 b'packed1': {b'cg.version': b's1'},
80 80 }
81 81 _bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']
82 82
83 83 _bundlespecvariants = {
84 84 b"streamv2": {
85 85 b"changegroup": False,
86 86 b"streamv2": True,
87 87 b"tagsfnodescache": False,
88 88 b"revbranchcache": False,
89 89 }
90 90 }
91 91
92 92 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
93 93 _bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
94 94
95 95
96 96 @attr.s
97 97 class bundlespec(object):
98 98 compression = attr.ib()
99 99 wirecompression = attr.ib()
100 100 version = attr.ib()
101 101 wireversion = attr.ib()
102 102 params = attr.ib()
103 103 contentopts = attr.ib()
104 104
105 105
106 106 def parsebundlespec(repo, spec, strict=True):
107 107 """Parse a bundle string specification into parts.
108 108
109 109 Bundle specifications denote a well-defined bundle/exchange format.
110 110 The content of a given specification should not change over time in
111 111 order to ensure that bundles produced by a newer version of Mercurial are
112 112 readable from an older version.
113 113
114 114 The string currently has the form:
115 115
116 116 <compression>-<type>[;<parameter0>[;<parameter1>]]
117 117
118 118 Where <compression> is one of the supported compression formats
119 119 and <type> is (currently) a version string. A ";" can follow the type and
120 120 all text afterwards is interpreted as URI encoded, ";" delimited key=value
121 121 pairs.
122 122
123 123 If ``strict`` is True (the default) <compression> is required. Otherwise,
124 124 it is optional.
125 125
126 126 Returns a bundlespec object of (compression, version, parameters).
127 127 Compression will be ``None`` if not in strict mode and a compression isn't
128 128 defined.
129 129
130 130 An ``InvalidBundleSpecification`` is raised when the specification is
131 131 not syntactically well formed.
132 132
133 133 An ``UnsupportedBundleSpecification`` is raised when the compression or
134 134 bundle type/version is not recognized.
135 135
136 136 Note: this function will likely eventually return a more complex data
137 137 structure, including bundle2 part information.
138 138 """
139 139
140 140 def parseparams(s):
141 141 if b';' not in s:
142 142 return s, {}
143 143
144 144 params = {}
145 145 version, paramstr = s.split(b';', 1)
146 146
147 147 for p in paramstr.split(b';'):
148 148 if b'=' not in p:
149 149 raise error.InvalidBundleSpecification(
150 150 _(
151 151 b'invalid bundle specification: '
152 152 b'missing "=" in parameter: %s'
153 153 )
154 154 % p
155 155 )
156 156
157 157 key, value = p.split(b'=', 1)
158 158 key = urlreq.unquote(key)
159 159 value = urlreq.unquote(value)
160 160 params[key] = value
161 161
162 162 return version, params
163 163
164 164 if strict and b'-' not in spec:
165 165 raise error.InvalidBundleSpecification(
166 166 _(
167 167 b'invalid bundle specification; '
168 168 b'must be prefixed with compression: %s'
169 169 )
170 170 % spec
171 171 )
172 172
173 173 if b'-' in spec:
174 174 compression, version = spec.split(b'-', 1)
175 175
176 176 if compression not in util.compengines.supportedbundlenames:
177 177 raise error.UnsupportedBundleSpecification(
178 178 _(b'%s compression is not supported') % compression
179 179 )
180 180
181 181 version, params = parseparams(version)
182 182
183 183 if version not in _bundlespeccgversions:
184 184 raise error.UnsupportedBundleSpecification(
185 185 _(b'%s is not a recognized bundle version') % version
186 186 )
187 187 else:
188 188 # Value could be just the compression or just the version, in which
189 189 # case some defaults are assumed (but only when not in strict mode).
190 190 assert not strict
191 191
192 192 spec, params = parseparams(spec)
193 193
194 194 if spec in util.compengines.supportedbundlenames:
195 195 compression = spec
196 196 version = b'v1'
197 197 # Generaldelta repos require v2.
198 198 if b'generaldelta' in repo.requirements:
199 199 version = b'v2'
200 200 # Modern compression engines require v2.
201 201 if compression not in _bundlespecv1compengines:
202 202 version = b'v2'
203 203 elif spec in _bundlespeccgversions:
204 204 if spec == b'packed1':
205 205 compression = b'none'
206 206 else:
207 207 compression = b'bzip2'
208 208 version = spec
209 209 else:
210 210 raise error.UnsupportedBundleSpecification(
211 211 _(b'%s is not a recognized bundle specification') % spec
212 212 )
213 213
214 214 # Bundle version 1 only supports a known set of compression engines.
215 215 if version == b'v1' and compression not in _bundlespecv1compengines:
216 216 raise error.UnsupportedBundleSpecification(
217 217 _(b'compression engine %s is not supported on v1 bundles')
218 218 % compression
219 219 )
220 220
221 221 # The specification for packed1 can optionally declare the data formats
222 222 # required to apply it. If we see this metadata, compare against what the
223 223 # repo supports and error if the bundle isn't compatible.
224 224 if version == b'packed1' and b'requirements' in params:
225 225 requirements = set(params[b'requirements'].split(b','))
226 226 missingreqs = requirements - repo.supportedformats
227 227 if missingreqs:
228 228 raise error.UnsupportedBundleSpecification(
229 229 _(b'missing support for repository features: %s')
230 230 % b', '.join(sorted(missingreqs))
231 231 )
232 232
233 233 # Compute contentopts based on the version
234 234 contentopts = _bundlespeccontentopts.get(version, {}).copy()
235 235
236 236 # Process the variants
237 237 if b"stream" in params and params[b"stream"] == b"v2":
238 238 variant = _bundlespecvariants[b"streamv2"]
239 239 contentopts.update(variant)
240 240
241 241 engine = util.compengines.forbundlename(compression)
242 242 compression, wirecompression = engine.bundletype()
243 243 wireversion = _bundlespeccgversions[version]
244 244
245 245 return bundlespec(
246 246 compression, wirecompression, version, wireversion, params, contentopts
247 247 )
248 248
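# Illustrative only (an editor's sketch, not part of the original module):
# a few inputs and the fields they produce on the returned bundlespec,
# assuming a standard `repo` with the usual supported formats.
#
#   spec = parsebundlespec(repo, b'gzip-v2', strict=True)
#   # spec.version == b'v2', spec.wireversion == b'02'
#
#   spec = parsebundlespec(repo, b'none-packed1;requirements=revlogv1',
#                          strict=True)
#   # spec.params == {b'requirements': b'revlogv1'}
#
#   parsebundlespec(repo, b'v2', strict=False)  # bzip2 compression assumed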
249 249
250 250 def readbundle(ui, fh, fname, vfs=None):
251 251 header = changegroup.readexactly(fh, 4)
252 252
253 253 alg = None
254 254 if not fname:
255 255 fname = b"stream"
256 256 if not header.startswith(b'HG') and header.startswith(b'\0'):
257 257 fh = changegroup.headerlessfixup(fh, header)
258 258 header = b"HG10"
259 259 alg = b'UN'
260 260 elif vfs:
261 261 fname = vfs.join(fname)
262 262
263 263 magic, version = header[0:2], header[2:4]
264 264
265 265 if magic != b'HG':
266 266 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
267 267 if version == b'10':
268 268 if alg is None:
269 269 alg = changegroup.readexactly(fh, 2)
270 270 return changegroup.cg1unpacker(fh, alg)
271 271 elif version.startswith(b'2'):
272 272 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
273 273 elif version == b'S1':
274 274 return streamclone.streamcloneapplier(fh)
275 275 else:
276 276 raise error.Abort(
277 277 _(b'%s: unknown bundle version %s') % (fname, version)
278 278 )
279 279
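# Dispatch summary (informal sketch of the branches above): the 4-byte
# header selects the unbundler.
#
#   b'HG10' (+ 2-byte compression)  -> changegroup.cg1unpacker
#   b'HG20' (any version b'2x')     -> bundle2.getunbundler
#   b'HGS1'                         -> streamclone.streamcloneapplier
#   leading b'\0' (headerless)      -> fixed up and treated as HG10/UN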
280 280
281 281 def getbundlespec(ui, fh):
282 282 """Infer the bundlespec from a bundle file handle.
283 283
284 284 The input file handle is read from (seeked) and the original seek
285 285 position is not restored.
286 286 """
287 287
288 288 def speccompression(alg):
289 289 try:
290 290 return util.compengines.forbundletype(alg).bundletype()[0]
291 291 except KeyError:
292 292 return None
293 293
294 294 b = readbundle(ui, fh, None)
295 295 if isinstance(b, changegroup.cg1unpacker):
296 296 alg = b._type
297 297 if alg == b'_truncatedBZ':
298 298 alg = b'BZ'
299 299 comp = speccompression(alg)
300 300 if not comp:
301 301 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
302 302 return b'%s-v1' % comp
303 303 elif isinstance(b, bundle2.unbundle20):
304 304 if b'Compression' in b.params:
305 305 comp = speccompression(b.params[b'Compression'])
306 306 if not comp:
307 307 raise error.Abort(
308 308 _(b'unknown compression algorithm: %s') % comp
309 309 )
310 310 else:
311 311 comp = b'none'
312 312
313 313 version = None
314 314 for part in b.iterparts():
315 315 if part.type == b'changegroup':
316 316 version = part.params[b'version']
317 317 if version in (b'01', b'02'):
318 318 version = b'v2'
319 319 else:
320 320 raise error.Abort(
321 321 _(
322 322 b'changegroup version %s does not have '
323 323 b'a known bundlespec'
324 324 )
325 325 % version,
326 326 hint=_(b'try upgrading your Mercurial client'),
327 327 )
328 328 elif part.type == b'stream2' and version is None:
329 329 # A stream2 part requires to be part of a v2 bundle
330 330 requirements = urlreq.unquote(part.params[b'requirements'])
331 331 splitted = requirements.split()
332 332 params = bundle2._formatrequirementsparams(splitted)
333 333 return b'none-v2;stream=v2;%s' % params
334 334
335 335 if not version:
336 336 raise error.Abort(
337 337 _(b'could not identify changegroup version in bundle')
338 338 )
339 339
340 340 return b'%s-%s' % (comp, version)
341 341 elif isinstance(b, streamclone.streamcloneapplier):
342 342 requirements = streamclone.readbundle1header(fh)[2]
343 343 formatted = bundle2._formatrequirementsparams(requirements)
344 344 return b'none-packed1;%s' % formatted
345 345 else:
346 346 raise error.Abort(_(b'unknown bundle type: %s') % b)
347 347
348 348
349 349 def _computeoutgoing(repo, heads, common):
350 350 """Computes which revs are outgoing given a set of common
351 351 and a set of heads.
352 352
353 353 This is a separate function so extensions can have access to
354 354 the logic.
355 355
356 356 Returns a discovery.outgoing object.
357 357 """
358 358 cl = repo.changelog
359 359 if common:
360 360 hasnode = cl.hasnode
361 361 common = [n for n in common if hasnode(n)]
362 362 else:
363 363 common = [nullid]
364 364 if not heads:
365 365 heads = cl.heads()
366 366 return discovery.outgoing(repo, common, heads)
367 367
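# A minimal sketch of the defaulting behaviour (hypothetical call): an
# empty `common` falls back to nullid and an empty `heads` to all
# changelog heads, so this computes "everything" as outgoing.
#
#   outgoing = _computeoutgoing(repo, None, None)
#   # outgoing.missing: every changeset reachable from the repo heads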
368 368
369 369 def _checkpublish(pushop):
370 370 repo = pushop.repo
371 371 ui = repo.ui
372 372 behavior = ui.config(b'experimental', b'auto-publish')
373 373 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
374 374 return
375 375 remotephases = listkeys(pushop.remote, b'phases')
376 376 if not remotephases.get(b'publishing', False):
377 377 return
378 378
379 379 if pushop.revs is None:
380 380 published = repo.filtered(b'served').revs(b'not public()')
381 381 else:
382 382 published = repo.revs(b'::%ln - public()', pushop.revs)
383 383 if published:
384 384 if behavior == b'warn':
385 385 ui.warn(
386 386 _(b'%i changesets about to be published\n') % len(published)
387 387 )
388 388 elif behavior == b'confirm':
389 389 if ui.promptchoice(
390 390 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
391 391 % len(published)
392 392 ):
393 393 raise error.Abort(_(b'user quit'))
394 394 elif behavior == b'abort':
395 395 msg = _(b'push would publish %i changesets') % len(published)
396 396 hint = _(
397 397 b"use --publish or adjust 'experimental.auto-publish'"
398 398 b" config"
399 399 )
400 400 raise error.Abort(msg, hint=hint)
401 401
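# The three behaviours correspond to a config such as (sketch):
#
#   [experimental]
#   auto-publish = confirm
#
# With 'confirm' the prompt above is shown; 'warn' only prints the count;
# 'abort' refuses the push unless --publish is passed.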
402 402
403 403 def _forcebundle1(op):
404 404 """return true if a pull/push must use bundle1
405 405
406 406 This function is used to allow testing of the older bundle version"""
407 407 ui = op.repo.ui
408 408 # The goal of this config is to allow developers to choose the bundle
409 409 # version used during exchange. This is especially handy during tests.
410 410 # Value is a list of bundle versions to pick from; the highest version
411 411 # should be used.
412 412 #
413 413 # developer config: devel.legacy.exchange
414 414 exchange = ui.configlist(b'devel', b'legacy.exchange')
415 415 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
416 416 return forcebundle1 or not op.remote.capable(b'bundle2')
417 417
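# The developer knob read above looks like this in an hgrc (sketch):
#
#   [devel]
#   legacy.exchange = bundle1
#
# Listing bundle1 without bundle2 forces the legacy format even against
# a bundle2-capable peer.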
418 418
419 419 class pushoperation(object):
420 420 """A object that represent a single push operation
421 421
422 422 Its purpose is to carry push related state and very common operations.
423 423
424 424 A new pushoperation should be created at the beginning of each push and
425 425 discarded afterward.
426 426 """
427 427
428 428 def __init__(
429 429 self,
430 430 repo,
431 431 remote,
432 432 force=False,
433 433 revs=None,
434 434 newbranch=False,
435 435 bookmarks=(),
436 436 publish=False,
437 437 pushvars=None,
438 438 ):
439 439 # repo we push from
440 440 self.repo = repo
441 441 self.ui = repo.ui
442 442 # repo we push to
443 443 self.remote = remote
444 444 # force option provided
445 445 self.force = force
446 446 # revs to be pushed (None is "all")
447 447 self.revs = revs
448 448 # bookmark explicitly pushed
449 449 self.bookmarks = bookmarks
450 450 # allow push of new branch
451 451 self.newbranch = newbranch
452 452 # steps already performed
453 453 # (used to check what steps have already been performed through bundle2)
454 454 self.stepsdone = set()
455 455 # Integer version of the changegroup push result
456 456 # - None means nothing to push
457 457 # - 0 means HTTP error
458 458 # - 1 means we pushed and remote head count is unchanged *or*
459 459 # we have outgoing changesets but refused to push
460 460 # - other values as described by addchangegroup()
461 461 self.cgresult = None
462 462 # Boolean value for the bookmark push
463 463 self.bkresult = None
464 464 # discover.outgoing object (contains common and outgoing data)
465 465 self.outgoing = None
466 466 # all remote topological heads before the push
467 467 self.remoteheads = None
468 468 # Details of the remote branch pre and post push
469 469 #
470 470 # mapping: {'branch': ([remoteheads],
471 471 # [newheads],
472 472 # [unsyncedheads],
473 473 # [discardedheads])}
474 474 # - branch: the branch name
475 475 # - remoteheads: the list of remote heads known locally
476 476 # None if the branch is new
477 477 # - newheads: the new remote heads (known locally) with outgoing pushed
478 478 # - unsyncedheads: the list of remote heads unknown locally.
479 479 # - discardedheads: the list of remote heads made obsolete by the push
480 480 self.pushbranchmap = None
481 481 # testable as a boolean indicating if any nodes are missing locally.
482 482 self.incoming = None
483 483 # summary of the remote phase situation
484 484 self.remotephases = None
485 485 # phase changes that must be pushed alongside the changesets
486 486 self.outdatedphases = None
487 487 # phase changes that must be pushed if the changeset push fails
488 488 self.fallbackoutdatedphases = None
489 489 # outgoing obsmarkers
490 490 self.outobsmarkers = set()
491 491 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
492 492 self.outbookmarks = []
493 493 # transaction manager
494 494 self.trmanager = None
495 495 # map { pushkey partid -> callback handling failure}
496 496 # used to handle exceptions from mandatory pushkey part failures
497 497 self.pkfailcb = {}
498 498 # an iterable of pushvars or None
499 499 self.pushvars = pushvars
500 500 # publish pushed changesets
501 501 self.publish = publish
502 502
503 503 @util.propertycache
504 504 def futureheads(self):
505 505 """future remote heads if the changeset push succeeds"""
506 506 return self.outgoing.missingheads
507 507
508 508 @util.propertycache
509 509 def fallbackheads(self):
510 510 """future remote heads if the changeset push fails"""
511 511 if self.revs is None:
512 512 # no target to push, all common heads are relevant
513 513 return self.outgoing.commonheads
514 514 unfi = self.repo.unfiltered()
515 515 # I want cheads = heads(::missingheads and ::commonheads)
516 516 # (missingheads is revs with secret changeset filtered out)
517 517 #
518 518 # This can be expressed as:
519 519 # cheads = ( (missingheads and ::commonheads)
520 520 # + (commonheads and ::missingheads))
521 521 # )
522 522 #
523 523 # while trying to push we already computed the following:
524 524 # common = (::commonheads)
525 525 # missing = ((commonheads::missingheads) - commonheads)
526 526 #
527 527 # We can pick:
528 528 # * missingheads part of common (::commonheads)
529 529 common = self.outgoing.common
530 530 rev = self.repo.changelog.index.rev
531 531 cheads = [node for node in self.revs if rev(node) in common]
532 532 # and
533 533 # * commonheads parents on missing
534 534 revset = unfi.set(
535 535 b'%ln and parents(roots(%ln))',
536 536 self.outgoing.commonheads,
537 537 self.outgoing.missing,
538 538 )
539 539 cheads.extend(c.node() for c in revset)
540 540 return cheads
541 541
542 542 @property
543 543 def commonheads(self):
544 544 """set of all common heads after changeset bundle push"""
545 545 if self.cgresult:
546 546 return self.futureheads
547 547 else:
548 548 return self.fallbackheads
549 549
550 550
551 551 # mapping of messages used when pushing bookmarks
552 552 bookmsgmap = {
553 553 b'update': (
554 554 _(b"updating bookmark %s\n"),
555 555 _(b'updating bookmark %s failed!\n'),
556 556 ),
557 557 b'export': (
558 558 _(b"exporting bookmark %s\n"),
559 559 _(b'exporting bookmark %s failed!\n'),
560 560 ),
561 561 b'delete': (
562 562 _(b"deleting remote bookmark %s\n"),
563 563 _(b'deleting remote bookmark %s failed!\n'),
564 564 ),
565 565 }
566 566
567 567
568 568 def push(
569 569 repo,
570 570 remote,
571 571 force=False,
572 572 revs=None,
573 573 newbranch=False,
574 574 bookmarks=(),
575 575 publish=False,
576 576 opargs=None,
577 577 ):
578 578 '''Push outgoing changesets (limited by revs) from a local
579 579 repository to remote. Return an integer:
580 580 - None means nothing to push
581 581 - 0 means HTTP error
582 582 - 1 means we pushed and remote head count is unchanged *or*
583 583 we have outgoing changesets but refused to push
584 584 - other values as described by addchangegroup()
585 585 '''
586 586 if opargs is None:
587 587 opargs = {}
588 588 pushop = pushoperation(
589 589 repo,
590 590 remote,
591 591 force,
592 592 revs,
593 593 newbranch,
594 594 bookmarks,
595 595 publish,
596 596 **pycompat.strkwargs(opargs)
597 597 )
598 598 if pushop.remote.local():
599 599 missing = (
600 600 set(pushop.repo.requirements) - pushop.remote.local().supported
601 601 )
602 602 if missing:
603 603 msg = _(
604 604 b"required features are not"
605 605 b" supported in the destination:"
606 606 b" %s"
607 607 ) % (b', '.join(sorted(missing)))
608 608 raise error.Abort(msg)
609 609
610 610 if not pushop.remote.canpush():
611 611 raise error.Abort(_(b"destination does not support push"))
612 612
613 613 if not pushop.remote.capable(b'unbundle'):
614 614 raise error.Abort(
615 615 _(
616 616 b'cannot push: destination does not support the '
617 617 b'unbundle wire protocol command'
618 618 )
619 619 )
620 620
621 621 # get lock as we might write phase data
622 622 wlock = lock = None
623 623 try:
624 624 # bundle2 push may receive a reply bundle touching bookmarks
625 625 # requiring the wlock. Take it now to ensure proper ordering.
626 626 maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
627 627 if (
628 628 (not _forcebundle1(pushop))
629 629 and maypushback
630 630 and not bookmod.bookmarksinstore(repo)
631 631 ):
632 632 wlock = pushop.repo.wlock()
633 633 lock = pushop.repo.lock()
634 634 pushop.trmanager = transactionmanager(
635 635 pushop.repo, b'push-response', pushop.remote.url()
636 636 )
637 637 except error.LockUnavailable as err:
638 638 # source repo cannot be locked.
639 639 # We do not abort the push, but just disable the local phase
640 640 # synchronisation.
641 641 msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
642 642 err
643 643 )
644 644 pushop.ui.debug(msg)
645 645
646 646 with wlock or util.nullcontextmanager():
647 647 with lock or util.nullcontextmanager():
648 648 with pushop.trmanager or util.nullcontextmanager():
649 649 pushop.repo.checkpush(pushop)
650 650 _checkpublish(pushop)
651 651 _pushdiscovery(pushop)
652 652 if not pushop.force:
653 653 _checksubrepostate(pushop)
654 654 if not _forcebundle1(pushop):
655 655 _pushbundle2(pushop)
656 656 _pushchangeset(pushop)
657 657 _pushsyncphase(pushop)
658 658 _pushobsolete(pushop)
659 659 _pushbookmark(pushop)
660 660
661 661 if repo.ui.configbool(b'experimental', b'remotenames'):
662 662 logexchange.pullremotenames(repo, remote)
663 663
664 664 return pushop
665 665
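# Typical call shape (an editor's sketch; obtaining `repo` and `remote`
# via hg.repository() and hg.peer() happens elsewhere in Mercurial):
#
#   pushop = push(repo, remote, revs=[repo[b'.'].node()])
#   if pushop.cgresult == 0:
#       repo.ui.warn(b'push failed\n')
#
# Per the docstring above, cgresult is None when there was nothing to push.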
666 666
667 667 # list of steps to perform discovery before push
668 668 pushdiscoveryorder = []
669 669
670 670 # Mapping between step name and function
671 671 #
672 672 # This exists to help extensions wrap steps if necessary
673 673 pushdiscoverymapping = {}
674 674
675 675
676 676 def pushdiscovery(stepname):
677 677 """decorator for function performing discovery before push
678 678
679 679 The function is added to the step -> function mapping and appended to the
680 680 list of steps. Beware that decorated functions will be added in order (this
681 681 may matter).
682 682
683 683 You can only use this decorator for a new step; if you want to wrap a step
684 684 from an extension, change the pushdiscoverymapping dictionary directly."""
685 685
686 686 def dec(func):
687 687 assert stepname not in pushdiscoverymapping
688 688 pushdiscoverymapping[stepname] = func
689 689 pushdiscoveryorder.append(stepname)
690 690 return func
691 691
692 692 return dec
693 693
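# Registering a new step is declarative (sketch; the step name and
# function are hypothetical):
#
#   @pushdiscovery(b'my-step')
#   def _pushdiscoverymystep(pushop):
#       pushop.ui.debug(b'running custom discovery\n')
#
# _pushdiscovery() below then invokes it automatically, in registration
# order, for every push.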
694 694
695 695 def _pushdiscovery(pushop):
696 696 """Run all discovery steps"""
697 697 for stepname in pushdiscoveryorder:
698 698 step = pushdiscoverymapping[stepname]
699 699 step(pushop)
700 700
701 701
702 702 def _checksubrepostate(pushop):
703 703 """Ensure all outgoing referenced subrepo revisions are present locally"""
704 704 for n in pushop.outgoing.missing:
705 705 ctx = pushop.repo[n]
706 706
707 707 if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
708 708 for subpath in sorted(ctx.substate):
709 709 sub = ctx.sub(subpath)
710 710 sub.verify(onpush=True)
711 711
712 712
713 713 @pushdiscovery(b'changeset')
714 714 def _pushdiscoverychangeset(pushop):
715 715 """discover the changeset that need to be pushed"""
716 716 fci = discovery.findcommonincoming
717 717 if pushop.revs:
718 718 commoninc = fci(
719 719 pushop.repo,
720 720 pushop.remote,
721 721 force=pushop.force,
722 722 ancestorsof=pushop.revs,
723 723 )
724 724 else:
725 725 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
726 726 common, inc, remoteheads = commoninc
727 727 fco = discovery.findcommonoutgoing
728 728 outgoing = fco(
729 729 pushop.repo,
730 730 pushop.remote,
731 731 onlyheads=pushop.revs,
732 732 commoninc=commoninc,
733 733 force=pushop.force,
734 734 )
735 735 pushop.outgoing = outgoing
736 736 pushop.remoteheads = remoteheads
737 737 pushop.incoming = inc
738 738
739 739
740 740 @pushdiscovery(b'phase')
741 741 def _pushdiscoveryphase(pushop):
742 742 """discover the phase that needs to be pushed
743 743
744 744 (computed for both success and failure case for changesets push)"""
745 745 outgoing = pushop.outgoing
746 746 unfi = pushop.repo.unfiltered()
747 747 remotephases = listkeys(pushop.remote, b'phases')
748 748
749 749 if (
750 750 pushop.ui.configbool(b'ui', b'_usedassubrepo')
751 751 and remotephases # server supports phases
752 752 and not pushop.outgoing.missing # no changesets to be pushed
753 753 and remotephases.get(b'publishing', False)
754 754 ):
755 755 # When:
756 756 # - this is a subrepo push
757 757 # - and the remote supports phases
758 758 # - and no changesets are to be pushed
759 759 # - and the remote is publishing
760 760 # We may be in the issue 3781 case!
761 761 # We drop the phase synchronisation otherwise done as a courtesy,
762 762 # which could publish on the remote changesets that are still
763 763 # draft locally.
764 764 pushop.outdatedphases = []
765 765 pushop.fallbackoutdatedphases = []
766 766 return
767 767
768 768 pushop.remotephases = phases.remotephasessummary(
769 769 pushop.repo, pushop.fallbackheads, remotephases
770 770 )
771 771 droots = pushop.remotephases.draftroots
772 772
773 773 extracond = b''
774 774 if not pushop.remotephases.publishing:
775 775 extracond = b' and public()'
776 776 revset = b'heads((%%ln::%%ln) %s)' % extracond
777 777 # Get the list of all revs draft on remote but public here.
778 778 # XXX Beware that the revset breaks if droots is not strictly
779 779 # XXX roots; we may want to ensure it is, but that is costly
780 780 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
781 781 if not pushop.remotephases.publishing and pushop.publish:
782 782 future = list(
783 783 unfi.set(
784 784 b'%ln and (not public() or %ln::)', pushop.futureheads, droots
785 785 )
786 786 )
787 787 elif not outgoing.missing:
788 788 future = fallback
789 789 else:
790 790 # add the changesets we are going to push as draft
791 791 #
792 792 # should not be necessary for a publishing server, but because of an
793 793 # issue fixed in xxxxx we have to do it anyway.
794 794 fdroots = list(
795 795 unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
796 796 )
797 797 fdroots = [f.node() for f in fdroots]
798 798 future = list(unfi.set(revset, fdroots, pushop.futureheads))
799 799 pushop.outdatedphases = future
800 800 pushop.fallbackoutdatedphases = fallback
801 801
802 802
803 803 @pushdiscovery(b'obsmarker')
804 804 def _pushdiscoveryobsmarkers(pushop):
805 805 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
806 806 return
807 807
808 808 if not pushop.repo.obsstore:
809 809 return
810 810
811 811 if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
812 812 return
813 813
814 814 repo = pushop.repo
815 815 # very naive computation, which can be quite expensive on a big repo.
816 816 # However: evolution is currently slow on big repos anyway.
817 817 nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
818 818 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
819 819
820 820
821 821 @pushdiscovery(b'bookmarks')
822 822 def _pushdiscoverybookmarks(pushop):
823 823 ui = pushop.ui
824 824 repo = pushop.repo.unfiltered()
825 825 remote = pushop.remote
826 826 ui.debug(b"checking for updated bookmarks\n")
827 827 ancestors = ()
828 828 if pushop.revs:
829 829 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
830 830 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
831 831
832 832 remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
833 833
834 834 explicit = {
835 835 repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
836 836 }
837 837
838 838 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
839 839 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
840 840
841 841
842 842 def _processcompared(pushop, pushed, explicit, remotebms, comp):
843 843 """take decision on bookmarks to push to the remote repo
844 844
845 845 Exists to help extensions alter this behavior.
846 846 """
847 847 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
848 848
849 849 repo = pushop.repo
850 850
851 851 for b, scid, dcid in advsrc:
852 852 if b in explicit:
853 853 explicit.remove(b)
854 854 if not pushed or repo[scid].rev() in pushed:
855 855 pushop.outbookmarks.append((b, dcid, scid))
856 856 # search for added bookmarks
857 857 for b, scid, dcid in addsrc:
858 858 if b in explicit:
859 859 explicit.remove(b)
860 860 if bookmod.isdivergent(b):
861 861 pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b)
862 862 pushop.bkresult = 2
863 863 else:
864 864 pushop.outbookmarks.append((b, b'', scid))
865 865 # search for overwritten bookmarks
866 866 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
867 867 if b in explicit:
868 868 explicit.remove(b)
869 869 pushop.outbookmarks.append((b, dcid, scid))
870 870 # search for bookmarks to delete
871 871 for b, scid, dcid in adddst:
872 872 if b in explicit:
873 873 explicit.remove(b)
874 874 # treat as "deleted locally"
875 875 pushop.outbookmarks.append((b, dcid, b''))
876 876 # identical bookmarks shouldn't get reported
877 877 for b, scid, dcid in same:
878 878 if b in explicit:
879 879 explicit.remove(b)
880 880
881 881 if explicit:
882 882 explicit = sorted(explicit)
883 883 # we should probably list all of them
884 884 pushop.ui.warn(
885 885 _(
886 886 b'bookmark %s does not exist on the local '
887 887 b'or remote repository!\n'
888 888 )
889 889 % explicit[0]
890 890 )
891 891 pushop.bkresult = 2
892 892
893 893 pushop.outbookmarks.sort()
894 894
895 895
896 896 def _pushcheckoutgoing(pushop):
897 897 outgoing = pushop.outgoing
898 898 unfi = pushop.repo.unfiltered()
899 899 if not outgoing.missing:
900 900 # nothing to push
901 901 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
902 902 return False
903 903 # something to push
904 904 if not pushop.force:
905 905 # if repo.obsstore is empty --> no obsolete changesets,
906 906 # so we can skip the iteration entirely
907 907 if unfi.obsstore:
908 908 # these messages are defined here for 80-char-limit reasons
909 909 mso = _(b"push includes obsolete changeset: %s!")
910 910 mspd = _(b"push includes phase-divergent changeset: %s!")
911 911 mscd = _(b"push includes content-divergent changeset: %s!")
912 912 mst = {
913 913 b"orphan": _(b"push includes orphan changeset: %s!"),
914 914 b"phase-divergent": mspd,
915 915 b"content-divergent": mscd,
916 916 }
917 917 # If we are about to push, and there is at least one
918 918 # obsolete or unstable changeset in missing, then at
919 919 # least one of the missing heads will be obsolete or
920 920 # unstable. So checking heads only is ok
921 921 for node in outgoing.missingheads:
922 922 ctx = unfi[node]
923 923 if ctx.obsolete():
924 924 raise error.Abort(mso % ctx)
925 925 elif ctx.isunstable():
926 926 # TODO print more than one instability in the abort
927 927 # message
928 928 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
929 929
930 930 discovery.checkheads(pushop)
931 931 return True
932 932
933 933
934 934 # List of names of steps to perform for an outgoing bundle2, order matters.
935 935 b2partsgenorder = []
936 936
937 937 # Mapping between step name and function
938 938 #
939 939 # This exists to help extensions wrap steps if necessary
940 940 b2partsgenmapping = {}
941 941
942 942
943 943 def b2partsgenerator(stepname, idx=None):
944 944 """decorator for function generating bundle2 part
945 945
946 946 The function is added to the step -> function mapping and appended to the
947 947 list of steps. Beware that decorated functions will be added in order
948 948 (this may matter).
949 949
950 950 You can only use this decorator for new steps; if you want to wrap a step
951 951 from an extension, change the b2partsgenmapping dictionary directly."""
952 952
953 953 def dec(func):
954 954 assert stepname not in b2partsgenmapping
955 955 b2partsgenmapping[stepname] = func
956 956 if idx is None:
957 957 b2partsgenorder.append(stepname)
958 958 else:
959 959 b2partsgenorder.insert(idx, stepname)
960 960 return func
961 961
962 962 return dec
963 963
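# Same registration pattern as pushdiscovery above; `idx` lets a caller
# force an early slot in the part order (sketch; the part name and
# function are hypothetical):
#
#   @b2partsgenerator(b'my-part', idx=0)
#   def _pushb2mypart(pushop, bundler):
#       bundler.newpart(b'my-part', data=b'...', mandatory=False)
#
# Returning a callable registers it as a reply handler (see _pushbundle2).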
964 964
965 965 def _pushb2ctxcheckheads(pushop, bundler):
966 966 """Generate race condition checking parts
967 967
968 968 Exists as an independent function to aid extensions
969 969 """
970 970 # * 'force' does not check for push races,
971 971 # * if we don't push anything, there is nothing to check.
972 972 if not pushop.force and pushop.outgoing.missingheads:
973 973 allowunrelated = b'related' in bundler.capabilities.get(
974 974 b'checkheads', ()
975 975 )
976 976 emptyremote = pushop.pushbranchmap is None
977 977 if not allowunrelated or emptyremote:
978 978 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
979 979 else:
980 980 affected = set()
981 981 for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
982 982 remoteheads, newheads, unsyncedheads, discardedheads = heads
983 983 if remoteheads is not None:
984 984 remote = set(remoteheads)
985 985 affected |= set(discardedheads) & remote
986 986 affected |= remote - set(newheads)
987 987 if affected:
988 988 data = iter(sorted(affected))
989 989 bundler.newpart(b'check:updated-heads', data=data)
990 990
991 991
992 992 def _pushing(pushop):
993 993 """return True if we are pushing anything"""
994 994 return bool(
995 995 pushop.outgoing.missing
996 996 or pushop.outdatedphases
997 997 or pushop.outobsmarkers
998 998 or pushop.outbookmarks
999 999 )
1000 1000
1001 1001
1002 1002 @b2partsgenerator(b'check-bookmarks')
1003 1003 def _pushb2checkbookmarks(pushop, bundler):
1004 1004 """insert bookmark move checking"""
1005 1005 if not _pushing(pushop) or pushop.force:
1006 1006 return
1007 1007 b2caps = bundle2.bundle2caps(pushop.remote)
1008 1008 hasbookmarkcheck = b'bookmarks' in b2caps
1009 1009 if not (pushop.outbookmarks and hasbookmarkcheck):
1010 1010 return
1011 1011 data = []
1012 1012 for book, old, new in pushop.outbookmarks:
1013 1013 data.append((book, old))
1014 1014 checkdata = bookmod.binaryencode(data)
1015 1015 bundler.newpart(b'check:bookmarks', data=checkdata)
1016 1016
1017 1017
1018 1018 @b2partsgenerator(b'check-phases')
1019 1019 def _pushb2checkphases(pushop, bundler):
1020 1020 """insert phase move checking"""
1021 1021 if not _pushing(pushop) or pushop.force:
1022 1022 return
1023 1023 b2caps = bundle2.bundle2caps(pushop.remote)
1024 1024 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1025 1025 if pushop.remotephases is not None and hasphaseheads:
1026 1026 # check that the remote phase has not changed
1027 1027 checks = [[] for p in phases.allphases]
1028 1028 checks[phases.public].extend(pushop.remotephases.publicheads)
1029 1029 checks[phases.draft].extend(pushop.remotephases.draftroots)
1030 1030 if any(checks):
1031 1031 for nodes in checks:
1032 1032 nodes.sort()
1033 1033 checkdata = phases.binaryencode(checks)
1034 1034 bundler.newpart(b'check:phases', data=checkdata)
1035 1035
1036 1036
1037 1037 @b2partsgenerator(b'changeset')
1038 1038 def _pushb2ctx(pushop, bundler):
1039 1039 """handle changegroup push through bundle2
1040 1040
1041 1041 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
1042 1042 """
1043 1043 if b'changesets' in pushop.stepsdone:
1044 1044 return
1045 1045 pushop.stepsdone.add(b'changesets')
1046 1046 # Send known heads to the server for race detection.
1047 1047 if not _pushcheckoutgoing(pushop):
1048 1048 return
1049 1049 pushop.repo.prepushoutgoinghooks(pushop)
1050 1050
1051 1051 _pushb2ctxcheckheads(pushop, bundler)
1052 1052
1053 1053 b2caps = bundle2.bundle2caps(pushop.remote)
1054 1054 version = b'01'
1055 1055 cgversions = b2caps.get(b'changegroup')
1056 1056 if cgversions: # 3.1 and 3.2 ship with an empty value
1057 1057 cgversions = [
1058 1058 v
1059 1059 for v in cgversions
1060 1060 if v in changegroup.supportedoutgoingversions(pushop.repo)
1061 1061 ]
1062 1062 if not cgversions:
1063 1063 raise error.Abort(_(b'no common changegroup version'))
1064 1064 version = max(cgversions)
1065 1065 cgstream = changegroup.makestream(
1066 1066 pushop.repo, pushop.outgoing, version, b'push'
1067 1067 )
1068 1068 cgpart = bundler.newpart(b'changegroup', data=cgstream)
1069 1069 if cgversions:
1070 1070 cgpart.addparam(b'version', version)
1071 1071 if b'treemanifest' in pushop.repo.requirements:
1072 1072 cgpart.addparam(b'treemanifest', b'1')
1073 1073 if b'exp-sidedata-flag' in pushop.repo.requirements:
1074 1074 cgpart.addparam(b'exp-sidedata', b'1')
1075 1075
1076 1076 def handlereply(op):
1077 1077 """extract addchangegroup returns from server reply"""
1078 1078 cgreplies = op.records.getreplies(cgpart.id)
1079 1079 assert len(cgreplies[b'changegroup']) == 1
1080 1080 pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
1081 1081
1082 1082 return handlereply
1083 1083
1084 1084
1085 1085 @b2partsgenerator(b'phase')
1086 1086 def _pushb2phases(pushop, bundler):
1087 1087 """handle phase push through bundle2"""
1088 1088 if b'phases' in pushop.stepsdone:
1089 1089 return
1090 1090 b2caps = bundle2.bundle2caps(pushop.remote)
1091 1091 ui = pushop.repo.ui
1092 1092
1093 1093 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1094 1094 haspushkey = b'pushkey' in b2caps
1095 1095 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1096 1096
1097 1097 if hasphaseheads and not legacyphase:
1098 1098 return _pushb2phaseheads(pushop, bundler)
1099 1099 elif haspushkey:
1100 1100 return _pushb2phasespushkey(pushop, bundler)
1101 1101
1102 1102
1103 1103 def _pushb2phaseheads(pushop, bundler):
1104 1104 """push phase information through a bundle2 - binary part"""
1105 1105 pushop.stepsdone.add(b'phases')
1106 1106 if pushop.outdatedphases:
1107 1107 updates = [[] for p in phases.allphases]
1108 1108 updates[0].extend(h.node() for h in pushop.outdatedphases)
1109 1109 phasedata = phases.binaryencode(updates)
1110 1110 bundler.newpart(b'phase-heads', data=phasedata)
1111 1111
1112 1112
1113 1113 def _pushb2phasespushkey(pushop, bundler):
1114 1114 """push phase information through a bundle2 - pushkey part"""
1115 1115 pushop.stepsdone.add(b'phases')
1116 1116 part2node = []
1117 1117
1118 1118 def handlefailure(pushop, exc):
1119 1119 targetid = int(exc.partid)
1120 1120 for partid, node in part2node:
1121 1121 if partid == targetid:
1122 1122 raise error.Abort(_(b'updating %s to public failed') % node)
1123 1123
1124 1124 enc = pushkey.encode
1125 1125 for newremotehead in pushop.outdatedphases:
1126 1126 part = bundler.newpart(b'pushkey')
1127 1127 part.addparam(b'namespace', enc(b'phases'))
1128 1128 part.addparam(b'key', enc(newremotehead.hex()))
1129 1129 part.addparam(b'old', enc(b'%d' % phases.draft))
1130 1130 part.addparam(b'new', enc(b'%d' % phases.public))
1131 1131 part2node.append((part.id, newremotehead))
1132 1132 pushop.pkfailcb[part.id] = handlefailure
1133 1133
1134 1134 def handlereply(op):
1135 1135 for partid, node in part2node:
1136 1136 partrep = op.records.getreplies(partid)
1137 1137 results = partrep[b'pushkey']
1138 1138 assert len(results) <= 1
1139 1139 msg = None
1140 1140 if not results:
1141 1141 msg = _(b'server ignored update of %s to public!\n') % node
1142 1142 elif not int(results[0][b'return']):
1143 1143 msg = _(b'updating %s to public failed!\n') % node
1144 1144 if msg is not None:
1145 1145 pushop.ui.warn(msg)
1146 1146
1147 1147 return handlereply
1148 1148
1149 1149
1150 1150 @b2partsgenerator(b'obsmarkers')
1151 1151 def _pushb2obsmarkers(pushop, bundler):
1152 1152 if b'obsmarkers' in pushop.stepsdone:
1153 1153 return
1154 1154 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
1155 1155 if obsolete.commonversion(remoteversions) is None:
1156 1156 return
1157 1157 pushop.stepsdone.add(b'obsmarkers')
1158 1158 if pushop.outobsmarkers:
1159 1159 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1160 1160 bundle2.buildobsmarkerspart(bundler, markers)
1161 1161
1162 1162
1163 1163 @b2partsgenerator(b'bookmarks')
1164 1164 def _pushb2bookmarks(pushop, bundler):
1165 1165 """handle bookmark push through bundle2"""
1166 1166 if b'bookmarks' in pushop.stepsdone:
1167 1167 return
1168 1168 b2caps = bundle2.bundle2caps(pushop.remote)
1169 1169
1170 1170 legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
1171 1171 legacybooks = b'bookmarks' in legacy
1172 1172
1173 1173 if not legacybooks and b'bookmarks' in b2caps:
1174 1174 return _pushb2bookmarkspart(pushop, bundler)
1175 1175 elif b'pushkey' in b2caps:
1176 1176 return _pushb2bookmarkspushkey(pushop, bundler)
1177 1177
1178 1178
1179 1179 def _bmaction(old, new):
1180 1180 """small utility for bookmark pushing"""
1181 1181 if not old:
1182 1182 return b'export'
1183 1183 elif not new:
1184 1184 return b'delete'
1185 1185 return b'update'
1186 1186
1187 1187
1188 1188 def _abortonsecretctx(pushop, node, b):
1189 1189 """abort if a given bookmark points to a secret changeset"""
1190 1190 if node and pushop.repo[node].phase() == phases.secret:
1191 1191 raise error.Abort(
1192 1192 _(b'cannot push bookmark %s as it points to a secret changeset') % b
1193 1193 )
1194 1194
1195 1195
1196 1196 def _pushb2bookmarkspart(pushop, bundler):
1197 1197 pushop.stepsdone.add(b'bookmarks')
1198 1198 if not pushop.outbookmarks:
1199 1199 return
1200 1200
1201 1201 allactions = []
1202 1202 data = []
1203 1203 for book, old, new in pushop.outbookmarks:
1204 1204 _abortonsecretctx(pushop, new, book)
1205 1205 data.append((book, new))
1206 1206 allactions.append((book, _bmaction(old, new)))
1207 1207 checkdata = bookmod.binaryencode(data)
1208 1208 bundler.newpart(b'bookmarks', data=checkdata)
1209 1209
1210 1210 def handlereply(op):
1211 1211 ui = pushop.ui
1212 1212 # if success
1213 1213 for book, action in allactions:
1214 1214 ui.status(bookmsgmap[action][0] % book)
1215 1215
1216 1216 return handlereply
1217 1217
1218 1218
1219 1219 def _pushb2bookmarkspushkey(pushop, bundler):
1220 1220 pushop.stepsdone.add(b'bookmarks')
1221 1221 part2book = []
1222 1222 enc = pushkey.encode
1223 1223
1224 1224 def handlefailure(pushop, exc):
1225 1225 targetid = int(exc.partid)
1226 1226 for partid, book, action in part2book:
1227 1227 if partid == targetid:
1228 1228 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1229 1229 # we should not be called for parts we did not generate
1230 1230 assert False
1231 1231
1232 1232 for book, old, new in pushop.outbookmarks:
1233 1233 _abortonsecretctx(pushop, new, book)
1234 1234 part = bundler.newpart(b'pushkey')
1235 1235 part.addparam(b'namespace', enc(b'bookmarks'))
1236 1236 part.addparam(b'key', enc(book))
1237 1237 part.addparam(b'old', enc(hex(old)))
1238 1238 part.addparam(b'new', enc(hex(new)))
1239 1239 action = b'update'
1240 1240 if not old:
1241 1241 action = b'export'
1242 1242 elif not new:
1243 1243 action = b'delete'
1244 1244 part2book.append((part.id, book, action))
1245 1245 pushop.pkfailcb[part.id] = handlefailure
1246 1246
1247 1247 def handlereply(op):
1248 1248 ui = pushop.ui
1249 1249 for partid, book, action in part2book:
1250 1250 partrep = op.records.getreplies(partid)
1251 1251 results = partrep[b'pushkey']
1252 1252 assert len(results) <= 1
1253 1253 if not results:
1254 1254 pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
1255 1255 else:
1256 1256 ret = int(results[0][b'return'])
1257 1257 if ret:
1258 1258 ui.status(bookmsgmap[action][0] % book)
1259 1259 else:
1260 1260 ui.warn(bookmsgmap[action][1] % book)
1261 1261 if pushop.bkresult is not None:
1262 1262 pushop.bkresult = 1
1263 1263
1264 1264 return handlereply
1265 1265
1266 1266
1267 1267 @b2partsgenerator(b'pushvars', idx=0)
1268 1268 def _getbundlesendvars(pushop, bundler):
1269 1269 '''send shellvars via bundle2'''
1270 1270 pushvars = pushop.pushvars
1271 1271 if pushvars:
1272 1272 shellvars = {}
1273 1273 for raw in pushvars:
1274 1274 if b'=' not in raw:
1275 1275 msg = (
1276 1276 b"unable to parse variable '%s', should follow "
1277 1277 b"'KEY=VALUE' or 'KEY=' format"
1278 1278 )
1279 1279 raise error.Abort(msg % raw)
1280 1280 k, v = raw.split(b'=', 1)
1281 1281 shellvars[k] = v
1282 1282
1283 1283 part = bundler.newpart(b'pushvars')
1284 1284
1285 1285 for key, value in pycompat.iteritems(shellvars):
1286 1286 part.addparam(key, value, mandatory=False)
1287 1287
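# On the command line these arrive via --pushvars (sketch; requires the
# server to allow pushvars):
#
#   hg push --pushvars "DEBUG=1" --pushvars "BYPASS_REVIEW="
#
# Server-side hooks receive them as environment variables prefixed with
# HG_USERVAR_ (e.g. HG_USERVAR_DEBUG), carried by the pushvars part.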
1288 1288
1289 1289 def _pushbundle2(pushop):
1290 1290 """push data to the remote using bundle2
1291 1291
1292 1292 The only currently supported type of data is changegroup but this will
1293 1293 evolve in the future."""
1294 1294 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1295 1295 pushback = pushop.trmanager and pushop.ui.configbool(
1296 1296 b'experimental', b'bundle2.pushback'
1297 1297 )
1298 1298
1299 1299 # create reply capability
1300 1300 capsblob = bundle2.encodecaps(
1301 1301 bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
1302 1302 )
1303 1303 bundler.newpart(b'replycaps', data=capsblob)
1304 1304 replyhandlers = []
1305 1305 for partgenname in b2partsgenorder:
1306 1306 partgen = b2partsgenmapping[partgenname]
1307 1307 ret = partgen(pushop, bundler)
1308 1308 if callable(ret):
1309 1309 replyhandlers.append(ret)
1310 1310 # do not push if nothing to push
1311 1311 if bundler.nbparts <= 1:
1312 1312 return
1313 1313 stream = util.chunkbuffer(bundler.getchunks())
1314 1314 try:
1315 1315 try:
1316 1316 with pushop.remote.commandexecutor() as e:
1317 1317 reply = e.callcommand(
1318 1318 b'unbundle',
1319 1319 {
1320 1320 b'bundle': stream,
1321 1321 b'heads': [b'force'],
1322 1322 b'url': pushop.remote.url(),
1323 1323 },
1324 1324 ).result()
1325 1325 except error.BundleValueError as exc:
1326 1326 raise error.Abort(_(b'missing support for %s') % exc)
1327 1327 try:
1328 1328 trgetter = None
1329 1329 if pushback:
1330 1330 trgetter = pushop.trmanager.transaction
1331 1331 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1332 1332 except error.BundleValueError as exc:
1333 1333 raise error.Abort(_(b'missing support for %s') % exc)
1334 1334 except bundle2.AbortFromPart as exc:
1335 1335 pushop.ui.status(_(b'remote: %s\n') % exc)
1336 1336 if exc.hint is not None:
1337 1337 pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1338 1338 raise error.Abort(_(b'push failed on remote'))
1339 1339 except error.PushkeyFailed as exc:
1340 1340 partid = int(exc.partid)
1341 1341 if partid not in pushop.pkfailcb:
1342 1342 raise
1343 1343 pushop.pkfailcb[partid](pushop, exc)
1344 1344 for rephand in replyhandlers:
1345 1345 rephand(op)
1346 1346
1347 1347
1348 1348 def _pushchangeset(pushop):
1349 1349 """Make the actual push of changeset bundle to remote repo"""
1350 1350 if b'changesets' in pushop.stepsdone:
1351 1351 return
1352 1352 pushop.stepsdone.add(b'changesets')
1353 1353 if not _pushcheckoutgoing(pushop):
1354 1354 return
1355 1355
1356 1356 # Should have verified this in push().
1357 1357 assert pushop.remote.capable(b'unbundle')
1358 1358
1359 1359 pushop.repo.prepushoutgoinghooks(pushop)
1360 1360 outgoing = pushop.outgoing
1361 1361 # TODO: get bundlecaps from remote
1362 1362 bundlecaps = None
1363 1363 # create a changegroup from local
1364 1364 if pushop.revs is None and not (
1365 1365 outgoing.excluded or pushop.repo.changelog.filteredrevs
1366 1366 ):
1367 1367 # push everything,
1368 1368 # use the fast path, no race possible on push
1369 1369 cg = changegroup.makechangegroup(
1370 1370 pushop.repo,
1371 1371 outgoing,
1372 1372 b'01',
1373 1373 b'push',
1374 1374 fastpath=True,
1375 1375 bundlecaps=bundlecaps,
1376 1376 )
1377 1377 else:
1378 1378 cg = changegroup.makechangegroup(
1379 1379 pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
1380 1380 )
1381 1381
1382 1382 # apply changegroup to remote
1383 1383 # local repo finds heads on server, finds out what
1384 1384 # revs it must push. once revs transferred, if server
1385 1385 # finds it has different heads (someone else won
1386 1386 # commit/push race), server aborts.
1387 1387 if pushop.force:
1388 1388 remoteheads = [b'force']
1389 1389 else:
1390 1390 remoteheads = pushop.remoteheads
1391 1391 # ssh: return remote's addchangegroup()
1392 1392 # http: return remote's addchangegroup() or 0 for error
1393 1393 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1394 1394
1395 1395
1396 1396 def _pushsyncphase(pushop):
1397 1397 """synchronise phase information locally and remotely"""
1398 1398 cheads = pushop.commonheads
1399 1399 # even when we don't push, exchanging phase data is useful
1400 1400 remotephases = listkeys(pushop.remote, b'phases')
1401 1401 if (
1402 1402 pushop.ui.configbool(b'ui', b'_usedassubrepo')
1403 1403 and remotephases # server supports phases
1404 1404 and pushop.cgresult is None # nothing was pushed
1405 1405 and remotephases.get(b'publishing', False)
1406 1406 ):
1407 1407 # When:
1408 1408 # - this is a subrepo push
1409 1409 # - and the remote supports phases
1410 1410 # - and no changeset was pushed
1411 1411 # - and the remote is publishing
1412 1412 # We may be in the issue 3871 case!
1413 1413 # We drop the phase synchronisation otherwise done as a courtesy,
1414 1414 # which could publish on the remote changesets that are still
1415 1415 # draft locally.
1416 1416 remotephases = {b'publishing': b'True'}
1417 1417 if not remotephases: # old server or public-only reply from non-publishing
1418 1418 _localphasemove(pushop, cheads)
1419 1419 # don't push any phase data as there is nothing to push
1420 1420 else:
1421 1421 ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
1422 1422 pheads, droots = ana
1423 1423 ### Apply remote phase on local
1424 1424 if remotephases.get(b'publishing', False):
1425 1425 _localphasemove(pushop, cheads)
1426 1426 else: # publish = False
1427 1427 _localphasemove(pushop, pheads)
1428 1428 _localphasemove(pushop, cheads, phases.draft)
1429 1429 ### Apply local phase on remote
1430 1430
1431 1431 if pushop.cgresult:
1432 1432 if b'phases' in pushop.stepsdone:
1433 1433 # phases already pushed through bundle2
1434 1434 return
1435 1435 outdated = pushop.outdatedphases
1436 1436 else:
1437 1437 outdated = pushop.fallbackoutdatedphases
1438 1438
1439 1439 pushop.stepsdone.add(b'phases')
1440 1440
1441 1441 # filter heads already turned public by the push
1442 1442 outdated = [c for c in outdated if c.node() not in pheads]
1443 1443 # fallback to independent pushkey command
1444 1444 for newremotehead in outdated:
1445 1445 with pushop.remote.commandexecutor() as e:
1446 1446 r = e.callcommand(
1447 1447 b'pushkey',
1448 1448 {
1449 1449 b'namespace': b'phases',
1450 1450 b'key': newremotehead.hex(),
1451 1451 b'old': b'%d' % phases.draft,
1452 1452 b'new': b'%d' % phases.public,
1453 1453 },
1454 1454 ).result()
1455 1455
1456 1456 if not r:
1457 1457 pushop.ui.warn(
1458 1458 _(b'updating %s to public failed!\n') % newremotehead
1459 1459 )
1460 1460
1461 1461
1462 1462 def _localphasemove(pushop, nodes, phase=phases.public):
1463 1463 """move <nodes> to <phase> in the local source repo"""
1464 1464 if pushop.trmanager:
1465 1465 phases.advanceboundary(
1466 1466 pushop.repo, pushop.trmanager.transaction(), phase, nodes
1467 1467 )
1468 1468 else:
1469 1469 # repo is not locked, do not change any phases!
1470 1470 # Informs the user that phases should have been moved when
1471 1471 # applicable.
1472 1472 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1473 1473 phasestr = phases.phasenames[phase]
1474 1474 if actualmoves:
1475 1475 pushop.ui.status(
1476 1476 _(
1477 1477 b'cannot lock source repo, skipping '
1478 1478 b'local %s phase update\n'
1479 1479 )
1480 1480 % phasestr
1481 1481 )
1482 1482
1483 1483
1484 1484 def _pushobsolete(pushop):
1485 1485 """utility function to push obsolete markers to a remote"""
1486 1486 if b'obsmarkers' in pushop.stepsdone:
1487 1487 return
1488 1488 repo = pushop.repo
1489 1489 remote = pushop.remote
1490 1490 pushop.stepsdone.add(b'obsmarkers')
1491 1491 if pushop.outobsmarkers:
1492 1492 pushop.ui.debug(b'try to push obsolete markers to remote\n')
1493 1493 rslts = []
1494 1494 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1495 1495 remotedata = obsolete._pushkeyescape(markers)
1496 1496 for key in sorted(remotedata, reverse=True):
1497 1497 # reverse sort to ensure we end with dump0
1498 1498 data = remotedata[key]
1499 1499 rslts.append(remote.pushkey(b'obsolete', key, b'', data))
1500 1500 if [r for r in rslts if not r]:
1501 1501 msg = _(b'failed to push some obsolete markers!\n')
1502 1502 repo.ui.warn(msg)
1503 1503
1504 1504
1505 1505 def _pushbookmark(pushop):
1506 1506 """Update bookmark position on remote"""
1507 1507 if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
1508 1508 return
1509 1509 pushop.stepsdone.add(b'bookmarks')
1510 1510 ui = pushop.ui
1511 1511 remote = pushop.remote
1512 1512
1513 1513 for b, old, new in pushop.outbookmarks:
1514 1514 action = b'update'
1515 1515 if not old:
1516 1516 action = b'export'
1517 1517 elif not new:
1518 1518 action = b'delete'
1519 1519
1520 1520 with remote.commandexecutor() as e:
1521 1521 r = e.callcommand(
1522 1522 b'pushkey',
1523 1523 {
1524 1524 b'namespace': b'bookmarks',
1525 1525 b'key': b,
1526 1526 b'old': hex(old),
1527 1527 b'new': hex(new),
1528 1528 },
1529 1529 ).result()
1530 1530
1531 1531 if r:
1532 1532 ui.status(bookmsgmap[action][0] % b)
1533 1533 else:
1534 1534 ui.warn(bookmsgmap[action][1] % b)
1535 1535 # discovery may have set the value from an invalid entry
1536 1536 if pushop.bkresult is not None:
1537 1537 pushop.bkresult = 1
1538 1538
1539 1539
1540 1540 class pulloperation(object):
1541 1541 """A object that represent a single pull operation
1542 1542
1543 1543 It purpose is to carry pull related state and very common operation.
1544 1544
1545 1545 A new should be created at the beginning of each pull and discarded
1546 1546 afterward.
1547 1547 """
1548 1548
1549 1549 def __init__(
1550 1550 self,
1551 1551 repo,
1552 1552 remote,
1553 1553 heads=None,
1554 1554 force=False,
1555 1555 bookmarks=(),
1556 1556 remotebookmarks=None,
1557 1557 streamclonerequested=None,
1558 1558 includepats=None,
1559 1559 excludepats=None,
1560 1560 depth=None,
1561 1561 ):
1562 1562 # repo we pull into
1563 1563 self.repo = repo
1564 1564 # repo we pull from
1565 1565 self.remote = remote
1566 1566 # revision we try to pull (None is "all")
1567 1567 self.heads = heads
1568 1568 # bookmark pulled explicitly
1569 1569 self.explicitbookmarks = [
1570 1570 repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
1571 1571 ]
1572 1572 # do we force pull?
1573 1573 self.force = force
1574 1574 # whether a streaming clone was requested
1575 1575 self.streamclonerequested = streamclonerequested
1576 1576 # transaction manager
1577 1577 self.trmanager = None
1578 1578 # set of common changesets between local and remote before pull
1579 1579 self.common = None
1580 1580 # set of pulled heads
1581 1581 self.rheads = None
1582 1582 # list of missing changesets to fetch remotely
1583 1583 self.fetch = None
1584 1584 # remote bookmarks data
1585 1585 self.remotebookmarks = remotebookmarks
1586 1586 # result of changegroup pulling (used as return code by pull)
1587 1587 self.cgresult = None
1588 1588 # list of steps already done
1589 1589 self.stepsdone = set()
1590 1590 # Whether we attempted a clone from pre-generated bundles.
1591 1591 self.clonebundleattempted = False
1592 1592 # Set of file patterns to include.
1593 1593 self.includepats = includepats
1594 1594 # Set of file patterns to exclude.
1595 1595 self.excludepats = excludepats
1596 1596 # Number of ancestor changesets to pull from each pulled head.
1597 1597 self.depth = depth
1598 1598
1599 1599 @util.propertycache
1600 1600 def pulledsubset(self):
1601 1601 """heads of the set of changeset target by the pull"""
1602 1602 # compute target subset
1603 1603 if self.heads is None:
1604 1604 # We pulled everything possible
1605 1605 # sync on everything common
1606 1606 c = set(self.common)
1607 1607 ret = list(self.common)
1608 1608 for n in self.rheads:
1609 1609 if n not in c:
1610 1610 ret.append(n)
1611 1611 return ret
1612 1612 else:
1613 1613 # We pulled a specific subset
1614 1614 # sync on this subset
1615 1615 return self.heads
1616 1616
1617 1617 @util.propertycache
1618 1618 def canusebundle2(self):
1619 1619 return not _forcebundle1(self)
1620 1620
1621 1621 @util.propertycache
1622 1622 def remotebundle2caps(self):
1623 1623 return bundle2.bundle2caps(self.remote)
1624 1624
1625 1625 def gettransaction(self):
1626 1626 # deprecated; talk to trmanager directly
1627 1627 return self.trmanager.transaction()
1628 1628
1629 1629
1630 1630 class transactionmanager(util.transactional):
1631 1631 """An object to manage the life cycle of a transaction
1632 1632
1633 1633 It creates the transaction on demand and calls the appropriate hooks when
1634 1634 closing the transaction."""
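# Illustrative lifecycle sketch: the manager is used as a context manager
# (via util.transactional), so the transaction is closed on success and
# released on error, e.g.
#
#   trmanager = transactionmanager(repo, b'pull', remote.url())
#   with trmanager:
#       tr = trmanager.transaction()  # created lazily on first use
#       ...  # apply pulled data under this transaction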
1635 1635
1636 1636 def __init__(self, repo, source, url):
1637 1637 self.repo = repo
1638 1638 self.source = source
1639 1639 self.url = url
1640 1640 self._tr = None
1641 1641
1642 1642 def transaction(self):
1643 1643 """Return an open transaction object, constructing if necessary"""
1644 1644 if not self._tr:
1645 1645 trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
1646 1646 self._tr = self.repo.transaction(trname)
1647 1647 self._tr.hookargs[b'source'] = self.source
1648 1648 self._tr.hookargs[b'url'] = self.url
1649 1649 return self._tr
1650 1650
1651 1651 def close(self):
1652 1652 """close transaction if created"""
1653 1653 if self._tr is not None:
1654 1654 self._tr.close()
1655 1655
1656 1656 def release(self):
1657 1657 """release transaction if created"""
1658 1658 if self._tr is not None:
1659 1659 self._tr.release()
1660 1660
1661 1661
1662 1662 def listkeys(remote, namespace):
1663 1663 with remote.commandexecutor() as e:
1664 1664 return e.callcommand(b'listkeys', {b'namespace': namespace}).result()
1665 1665
1666 1666
1667 1667 def _fullpullbundle2(repo, pullop):
1668 1668 # The server may send a partial reply, e.g. when inlining
1669 1669 # pre-computed bundles. In that case, update the common
1670 1670 # set based on the results and pull another bundle.
1671 1671 #
1672 1672 # There are two indicators that the process is finished:
1673 1673 # - no changeset has been added, or
1674 1674 # - all remote heads are known locally.
1675 1675 # The head check must use the unfiltered view as obsoletion
1676 1676 # markers can hide heads.
1677 1677 unfi = repo.unfiltered()
1678 1678 unficl = unfi.changelog
1679 1679
1680 1680 def headsofdiff(h1, h2):
1681 1681 """Returns heads(h1 % h2)"""
1682 1682 res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
1683 1683 return {ctx.node() for ctx in res}
1684 1684
1685 1685 def headsofunion(h1, h2):
1686 1686 """Returns heads((h1 + h2) - null)"""
1687 1687 res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
1688 1688 return {ctx.node() for ctx in res}
1689 1689
1690 1690 while True:
1691 1691 old_heads = unficl.heads()
1692 1692 clstart = len(unficl)
1693 1693 _pullbundle2(pullop)
1694 1694 if repository.NARROW_REQUIREMENT in repo.requirements:
1695 1695 # XXX narrow clones filter the heads on the server side during
1696 1696 # XXX getbundle and result in partial replies as well.
1697 1697 # XXX Disable pull bundles in this case as band aid to avoid
1698 1698 # XXX extra round trips.
1699 1699 break
1700 1700 if clstart == len(unficl):
1701 1701 break
1702 1702 if all(unficl.hasnode(n) for n in pullop.rheads):
1703 1703 break
1704 1704 new_heads = headsofdiff(unficl.heads(), old_heads)
1705 1705 pullop.common = headsofunion(new_heads, pullop.common)
1706 1706 pullop.rheads = set(pullop.rheads) - pullop.common
1707 1707
1708 1708
1709 1709 def add_confirm_callback(repo, pullop):
1710 1710 """ adds a finalize callback to transaction which can be used to show stats
1711 1711 to user and confirm the pull before committing transaction """
1712 1712
1713 1713 tr = pullop.trmanager.transaction()
1714 1714 scmutil.registersummarycallback(
1715 1715 repo, tr, txnname=b'pull', as_validator=True
1716 1716 )
1717 1717 reporef = weakref.ref(repo.unfiltered())
1718 1718
1719 1719 def prompt(tr):
1720 1720 repo = reporef()
1721 1721 cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No')
1722 1722 if repo.ui.promptchoice(cm):
1723 1723 raise error.Abort(_(b"user aborted"))
1724 1724
1725 1725 tr.addvalidator(b'900-pull-prompt', prompt)
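# Illustrative: besides the explicit ``confirm`` argument to ``pull()``,
# the prompt above can be enabled through configuration, e.g. in an hgrc:
#
#   [pull]
#   confirm = true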
1726 1726
1727 1727
1728 1728 def pull(
1729 1729 repo,
1730 1730 remote,
1731 1731 heads=None,
1732 1732 force=False,
1733 1733 bookmarks=(),
1734 1734 opargs=None,
1735 1735 streamclonerequested=None,
1736 1736 includepats=None,
1737 1737 excludepats=None,
1738 1738 depth=None,
1739 1739 confirm=None,
1740 1740 ):
1741 1741 """Fetch repository data from a remote.
1742 1742
1743 1743 This is the main function used to retrieve data from a remote repository.
1744 1744
1745 1745 ``repo`` is the local repository to clone into.
1746 1746 ``remote`` is a peer instance.
1747 1747 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1748 1748 default) means to pull everything from the remote.
1749 1749 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1750 1750 default, all remote bookmarks are pulled.
1751 1751 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1752 1752 initialization.
1753 1753 ``streamclonerequested`` is a boolean indicating whether a "streaming
1754 1754 clone" is requested. A "streaming clone" is essentially a raw file copy
1755 1755 of revlogs from the server. This only works when the local repository is
1756 1756 empty. The default value of ``None`` means to respect the server
1757 1757 configuration for preferring stream clones.
1758 1758 ``includepats`` and ``excludepats`` define explicit file patterns to
1759 1759 include and exclude in storage, respectively. If not defined, narrow
1760 1760 patterns from the repo instance are used, if available.
1761 1761 ``depth`` is an integer indicating the DAG depth of history we're
1762 1762 interested in. If defined, for each revision specified in ``heads``, we
1763 1763 will fetch up to this many of its ancestors and data associated with them.
1764 1764 ``confirm`` is a boolean indicating whether the pull should be confirmed
1765 1765 before committing the transaction. This overrides HGPLAIN.
1766 1766
1767 1767 Returns the ``pulloperation`` created for this pull.
1768 1768 """
1769 1769 if opargs is None:
1770 1770 opargs = {}
1771 1771
1772 1772 # We allow the narrow patterns to be passed in explicitly to provide more
1773 1773 # flexibility for API consumers.
1774 1774 if includepats or excludepats:
1775 1775 includepats = includepats or set()
1776 1776 excludepats = excludepats or set()
1777 1777 else:
1778 1778 includepats, excludepats = repo.narrowpats
1779 1779
1780 1780 narrowspec.validatepatterns(includepats)
1781 1781 narrowspec.validatepatterns(excludepats)
1782 1782
1783 1783 pullop = pulloperation(
1784 1784 repo,
1785 1785 remote,
1786 1786 heads,
1787 1787 force,
1788 1788 bookmarks=bookmarks,
1789 1789 streamclonerequested=streamclonerequested,
1790 1790 includepats=includepats,
1791 1791 excludepats=excludepats,
1792 1792 depth=depth,
1793 1793 **pycompat.strkwargs(opargs)
1794 1794 )
1795 1795
1796 1796 peerlocal = pullop.remote.local()
1797 1797 if peerlocal:
1798 1798 missing = set(peerlocal.requirements) - pullop.repo.supported
1799 1799 if missing:
1800 1800 msg = _(
1801 1801 b"required features are not"
1802 1802 b" supported in the destination:"
1803 1803 b" %s"
1804 1804 ) % (b', '.join(sorted(missing)))
1805 1805 raise error.Abort(msg)
1806 1806
1807 1807 pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
1808 1808 wlock = util.nullcontextmanager()
1809 1809 if not bookmod.bookmarksinstore(repo):
1810 1810 wlock = repo.wlock()
1811 1811 with wlock, repo.lock(), pullop.trmanager:
1812 1812 if confirm or (
1813 1813 repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain()
1814 1814 ):
1815 1815 add_confirm_callback(repo, pullop)
1816 1816
1817 1817 # Use the modern wire protocol, if available.
1818 1818 if remote.capable(b'command-changesetdata'):
1819 1819 exchangev2.pull(pullop)
1820 1820 else:
1821 1821 # This should ideally be in _pullbundle2(). However, it needs to run
1822 1822 # before discovery to avoid extra work.
1823 1823 _maybeapplyclonebundle(pullop)
1824 1824 streamclone.maybeperformlegacystreamclone(pullop)
1825 1825 _pulldiscovery(pullop)
1826 1826 if pullop.canusebundle2:
1827 1827 _fullpullbundle2(repo, pullop)
1828 1828 _pullchangeset(pullop)
1829 1829 _pullphase(pullop)
1830 1830 _pullbookmarks(pullop)
1831 1831 _pullobsolete(pullop)
1832 1832
1833 1833 # storing remotenames
1834 1834 if repo.ui.configbool(b'experimental', b'remotenames'):
1835 1835 logexchange.pullremotenames(repo, remote)
1836 1836
1837 1837 return pullop
1838 1838
1839 1839
1840 1840 # list of steps to perform discovery before pull
1841 1841 pulldiscoveryorder = []
1842 1842
1843 1843 # Mapping between step name and function
1844 1844 #
1845 1845 # This exists to help extensions wrap steps if necessary
1846 1846 pulldiscoverymapping = {}
1847 1847
1848 1848
1849 1849 def pulldiscovery(stepname):
1850 1850 """decorator for function performing discovery before pull
1851 1851
1852 1852 The function is added to the step -> function mapping and appended to the
1853 1853 list of steps. Beware that decorated functions will be added in order (this
1854 1854 may matter).
1855 1855
1856 1856 You can only use this decorator for a new step; if you want to wrap a step
1857 1857 from an extension, change the pulldiscoverymapping dictionary directly."""
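# Illustrative: an extension defining a brand new discovery step would
# register it as
#
#   @pulldiscovery(b'my-new-step')
#   def _mynewstep(pullop):
#       ...  # inspect or annotate pullop before data is fetched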
1858 1858
1859 1859 def dec(func):
1860 1860 assert stepname not in pulldiscoverymapping
1861 1861 pulldiscoverymapping[stepname] = func
1862 1862 pulldiscoveryorder.append(stepname)
1863 1863 return func
1864 1864
1865 1865 return dec
1866 1866
1867 1867
1868 1868 def _pulldiscovery(pullop):
1869 1869 """Run all discovery steps"""
1870 1870 for stepname in pulldiscoveryorder:
1871 1871 step = pulldiscoverymapping[stepname]
1872 1872 step(pullop)
1873 1873
1874 1874
1875 1875 @pulldiscovery(b'b1:bookmarks')
1876 1876 def _pullbookmarkbundle1(pullop):
1877 1877 """fetch bookmark data in bundle1 case
1878 1878
1879 1879 If not using bundle2, we have to fetch bookmarks before changeset
1880 1880 discovery to reduce the chance and impact of race conditions."""
1881 1881 if pullop.remotebookmarks is not None:
1882 1882 return
1883 1883 if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
1884 1884 # all known bundle2 servers now support listkeys, but let's be nice to
1885 1885 # new implementations.
1886 1886 return
1887 1887 books = listkeys(pullop.remote, b'bookmarks')
1888 1888 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1889 1889
1890 1890
1891 1891 @pulldiscovery(b'changegroup')
1892 1892 def _pulldiscoverychangegroup(pullop):
1893 1893 """discovery phase for the pull
1894 1894
1895 1895 Currently handles changeset discovery only; will be changed to handle all
1896 1896 discovery at some point."""
1897 1897 tmp = discovery.findcommonincoming(
1898 1898 pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
1899 1899 )
1900 1900 common, fetch, rheads = tmp
1901 1901 has_node = pullop.repo.unfiltered().changelog.index.has_node
1902 1902 if fetch and rheads:
1903 1903 # If a remote head is filtered locally, put it back in common.
1904 1904 #
1905 1905 # This is a hackish solution to catch most "common but locally
1906 1906 # hidden" situations. We do not perform discovery on the unfiltered
1907 1907 # repository because it ends up doing a pathological number of round
1908 1908 # trips for a huge number of changesets we do not care about.
1909 1909 #
1910 1910 # If a set of such "common but filtered" changesets exists on the server
1911 1911 # but does not include a remote head, we will not be able to detect it.
1912 1912 scommon = set(common)
1913 1913 for n in rheads:
1914 1914 if has_node(n):
1915 1915 if n not in scommon:
1916 1916 common.append(n)
1917 1917 if set(rheads).issubset(set(common)):
1918 1918 fetch = []
1919 1919 pullop.common = common
1920 1920 pullop.fetch = fetch
1921 1921 pullop.rheads = rheads
1922 1922
1923 1923
1924 1924 def _pullbundle2(pullop):
1925 1925 """pull data using bundle2
1926 1926
1927 1927 For now, the only supported data is the changegroup."""
1928 1928 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1929 1929
1930 1930 # make ui easier to access
1931 1931 ui = pullop.repo.ui
1932 1932
1933 1933 # At the moment we don't do stream clones over bundle2. If that is
1934 1934 # implemented then here's where the check for that will go.
1935 1935 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1936 1936
1937 1937 # declare the pull perimeter (common and heads bound the pulled set)
1938 1938 kwargs[b'common'] = pullop.common
1939 1939 kwargs[b'heads'] = pullop.heads or pullop.rheads
1940 1940
1941 1941 # check whether the server supports narrow, then add includepats and excludepats
1942 1942 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1943 1943 if servernarrow and pullop.includepats:
1944 1944 kwargs[b'includepats'] = pullop.includepats
1945 1945 if servernarrow and pullop.excludepats:
1946 1946 kwargs[b'excludepats'] = pullop.excludepats
1947 1947
1948 1948 if streaming:
1949 1949 kwargs[b'cg'] = False
1950 1950 kwargs[b'stream'] = True
1951 1951 pullop.stepsdone.add(b'changegroup')
1952 1952 pullop.stepsdone.add(b'phases')
1953 1953
1954 1954 else:
1955 1955 # pulling changegroup
1956 1956 pullop.stepsdone.add(b'changegroup')
1957 1957
1958 1958 kwargs[b'cg'] = pullop.fetch
1959 1959
1960 1960 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1961 1961 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1962 1962 if not legacyphase and hasbinaryphase:
1963 1963 kwargs[b'phases'] = True
1964 1964 pullop.stepsdone.add(b'phases')
1965 1965
1966 1966 if b'listkeys' in pullop.remotebundle2caps:
1967 1967 if b'phases' not in pullop.stepsdone:
1968 1968 kwargs[b'listkeys'] = [b'phases']
1969 1969
1970 1970 bookmarksrequested = False
1971 1971 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1972 1972 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1973 1973
1974 1974 if pullop.remotebookmarks is not None:
1975 1975 pullop.stepsdone.add(b'request-bookmarks')
1976 1976
1977 1977 if (
1978 1978 b'request-bookmarks' not in pullop.stepsdone
1979 1979 and pullop.remotebookmarks is None
1980 1980 and not legacybookmark
1981 1981 and hasbinarybook
1982 1982 ):
1983 1983 kwargs[b'bookmarks'] = True
1984 1984 bookmarksrequested = True
1985 1985
1986 1986 if b'listkeys' in pullop.remotebundle2caps:
1987 1987 if b'request-bookmarks' not in pullop.stepsdone:
1988 1988 # make sure to always include bookmark data when migrating
1989 1989 # `hg incoming --bundle` to using this function.
1990 1990 pullop.stepsdone.add(b'request-bookmarks')
1991 1991 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1992 1992
1993 1993 # If this is a full pull / clone and the server supports the clone bundles
1994 1994 # feature, tell the server whether we attempted a clone bundle. The
1995 1995 # presence of this flag indicates the client supports clone bundles. This
1996 1996 # will enable the server to treat clients that support clone bundles
1997 1997 # differently from those that don't.
1998 1998 if (
1999 1999 pullop.remote.capable(b'clonebundles')
2000 2000 and pullop.heads is None
2001 2001 and list(pullop.common) == [nullid]
2002 2002 ):
2003 2003 kwargs[b'cbattempted'] = pullop.clonebundleattempted
2004 2004
2005 2005 if streaming:
2006 2006 pullop.repo.ui.status(_(b'streaming all changes\n'))
2007 2007 elif not pullop.fetch:
2008 2008 pullop.repo.ui.status(_(b"no changes found\n"))
2009 2009 pullop.cgresult = 0
2010 2010 else:
2011 2011 if pullop.heads is None and list(pullop.common) == [nullid]:
2012 2012 pullop.repo.ui.status(_(b"requesting all changes\n"))
2013 2013 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2014 2014 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
2015 2015 if obsolete.commonversion(remoteversions) is not None:
2016 2016 kwargs[b'obsmarkers'] = True
2017 2017 pullop.stepsdone.add(b'obsmarkers')
2018 2018 _pullbundle2extraprepare(pullop, kwargs)
2019 2019
2020 2020 with pullop.remote.commandexecutor() as e:
2021 2021 args = dict(kwargs)
2022 2022 args[b'source'] = b'pull'
2023 2023 bundle = e.callcommand(b'getbundle', args).result()
2024 2024
2025 2025 try:
2026 2026 op = bundle2.bundleoperation(
2027 2027 pullop.repo, pullop.gettransaction, source=b'pull'
2028 2028 )
2029 2029 op.modes[b'bookmarks'] = b'records'
2030 2030 bundle2.processbundle(pullop.repo, bundle, op=op)
2031 2031 except bundle2.AbortFromPart as exc:
2032 2032 pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
2033 2033 raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
2034 2034 except error.BundleValueError as exc:
2035 2035 raise error.Abort(_(b'missing support for %s') % exc)
2036 2036
2037 2037 if pullop.fetch:
2038 2038 pullop.cgresult = bundle2.combinechangegroupresults(op)
2039 2039
2040 2040 # process phase changes
2041 2041 for namespace, value in op.records[b'listkeys']:
2042 2042 if namespace == b'phases':
2043 2043 _pullapplyphases(pullop, value)
2044 2044
2045 2045 # process bookmark updates
2046 2046 if bookmarksrequested:
2047 2047 books = {}
2048 2048 for record in op.records[b'bookmarks']:
2049 2049 books[record[b'bookmark']] = record[b"node"]
2050 2050 pullop.remotebookmarks = books
2051 2051 else:
2052 2052 for namespace, value in op.records[b'listkeys']:
2053 2053 if namespace == b'bookmarks':
2054 2054 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
2055 2055
2056 2056 # bookmark data were either already there or pulled in the bundle
2057 2057 if pullop.remotebookmarks is not None:
2058 2058 _pullbookmarks(pullop)
2059 2059
2060 2060
2061 2061 def _pullbundle2extraprepare(pullop, kwargs):
2062 2062 """hook function so that extensions can extend the getbundle call"""
2063 2063
2064 2064
2065 2065 def _pullchangeset(pullop):
2066 2066 """pull changeset from unbundle into the local repo"""
2067 2067 # We delay opening the transaction as late as possible so we
2068 2068 # don't open a transaction for nothing and don't break a future
2069 2069 # useful rollback call
2070 2070 if b'changegroup' in pullop.stepsdone:
2071 2071 return
2072 2072 pullop.stepsdone.add(b'changegroup')
2073 2073 if not pullop.fetch:
2074 2074 pullop.repo.ui.status(_(b"no changes found\n"))
2075 2075 pullop.cgresult = 0
2076 2076 return
2077 2077 tr = pullop.gettransaction()
2078 2078 if pullop.heads is None and list(pullop.common) == [nullid]:
2079 2079 pullop.repo.ui.status(_(b"requesting all changes\n"))
2080 2080 elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
2081 2081 # issue1320, avoid a race if remote changed after discovery
2082 2082 pullop.heads = pullop.rheads
2083 2083
2084 2084 if pullop.remote.capable(b'getbundle'):
2085 2085 # TODO: get bundlecaps from remote
2086 2086 cg = pullop.remote.getbundle(
2087 2087 b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
2088 2088 )
2089 2089 elif pullop.heads is None:
2090 2090 with pullop.remote.commandexecutor() as e:
2091 2091 cg = e.callcommand(
2092 2092 b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',}
2093 2093 ).result()
2094 2094
2095 2095 elif not pullop.remote.capable(b'changegroupsubset'):
2096 2096 raise error.Abort(
2097 2097 _(
2098 2098 b"partial pull cannot be done because "
2099 2099 b"other repository doesn't support "
2100 2100 b"changegroupsubset."
2101 2101 )
2102 2102 )
2103 2103 else:
2104 2104 with pullop.remote.commandexecutor() as e:
2105 2105 cg = e.callcommand(
2106 2106 b'changegroupsubset',
2107 2107 {
2108 2108 b'bases': pullop.fetch,
2109 2109 b'heads': pullop.heads,
2110 2110 b'source': b'pull',
2111 2111 },
2112 2112 ).result()
2113 2113
2114 2114 bundleop = bundle2.applybundle(
2115 2115 pullop.repo, cg, tr, b'pull', pullop.remote.url()
2116 2116 )
2117 2117 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2118 2118
2119 2119
2120 2120 def _pullphase(pullop):
2121 2121 # Get phases data from the remote
2122 2122 if b'phases' in pullop.stepsdone:
2123 2123 return
2124 2124 remotephases = listkeys(pullop.remote, b'phases')
2125 2125 _pullapplyphases(pullop, remotephases)
2126 2126
2127 2127
2128 2128 def _pullapplyphases(pullop, remotephases):
2129 2129 """apply phase movement from observed remote state"""
2130 2130 if b'phases' in pullop.stepsdone:
2131 2131 return
2132 2132 pullop.stepsdone.add(b'phases')
2133 2133 publishing = bool(remotephases.get(b'publishing', False))
2134 2134 if remotephases and not publishing:
2135 2135 # remote is new and non-publishing
2136 2136 pheads, _dr = phases.analyzeremotephases(
2137 2137 pullop.repo, pullop.pulledsubset, remotephases
2138 2138 )
2139 2139 dheads = pullop.pulledsubset
2140 2140 else:
2141 2141 # Remote is old or publishing; all common changesets
2142 2142 # should be seen as public
2143 2143 pheads = pullop.pulledsubset
2144 2144 dheads = []
2145 2145 unfi = pullop.repo.unfiltered()
2146 2146 phase = unfi._phasecache.phase
2147 2147 rev = unfi.changelog.index.get_rev
2148 2148 public = phases.public
2149 2149 draft = phases.draft
2150 2150
2151 2151 # exclude changesets already public locally and update the others
2152 2152 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
2153 2153 if pheads:
2154 2154 tr = pullop.gettransaction()
2155 2155 phases.advanceboundary(pullop.repo, tr, public, pheads)
2156 2156
2157 2157 # exclude changesets already draft locally and update the others
2158 2158 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
2159 2159 if dheads:
2160 2160 tr = pullop.gettransaction()
2161 2161 phases.advanceboundary(pullop.repo, tr, draft, dheads)
2162 2162
2163 2163
2164 2164 def _pullbookmarks(pullop):
2165 2165 """process the remote bookmark information to update the local one"""
2166 2166 if b'bookmarks' in pullop.stepsdone:
2167 2167 return
2168 2168 pullop.stepsdone.add(b'bookmarks')
2169 2169 repo = pullop.repo
2170 2170 remotebookmarks = pullop.remotebookmarks
2171 2171 bookmod.updatefromremote(
2172 2172 repo.ui,
2173 2173 repo,
2174 2174 remotebookmarks,
2175 2175 pullop.remote.url(),
2176 2176 pullop.gettransaction,
2177 2177 explicit=pullop.explicitbookmarks,
2178 2178 )
2179 2179
2180 2180
2181 2181 def _pullobsolete(pullop):
2182 2182 """utility function to pull obsolete markers from a remote
2183 2183
2184 2184 `gettransaction` is a function that returns the pull transaction, creating
2185 2185 one if necessary. We return the transaction to inform the calling code that
2186 2186 a new transaction has been created (when applicable).
2187 2187
2188 2188 Exists mostly to allow overriding for experimentation purposes."""
2189 2189 if b'obsmarkers' in pullop.stepsdone:
2190 2190 return
2191 2191 pullop.stepsdone.add(b'obsmarkers')
2192 2192 tr = None
2193 2193 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2194 2194 pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
2195 2195 remoteobs = listkeys(pullop.remote, b'obsolete')
2196 2196 if b'dump0' in remoteobs:
2197 2197 tr = pullop.gettransaction()
2198 2198 markers = []
2199 2199 for key in sorted(remoteobs, reverse=True):
2200 2200 if key.startswith(b'dump'):
2201 2201 data = util.b85decode(remoteobs[key])
2202 2202 version, newmarks = obsolete._readmarkers(data)
2203 2203 markers += newmarks
2204 2204 if markers:
2205 2205 pullop.repo.obsstore.add(tr, markers)
2206 2206 pullop.repo.invalidatevolatilesets()
2207 2207 return tr
2208 2208
2209 2209
2210 2210 def applynarrowacl(repo, kwargs):
2211 2211 """Apply narrow fetch access control.
2212 2212
2213 2213 This massages the named arguments for getbundle wire protocol commands
2214 2214 so requested data is filtered through access control rules.
2215 2215 """
2216 2216 ui = repo.ui
2217 2217 # TODO this assumes existence of HTTP and is a layering violation.
2218 2218 username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
2219 2219 user_includes = ui.configlist(
2220 2220 _NARROWACL_SECTION,
2221 2221 username + b'.includes',
2222 2222 ui.configlist(_NARROWACL_SECTION, b'default.includes'),
2223 2223 )
2224 2224 user_excludes = ui.configlist(
2225 2225 _NARROWACL_SECTION,
2226 2226 username + b'.excludes',
2227 2227 ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
2228 2228 )
2229 2229 if not user_includes:
2230 2230 raise error.Abort(
2231 2231 _(b"%s configuration for user %s is empty")
2232 2232 % (_NARROWACL_SECTION, username)
2233 2233 )
2234 2234
2235 2235 user_includes = [
2236 2236 b'path:.' if p == b'*' else b'path:' + p for p in user_includes
2237 2237 ]
2238 2238 user_excludes = [
2239 2239 b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
2240 2240 ]
2241 2241
2242 2242 req_includes = set(kwargs.get('includepats', []))
2243 2243 req_excludes = set(kwargs.get('excludepats', []))
2244 2244
2245 2245 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
2246 2246 req_includes, req_excludes, user_includes, user_excludes
2247 2247 )
2248 2248
2249 2249 if invalid_includes:
2250 2250 raise error.Abort(
2251 2251 _(b"The following includes are not accessible for %s: %s")
2252 2252 % (username, stringutil.pprint(invalid_includes))
2253 2253 )
2254 2254
2255 2255 new_args = {}
2256 2256 new_args.update(kwargs)
2257 2257 new_args['narrow'] = True
2258 2258 new_args['narrow_acl'] = True
2259 2259 new_args['includepats'] = req_includes
2260 2260 if req_excludes:
2261 2261 new_args['excludepats'] = req_excludes
2262 2262
2263 2263 return new_args
2264 2264
2265 2265
2266 2266 def _computeellipsis(repo, common, heads, known, match, depth=None):
2267 2267 """Compute the shape of a narrowed DAG.
2268 2268
2269 2269 Args:
2270 2270 repo: The repository we're transferring.
2271 2271 common: The roots of the DAG range we're transferring.
2272 2272 May be just [nullid], which means all ancestors of heads.
2273 2273 heads: The heads of the DAG range we're transferring.
2274 2274 match: The narrowmatcher that allows us to identify relevant changes.
2275 2275 depth: If not None, only consider nodes to be full nodes if they are at
2276 2276 most depth changesets away from one of heads.
2277 2277
2278 2278 Returns:
2279 2279 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
2280 2280
2281 2281 visitnodes: The list of nodes (either full or ellipsis) which
2282 2282 need to be sent to the client.
2283 2283 relevant_nodes: The set of changelog nodes which change a file inside
2284 2284 the narrowspec. The client needs these as non-ellipsis nodes.
2285 2285 ellipsisroots: A dict of {rev: parents} that is used in
2286 2286 narrowchangegroup to produce ellipsis nodes with the
2287 2287 correct parents.
2288 2288 """
2289 2289 cl = repo.changelog
2290 2290 mfl = repo.manifestlog
2291 2291
2292 2292 clrev = cl.rev
2293 2293
2294 2294 commonrevs = {clrev(n) for n in common} | {nullrev}
2295 2295 headsrevs = {clrev(n) for n in heads}
2296 2296
2297 2297 if depth:
2298 2298 revdepth = {h: 0 for h in headsrevs}
2299 2299
2300 2300 ellipsisheads = collections.defaultdict(set)
2301 2301 ellipsisroots = collections.defaultdict(set)
2302 2302
2303 2303 def addroot(head, curchange):
2304 2304 """Add a root to an ellipsis head, splitting heads with 3 roots."""
2305 2305 ellipsisroots[head].add(curchange)
2306 2306 # Recursively split ellipsis heads with 3 roots by finding the
2307 2307 # roots' youngest common descendant which is an elided merge commit.
2308 2308 # That descendant takes 2 of the 3 roots as its own, and becomes a
2309 2309 # root of the head.
2310 2310 while len(ellipsisroots[head]) > 2:
2311 2311 child, roots = splithead(head)
2312 2312 splitroots(head, child, roots)
2313 2313 head = child # Recurse in case we just added a 3rd root
2314 2314
2315 2315 def splitroots(head, child, roots):
2316 2316 ellipsisroots[head].difference_update(roots)
2317 2317 ellipsisroots[head].add(child)
2318 2318 ellipsisroots[child].update(roots)
2319 2319 ellipsisroots[child].discard(child)
2320 2320
2321 2321 def splithead(head):
2322 2322 r1, r2, r3 = sorted(ellipsisroots[head])
2323 2323 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
2324 2324 mid = repo.revs(
2325 2325 b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
2326 2326 )
2327 2327 for j in mid:
2328 2328 if j == nr2:
2329 2329 return nr2, (nr1, nr2)
2330 2330 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
2331 2331 return j, (nr1, nr2)
2332 2332 raise error.Abort(
2333 2333 _(
2334 2334 b'Failed to split up ellipsis node! head: %d, '
2335 2335 b'roots: %d %d %d'
2336 2336 )
2337 2337 % (head, r1, r2, r3)
2338 2338 )
2339 2339
2340 2340 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
2341 2341 visit = reversed(missing)
2342 2342 relevant_nodes = set()
2343 2343 visitnodes = [cl.node(m) for m in missing]
2344 2344 required = set(headsrevs) | known
2345 2345 for rev in visit:
2346 2346 clrev = cl.changelogrevision(rev)
2347 2347 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
2348 2348 if depth is not None:
2349 2349 curdepth = revdepth[rev]
2350 2350 for p in ps:
2351 2351 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
2352 2352 needed = False
2353 2353 shallow_enough = depth is None or revdepth[rev] <= depth
2354 2354 if shallow_enough:
2355 2355 curmf = mfl[clrev.manifest].read()
2356 2356 if ps:
2357 2357 # We choose to not trust the changed files list in
2358 2358 # changesets because it's not always correct. TODO: could
2359 2359 # we trust it for the non-merge case?
2360 2360 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
2361 2361 needed = bool(curmf.diff(p1mf, match))
2362 2362 if not needed and len(ps) > 1:
2363 2363 # For merge changes, the list of changed files is not
2364 2364 # helpful, since we need to emit the merge if a file
2365 2365 # in the narrow spec has changed on either side of the
2366 2366 # merge. As a result, we do a manifest diff to check.
2367 2367 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2368 2368 needed = bool(curmf.diff(p2mf, match))
2369 2369 else:
2370 2370 # For a root node, we need to include the node if any
2371 2371 # files in the node match the narrowspec.
2372 2372 needed = any(curmf.walk(match))
2373 2373
2374 2374 if needed:
2375 2375 for head in ellipsisheads[rev]:
2376 2376 addroot(head, rev)
2377 2377 for p in ps:
2378 2378 required.add(p)
2379 2379 relevant_nodes.add(cl.node(rev))
2380 2380 else:
2381 2381 if not ps:
2382 2382 ps = [nullrev]
2383 2383 if rev in required:
2384 2384 for head in ellipsisheads[rev]:
2385 2385 addroot(head, rev)
2386 2386 for p in ps:
2387 2387 ellipsisheads[p].add(rev)
2388 2388 else:
2389 2389 for p in ps:
2390 2390 ellipsisheads[p] |= ellipsisheads[rev]
2391 2391
2392 2392 # add common changesets as roots of their reachable ellipsis heads
2393 2393 for c in commonrevs:
2394 2394 for head in ellipsisheads[c]:
2395 2395 addroot(head, c)
2396 2396 return visitnodes, relevant_nodes, ellipsisroots
2397 2397
2398 2398
2399 2399 def caps20to10(repo, role):
2400 2400 """return a set with appropriate options to use bundle20 during getbundle"""
2401 2401 caps = {b'HG20'}
2402 2402 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2403 2403 caps.add(b'bundle2=' + urlreq.quote(capsblob))
2404 2404 return caps
2405 2405
2406 2406
2407 2407 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2408 2408 getbundle2partsorder = []
2409 2409
2410 2410 # Mapping between step name and function
2411 2411 #
2412 2412 # This exists to help extensions wrap steps if necessary
2413 2413 getbundle2partsmapping = {}
2414 2414
2415 2415
2416 2416 def getbundle2partsgenerator(stepname, idx=None):
2417 2417 """decorator for function generating bundle2 part for getbundle
2418 2418
2419 2419 The function is added to the step -> function mapping and appended to the
2420 2420 list of steps. Beware that decorated functions will be added in order
2421 2421 (this may matter).
2422 2422
2423 2423 You can only use this decorator for new steps; if you want to wrap a step
2424 2424 from an extension, modify the getbundle2partsmapping dictionary directly."""
2425 2425
2426 2426 def dec(func):
2427 2427 assert stepname not in getbundle2partsmapping
2428 2428 getbundle2partsmapping[stepname] = func
2429 2429 if idx is None:
2430 2430 getbundle2partsorder.append(stepname)
2431 2431 else:
2432 2432 getbundle2partsorder.insert(idx, stepname)
2433 2433 return func
2434 2434
2435 2435 return dec
2436 2436
2437 2437
2438 2438 def bundle2requested(bundlecaps):
2439 2439 if bundlecaps is not None:
2440 2440 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2441 2441 return False
2442 2442
2443 2443
2444 2444 def getbundlechunks(
2445 2445 repo, source, heads=None, common=None, bundlecaps=None, **kwargs
2446 2446 ):
2447 2447 """Return chunks constituting a bundle's raw data.
2448 2448
2449 2449 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2450 2450 passed.
2451 2451
2452 2452 Returns a 2-tuple of a dict with metadata about the generated bundle
2453 2453 and an iterator over raw chunks (of varying sizes).
2454 2454 """
2455 2455 kwargs = pycompat.byteskwargs(kwargs)
2456 2456 info = {}
2457 2457 usebundle2 = bundle2requested(bundlecaps)
2458 2458 # bundle10 case
2459 2459 if not usebundle2:
2460 2460 if bundlecaps and not kwargs.get(b'cg', True):
2461 2461 raise ValueError(
2462 2462 _(b'request for bundle10 must include changegroup')
2463 2463 )
2464 2464
2465 2465 if kwargs:
2466 2466 raise ValueError(
2467 2467 _(b'unsupported getbundle arguments: %s')
2468 2468 % b', '.join(sorted(kwargs.keys()))
2469 2469 )
2470 2470 outgoing = _computeoutgoing(repo, heads, common)
2471 2471 info[b'bundleversion'] = 1
2472 2472 return (
2473 2473 info,
2474 2474 changegroup.makestream(
2475 2475 repo, outgoing, b'01', source, bundlecaps=bundlecaps
2476 2476 ),
2477 2477 )
2478 2478
2479 2479 # bundle20 case
2480 2480 info[b'bundleversion'] = 2
2481 2481 b2caps = {}
2482 2482 for bcaps in bundlecaps:
2483 2483 if bcaps.startswith(b'bundle2='):
2484 2484 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2485 2485 b2caps.update(bundle2.decodecaps(blob))
2486 2486 bundler = bundle2.bundle20(repo.ui, b2caps)
2487 2487
2488 2488 kwargs[b'heads'] = heads
2489 2489 kwargs[b'common'] = common
2490 2490
2491 2491 for name in getbundle2partsorder:
2492 2492 func = getbundle2partsmapping[name]
2493 2493 func(
2494 2494 bundler,
2495 2495 repo,
2496 2496 source,
2497 2497 bundlecaps=bundlecaps,
2498 2498 b2caps=b2caps,
2499 2499 **pycompat.strkwargs(kwargs)
2500 2500 )
2501 2501
2502 2502 info[b'prefercompressed'] = bundler.prefercompressed
2503 2503
2504 2504 return info, bundler.getchunks()
2505 2505
2506 2506
2507 2507 @getbundle2partsgenerator(b'stream2')
2508 2508 def _getbundlestream2(bundler, repo, *args, **kwargs):
2509 2509 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2510 2510
2511 2511
2512 2512 @getbundle2partsgenerator(b'changegroup')
2513 2513 def _getbundlechangegrouppart(
2514 2514 bundler,
2515 2515 repo,
2516 2516 source,
2517 2517 bundlecaps=None,
2518 2518 b2caps=None,
2519 2519 heads=None,
2520 2520 common=None,
2521 2521 **kwargs
2522 2522 ):
2523 2523 """add a changegroup part to the requested bundle"""
2524 2524 if not kwargs.get('cg', True) or not b2caps:
2525 2525 return
2526 2526
2527 2527 version = b'01'
2528 2528 cgversions = b2caps.get(b'changegroup')
2529 2529 if cgversions: # 3.1 and 3.2 ship with an empty value
2530 2530 cgversions = [
2531 2531 v
2532 2532 for v in cgversions
2533 2533 if v in changegroup.supportedoutgoingversions(repo)
2534 2534 ]
2535 2535 if not cgversions:
2536 2536 raise error.Abort(_(b'no common changegroup version'))
2537 2537 version = max(cgversions)
2538 2538
2539 2539 outgoing = _computeoutgoing(repo, heads, common)
2540 2540 if not outgoing.missing:
2541 2541 return
2542 2542
2543 2543 if kwargs.get('narrow', False):
2544 2544 include = sorted(filter(bool, kwargs.get('includepats', [])))
2545 2545 exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
2546 2546 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2547 2547 else:
2548 2548 matcher = None
2549 2549
2550 2550 cgstream = changegroup.makestream(
2551 2551 repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
2552 2552 )
2553 2553
2554 2554 part = bundler.newpart(b'changegroup', data=cgstream)
2555 2555 if cgversions:
2556 2556 part.addparam(b'version', version)
2557 2557
2558 2558 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2559 2559
2560 2560 if b'treemanifest' in repo.requirements:
2561 2561 part.addparam(b'treemanifest', b'1')
2562 2562
2563 2563 if b'exp-sidedata-flag' in repo.requirements:
2564 2564 part.addparam(b'exp-sidedata', b'1')
2565 2565
2566 2566 if (
2567 2567 kwargs.get('narrow', False)
2568 2568 and kwargs.get('narrow_acl', False)
2569 2569 and (include or exclude)
2570 2570 ):
2571 2571 # this is mandatory because otherwise ACL clients won't work
2572 2572 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2573 2573 narrowspecpart.data = b'%s\0%s' % (
2574 2574 b'\n'.join(include),
2575 2575 b'\n'.join(exclude),
2576 2576 )
2577 2577
2578 2578
2579 2579 @getbundle2partsgenerator(b'bookmarks')
2580 2580 def _getbundlebookmarkpart(
2581 2581 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2582 2582 ):
2583 2583 """add a bookmark part to the requested bundle"""
2584 2584 if not kwargs.get('bookmarks', False):
2585 2585 return
2586 2586 if not b2caps or b'bookmarks' not in b2caps:
2587 2587 raise error.Abort(_(b'no common bookmarks exchange method'))
2588 2588 books = bookmod.listbinbookmarks(repo)
2589 2589 data = bookmod.binaryencode(books)
2590 2590 if data:
2591 2591 bundler.newpart(b'bookmarks', data=data)
2592 2592
2593 2593
2594 2594 @getbundle2partsgenerator(b'listkeys')
2595 2595 def _getbundlelistkeysparts(
2596 2596 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2597 2597 ):
2598 2598 """add parts containing listkeys namespaces to the requested bundle"""
2599 2599 listkeys = kwargs.get('listkeys', ())
2600 2600 for namespace in listkeys:
2601 2601 part = bundler.newpart(b'listkeys')
2602 2602 part.addparam(b'namespace', namespace)
2603 2603 keys = repo.listkeys(namespace).items()
2604 2604 part.data = pushkey.encodekeys(keys)
2605 2605
2606 2606
2607 2607 @getbundle2partsgenerator(b'obsmarkers')
2608 2608 def _getbundleobsmarkerpart(
2609 2609 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2610 2610 ):
2611 2611 """add an obsolescence markers part to the requested bundle"""
2612 2612 if kwargs.get('obsmarkers', False):
2613 2613 if heads is None:
2614 2614 heads = repo.heads()
2615 2615 subset = [c.node() for c in repo.set(b'::%ln', heads)]
2616 2616 markers = repo.obsstore.relevantmarkers(subset)
2617 2617 markers = obsutil.sortedmarkers(markers)
2618 2618 bundle2.buildobsmarkerspart(bundler, markers)
2619 2619
2620 2620
2621 2621 @getbundle2partsgenerator(b'phases')
2622 2622 def _getbundlephasespart(
2623 2623 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2624 2624 ):
2625 2625 """add phase heads part to the requested bundle"""
2626 2626 if kwargs.get('phases', False):
2627 2627 if not b2caps or b'heads' not in b2caps.get(b'phases', ()):
2628 2628 raise error.Abort(_(b'no common phases exchange method'))
2629 2629 if heads is None:
2630 2630 heads = repo.heads()
2631 2631
2632 2632 headsbyphase = collections.defaultdict(set)
2633 2633 if repo.publishing():
2634 2634 headsbyphase[phases.public] = heads
2635 2635 else:
2636 2636 # find the appropriate heads to move
2637 2637
2638 2638 phase = repo._phasecache.phase
2639 2639 node = repo.changelog.node
2640 2640 rev = repo.changelog.rev
2641 2641 for h in heads:
2642 2642 headsbyphase[phase(repo, rev(h))].add(h)
2643 2643 seenphases = list(headsbyphase.keys())
2644 2644
2645 2645 # We do not handle anything but public and draft phases for now
2646 2646 if seenphases:
2647 2647 assert max(seenphases) <= phases.draft
2648 2648
2649 2649 # if client is pulling non-public changesets, we need to find
2650 2650 # intermediate public heads.
2651 2651 draftheads = headsbyphase.get(phases.draft, set())
2652 2652 if draftheads:
2653 2653 publicheads = headsbyphase.get(phases.public, set())
2654 2654
2655 2655 revset = b'heads(only(%ln, %ln) and public())'
2656 2656 extraheads = repo.revs(revset, draftheads, publicheads)
2657 2657 for r in extraheads:
2658 2658 headsbyphase[phases.public].add(node(r))
2659 2659
2660 2660 # transform data in a format used by the encoding function
2661 2661 phasemapping = []
2662 2662 for phase in phases.allphases:
2663 2663 phasemapping.append(sorted(headsbyphase[phase]))
2664 2664
2665 2665 # generate the actual part
2666 2666 phasedata = phases.binaryencode(phasemapping)
2667 2667 bundler.newpart(b'phase-heads', data=phasedata)
2668 2668
2669 2669
2670 2670 @getbundle2partsgenerator(b'hgtagsfnodes')
2671 2671 def _getbundletagsfnodes(
2672 2672 bundler,
2673 2673 repo,
2674 2674 source,
2675 2675 bundlecaps=None,
2676 2676 b2caps=None,
2677 2677 heads=None,
2678 2678 common=None,
2679 2679 **kwargs
2680 2680 ):
2681 2681 """Transfer the .hgtags filenodes mapping.
2682 2682
2683 2683 Only values for heads in this bundle will be transferred.
2684 2684
2685 2685 The part data consists of pairs of 20 byte changeset node and .hgtags
2686 2686 filenodes raw values.
2687 2687 """
2688 2688 # Don't send unless:
2689 2689 # - changesets are being exchanged,
2690 2690 # - the client supports it.
2691 2691 if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
2692 2692 return
2693 2693
2694 2694 outgoing = _computeoutgoing(repo, heads, common)
2695 2695 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2696 2696
2697 2697
2698 2698 @getbundle2partsgenerator(b'cache:rev-branch-cache')
2699 2699 def _getbundlerevbranchcache(
2700 2700 bundler,
2701 2701 repo,
2702 2702 source,
2703 2703 bundlecaps=None,
2704 2704 b2caps=None,
2705 2705 heads=None,
2706 2706 common=None,
2707 2707 **kwargs
2708 2708 ):
2709 2709 """Transfer the rev-branch-cache mapping
2710 2710
2711 2711 The payload is a series of data related to each branch
2712 2712
2713 2713 1) branch name length
2714 2714 2) number of open heads
2715 2715 3) number of closed heads
2716 2716 4) open heads nodes
2717 2717 5) closed heads nodes
2718 2718 """
2719 2719 # Don't send unless:
2720 2720 # - changesets are being exchanged,
2721 2721 # - the client supports it,
2722 2722 # - narrow bundle isn't in play (not currently compatible).
2723 2723 if (
2724 2724 not kwargs.get('cg', True)
2725 2725 or not b2caps
2726 2726 or b'rev-branch-cache' not in b2caps
2727 2727 or kwargs.get('narrow', False)
2728 2728 or repo.ui.has_section(_NARROWACL_SECTION)
2729 2729 ):
2730 2730 return
2731 2731
2732 2732 outgoing = _computeoutgoing(repo, heads, common)
2733 2733 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2734 2734
2735 2735
2736 2736 def check_heads(repo, their_heads, context):
2737 2737 """check if the heads of a repo have been modified
2738 2738
2739 2739 Used by peer for unbundling.
2740 2740 """
2741 2741 heads = repo.heads()
2742 2742 heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()
2743 2743 if not (
2744 2744 their_heads == [b'force']
2745 2745 or their_heads == heads
2746 2746 or their_heads == [b'hashed', heads_hash]
2747 2747 ):
2748 2748 # someone else committed/pushed/unbundled while we
2749 2749 # were transferring data
2750 2750 raise error.PushRaced(
2751 2751 b'repository changed while %s - please try again' % context
2752 2752 )
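# Illustrative: a client advertising its view of the heads in hashed form
# computes the digest the same way as above, i.e. something like
#
#   their_heads = [b'hashed',
#                  hashutil.sha1(b''.join(sorted(observed_heads))).digest()]
#
# where ``observed_heads`` stands for the head nodes it saw during discovery.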
2753 2753
2754 2754
2755 2755 def unbundle(repo, cg, heads, source, url):
2756 2756 """Apply a bundle to a repo.
2757 2757
2758 2758 This function makes sure the repo is locked during the application and has
2759 2759 a mechanism to check that no push race occurred between the creation of the
2760 2760 bundle and its application.
2761 2761
2762 2762 If the push was raced, a PushRaced exception is raised."""
2763 2763 r = 0
2764 2764 # need a transaction when processing a bundle2 stream
2765 2765 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2766 2766 lockandtr = [None, None, None]
2767 2767 recordout = None
2768 2768 # quick fix for output mismatch with bundle2 in 3.4
2769 2769 captureoutput = repo.ui.configbool(
2770 2770 b'experimental', b'bundle2-output-capture'
2771 2771 )
2772 2772 if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
2773 2773 captureoutput = True
2774 2774 try:
2775 2775 # note: outside bundle1, 'heads' is expected to be empty and this
2776 2776 # 'check_heads' call will be a no-op
2777 2777 check_heads(repo, heads, b'uploading changes')
2778 2778 # push can proceed
2779 2779 if not isinstance(cg, bundle2.unbundle20):
2780 2780 # legacy case: bundle1 (changegroup 01)
2781 2781 txnname = b"\n".join([source, util.hidepassword(url)])
2782 2782 with repo.lock(), repo.transaction(txnname) as tr:
2783 2783 op = bundle2.applybundle(repo, cg, tr, source, url)
2784 2784 r = bundle2.combinechangegroupresults(op)
2785 2785 else:
2786 2786 r = None
2787 2787 try:
2788 2788
2789 2789 def gettransaction():
2790 2790 if not lockandtr[2]:
2791 2791 if not bookmod.bookmarksinstore(repo):
2792 2792 lockandtr[0] = repo.wlock()
2793 2793 lockandtr[1] = repo.lock()
2794 2794 lockandtr[2] = repo.transaction(source)
2795 2795 lockandtr[2].hookargs[b'source'] = source
2796 2796 lockandtr[2].hookargs[b'url'] = url
2797 2797 lockandtr[2].hookargs[b'bundle2'] = b'1'
2798 2798 return lockandtr[2]
2799 2799
2800 2800 # Do greedy locking by default until we're satisfied with lazy
2801 2801 # locking.
2802 2802 if not repo.ui.configbool(
2803 2803 b'experimental', b'bundle2lazylocking'
2804 2804 ):
2805 2805 gettransaction()
2806 2806
2807 2807 op = bundle2.bundleoperation(
2808 2808 repo,
2809 2809 gettransaction,
2810 2810 captureoutput=captureoutput,
2811 2811 source=b'push',
2812 2812 )
2813 2813 try:
2814 2814 op = bundle2.processbundle(repo, cg, op=op)
2815 2815 finally:
2816 2816 r = op.reply
2817 2817 if captureoutput and r is not None:
2818 2818 repo.ui.pushbuffer(error=True, subproc=True)
2819 2819
2820 2820 def recordout(output):
2821 2821 r.newpart(b'output', data=output, mandatory=False)
2822 2822
2823 2823 if lockandtr[2] is not None:
2824 2824 lockandtr[2].close()
2825 2825 except BaseException as exc:
2826 2826 exc.duringunbundle2 = True
2827 2827 if captureoutput and r is not None:
2828 2828 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2829 2829
2830 2830 def recordout(output):
2831 2831 part = bundle2.bundlepart(
2832 2832 b'output', data=output, mandatory=False
2833 2833 )
2834 2834 parts.append(part)
2835 2835
2836 2836 raise
2837 2837 finally:
2838 2838 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2839 2839 if recordout is not None:
2840 2840 recordout(repo.ui.popbuffer())
2841 2841 return r
2842 2842
2843 2843
2844 2844 def _maybeapplyclonebundle(pullop):
2845 2845 """Apply a clone bundle from a remote, if possible."""
2846 2846
2847 2847 repo = pullop.repo
2848 2848 remote = pullop.remote
2849 2849
2850 2850 if not repo.ui.configbool(b'ui', b'clonebundles'):
2851 2851 return
2852 2852
2853 2853 # Only run if local repo is empty.
2854 2854 if len(repo):
2855 2855 return
2856 2856
2857 2857 if pullop.heads:
2858 2858 return
2859 2859
2860 2860 if not remote.capable(b'clonebundles'):
2861 2861 return
2862 2862
2863 2863 with remote.commandexecutor() as e:
2864 2864 res = e.callcommand(b'clonebundles', {}).result()
2865 2865
2866 2866 # If we call the wire protocol command, that's good enough to record the
2867 2867 # attempt.
2868 2868 pullop.clonebundleattempted = True
2869 2869
2870 2870 entries = parseclonebundlesmanifest(repo, res)
2871 2871 if not entries:
2872 2872 repo.ui.note(
2873 2873 _(
2874 2874 b'no clone bundles available on remote; '
2875 2875 b'falling back to regular clone\n'
2876 2876 )
2877 2877 )
2878 2878 return
2879 2879
2880 2880 entries = filterclonebundleentries(
2881 2881 repo, entries, streamclonerequested=pullop.streamclonerequested
2882 2882 )
2883 2883
2884 2884 if not entries:
2885 2885 # There is a thundering herd concern here. However, if a server
2886 2886 # operator doesn't advertise bundles appropriate for its clients,
2887 2887 # they deserve what's coming. Furthermore, from a client's
2888 2888 # perspective, no automatic fallback would mean not being able to
2889 2889 # clone!
2890 2890 repo.ui.warn(
2891 2891 _(
2892 2892 b'no compatible clone bundles available on server; '
2893 2893 b'falling back to regular clone\n'
2894 2894 )
2895 2895 )
2896 2896 repo.ui.warn(
2897 2897 _(b'(you may want to report this to the server operator)\n')
2898 2898 )
2899 2899 return
2900 2900
2901 2901 entries = sortclonebundleentries(repo.ui, entries)
2902 2902
2903 2903 url = entries[0][b'URL']
2904 2904 repo.ui.status(_(b'applying clone bundle from %s\n') % url)
2905 2905 if trypullbundlefromurl(repo.ui, repo, url):
2906 2906 repo.ui.status(_(b'finished applying clone bundle\n'))
2907 2907 # Bundle failed.
2908 2908 #
2909 2909 # We abort by default to avoid the thundering herd of
2910 2910 # clients flooding a server that was expecting expensive
2911 2911 # clone load to be offloaded.
2912 2912 elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
2913 2913 repo.ui.warn(_(b'falling back to normal clone\n'))
2914 2914 else:
2915 2915 raise error.Abort(
2916 2916 _(b'error applying bundle'),
2917 2917 hint=_(
2918 2918 b'if this error persists, consider contacting '
2919 2919 b'the server operator or disable clone '
2920 2920 b'bundles via '
2921 2921 b'"--config ui.clonebundles=false"'
2922 2922 ),
2923 2923 )
2924 2924
2925 2925
2926 2926 def parseclonebundlesmanifest(repo, s):
2927 2927 """Parses the raw text of a clone bundles manifest.
2928 2928
2929 2929 Returns a list of dicts. Each dict has a ``URL`` key corresponding
2930 2930 to the URL; the other keys are the attributes for the entry.
2931 2931 """
2932 2932 m = []
2933 2933 for line in s.splitlines():
2934 2934 fields = line.split()
2935 2935 if not fields:
2936 2936 continue
2937 2937 attrs = {b'URL': fields[0]}
2938 2938 for rawattr in fields[1:]:
2939 2939 key, value = rawattr.split(b'=', 1)
2940 2940 key = urlreq.unquote(key)
2941 2941 value = urlreq.unquote(value)
2942 2942 attrs[key] = value
2943 2943
2944 2944 # Parse BUNDLESPEC into components. This makes client-side
2945 2945 # preferences easier to specify since you can prefer a single
2946 2946 # component of the BUNDLESPEC.
2947 2947 if key == b'BUNDLESPEC':
2948 2948 try:
2949 2949 bundlespec = parsebundlespec(repo, value)
2950 2950 attrs[b'COMPRESSION'] = bundlespec.compression
2951 2951 attrs[b'VERSION'] = bundlespec.version
2952 2952 except error.InvalidBundleSpecification:
2953 2953 pass
2954 2954 except error.UnsupportedBundleSpecification:
2955 2955 pass
2956 2956
2957 2957 m.append(attrs)
2958 2958
2959 2959 return m
2960 2960
2961 2961
2962 2962 def isstreamclonespec(bundlespec):
2963 2963 # Stream clone v1
2964 2964 if bundlespec.wirecompression == b'UN' and bundlespec.wireversion == b's1':
2965 2965 return True
2966 2966
2967 2967 # Stream clone v2
2968 2968 if (
2969 2969 bundlespec.wirecompression == b'UN'
2970 2970 and bundlespec.wireversion == b'02'
2971 2971 and bundlespec.contentopts.get(b'streamv2')
2972 2972 ):
2973 2973 return True
2974 2974
2975 2975 return False
2976 2976
2977 2977
2978 2978 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2979 2979 """Remove incompatible clone bundle manifest entries.
2980 2980
2981 2981 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2982 2982 and returns a new list consisting of only the entries that this client
2983 2983 should be able to apply.
2984 2984
2985 2985 There is no guarantee we'll be able to apply all returned entries because
2986 2986 the metadata we use to filter on may be missing or wrong.
2987 2987 """
2988 2988 newentries = []
2989 2989 for entry in entries:
2990 2990 spec = entry.get(b'BUNDLESPEC')
2991 2991 if spec:
2992 2992 try:
2993 2993 bundlespec = parsebundlespec(repo, spec, strict=True)
2994 2994
2995 2995 # If a stream clone was requested, filter out non-streamclone
2996 2996 # entries.
2997 2997 if streamclonerequested and not isstreamclonespec(bundlespec):
2998 2998 repo.ui.debug(
2999 2999 b'filtering %s because not a stream clone\n'
3000 3000 % entry[b'URL']
3001 3001 )
3002 3002 continue
3003 3003
3004 3004 except error.InvalidBundleSpecification as e:
3005 3005 repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
3006 3006 continue
3007 3007 except error.UnsupportedBundleSpecification as e:
3008 3008 repo.ui.debug(
3009 3009 b'filtering %s because unsupported bundle '
3010 3010 b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
3011 3011 )
3012 3012 continue
3013 3013 # If we don't have a spec and requested a stream clone, we don't know
3014 3014 # what the entry is so don't attempt to apply it.
3015 3015 elif streamclonerequested:
3016 3016 repo.ui.debug(
3017 3017 b'filtering %s because cannot determine if a stream '
3018 3018 b'clone bundle\n' % entry[b'URL']
3019 3019 )
3020 3020 continue
3021 3021
3022 3022 if b'REQUIRESNI' in entry and not sslutil.hassni:
3023 3023 repo.ui.debug(
3024 3024 b'filtering %s because SNI not supported\n' % entry[b'URL']
3025 3025 )
3026 3026 continue
3027 3027
3028 if b'REQUIREDRAM' in entry:
3029 try:
3030 requiredram = util.sizetoint(entry[b'REQUIREDRAM'])
3031 except error.ParseError:
3032 repo.ui.debug(
3033 b'filtering %s due to a bad REQUIREDRAM attribute\n'
3034 % entry[b'URL']
3035 )
3036 continue
3037 actualram = repo.ui.estimatememory()
3038 if actualram is not None and actualram * 0.66 < requiredram:
3039 repo.ui.debug(
3040 b'filtering %s as it needs more than 2/3 of system memory\n'
3041 % entry[b'URL']
3042 )
3043 continue
3044
3028 3045 newentries.append(entry)
3029 3046
3030 3047 return newentries
3031 3048
3032 3049
3033 3050 class clonebundleentry(object):
3034 3051 """Represents an item in a clone bundles manifest.
3035 3052
3036 3053 This rich class is needed to support sorting since sorted() in Python 3
3037 3054 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
3038 3055 won't work.
3039 3056 """
3040 3057
3041 3058 def __init__(self, value, prefers):
3042 3059 self.value = value
3043 3060 self.prefers = prefers
3044 3061
3045 3062 def _cmp(self, other):
3046 3063 for prefkey, prefvalue in self.prefers:
3047 3064 avalue = self.value.get(prefkey)
3048 3065 bvalue = other.value.get(prefkey)
3049 3066
3050 3067 # Special case: b is missing the attribute and a matches exactly.
3051 3068 if avalue is not None and bvalue is None and avalue == prefvalue:
3052 3069 return -1
3053 3070
3054 3071 # Special case: a is missing the attribute and b matches exactly.
3055 3072 if bvalue is not None and avalue is None and bvalue == prefvalue:
3056 3073 return 1
3057 3074
3058 3075 # We can't compare unless the attribute is present on both.
3059 3076 if avalue is None or bvalue is None:
3060 3077 continue
3061 3078
3062 3079 # Same values should fall back to next attribute.
3063 3080 if avalue == bvalue:
3064 3081 continue
3065 3082
3066 3083 # Exact matches come first.
3067 3084 if avalue == prefvalue:
3068 3085 return -1
3069 3086 if bvalue == prefvalue:
3070 3087 return 1
3071 3088
3072 3089 # Fall back to next attribute.
3073 3090 continue
3074 3091
3075 3092 # If we got here we couldn't sort by attributes and prefers. Fall
3076 3093 # back to index order.
3077 3094 return 0
3078 3095
3079 3096 def __lt__(self, other):
3080 3097 return self._cmp(other) < 0
3081 3098
3082 3099 def __gt__(self, other):
3083 3100 return self._cmp(other) > 0
3084 3101
3085 3102 def __eq__(self, other):
3086 3103 return self._cmp(other) == 0
3087 3104
3088 3105 def __le__(self, other):
3089 3106 return self._cmp(other) <= 0
3090 3107
3091 3108 def __ge__(self, other):
3092 3109 return self._cmp(other) >= 0
3093 3110
3094 3111 def __ne__(self, other):
3095 3112 return self._cmp(other) != 0
3096 3113
3097 3114
3098 3115 def sortclonebundleentries(ui, entries):
3099 3116 prefers = ui.configlist(b'ui', b'clonebundleprefers')
3100 3117 if not prefers:
3101 3118 return list(entries)
3102 3119
3103 3120 def _split(p):
3104 3121 if b'=' not in p:
3105 3122 hint = _(b"each comma separated item should be key=value pairs")
3106 3123 raise error.Abort(
3107 3124 _(b"invalid ui.clonebundleprefers item: %s") % p, hint=hint
3108 3125 )
3109 3126 return p.split(b'=', 1)
3110 3127
3111 3128 prefers = [_split(p) for p in prefers]
3112 3129
3113 3130 items = sorted(clonebundleentry(v, prefers) for v in entries)
3114 3131 return [i.value for i in items]
3115 3132
3116 3133
3117 3134 def trypullbundlefromurl(ui, repo, url):
3118 3135 """Attempt to apply a bundle from a URL."""
3119 3136 with repo.lock(), repo.transaction(b'bundleurl') as tr:
3120 3137 try:
3121 3138 fh = urlmod.open(ui, url)
3122 3139 cg = readbundle(ui, fh, b'stream')
3123 3140
3124 3141 if isinstance(cg, streamclone.streamcloneapplier):
3125 3142 cg.apply(repo)
3126 3143 else:
3127 3144 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
3128 3145 return True
3129 3146 except urlerr.httperror as e:
3130 3147 ui.warn(
3131 3148 _(b'HTTP error fetching bundle: %s\n')
3132 3149 % stringutil.forcebytestr(e)
3133 3150 )
3134 3151 except urlerr.urlerror as e:
3135 3152 ui.warn(
3136 3153 _(b'error fetching bundle: %s\n')
3137 3154 % stringutil.forcebytestr(e.reason)
3138 3155 )
3139 3156
3140 3157 return False
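The REQUIREDRAM handling added above amounts to: parse the advertised size, ask the ui for an estimate of system memory, and drop the entry when the requirement exceeds two thirds of that estimate (entries are kept when no estimate is available). A standalone sketch of the heuristic; parse_size here is a simplified, hypothetical stand-in for util.sizetoint:

    def parse_size(value):
        # Hypothetical, simplified stand-in for util.sizetoint:
        # integer counts with an optional KB/MB/GB suffix.
        for suffix, factor in (('KB', 1024), ('MB', 1024 ** 2), ('GB', 1024 ** 3)):
            if value.upper().endswith(suffix):
                return int(value[:-len(suffix)]) * factor
        return int(value)

    def keep_entry(requiredram, estimatedram):
        # Mirror the filter: keep the entry unless we have a memory
        # estimate and less than 2/3 of it covers the requirement.
        if estimatedram is None:
            return True
        return estimatedram * 0.66 >= requiredram

    # Matches the test below: a 12MB requirement is filtered on a 16MB
    # system (16 * 0.66 ~ 10.6MB) but kept on a 32MB system.
    assert not keep_entry(parse_size('12MB'), parse_size('16MB'))
    assert keep_entry(parse_size('12MB'), parse_size('32MB'))
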
@@ -1,31 +1,35 b''
1 1 == New Features ==
2 2
3 * clonebundles can be annotated with the expected memory requirements
4 using the `REQUIREDRAM` option. This allows clients to skip
5 bundles created with large zstd windows and fall back to larger, but
6 less demanding bundles.
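
For example, a server operator could pair a bundle built with a large zstd window with a conventional fallback; the URLs and sizes below are illustrative:

    https://cdn.example.com/zstd-large-window.hg BUNDLESPEC=zstd-v2 REQUIREDRAM=8GB
    https://cdn.example.com/gzip.hg BUNDLESPEC=gzip-v2

A client whose estimated memory is below roughly 1.5x the advertised requirement (the 2/3 rule in exchange.py) skips the first entry and falls back to the gzip bundle.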
3 7
4 8 == New Experimental Features ==
5 9
6 10 * The core of some hg operations have been (and are being)
7 11 implemented in rust, for speed. `hg status` on a repository with
8 12 300k tracked files goes from 1.8s to 0.6s for instance.
9 13 This has currently been tested only on linux, and does not build on
10 14 windows. See rust/README.rst in the mercurial repository for
11 15 instructions to opt into this.
12 16
13 17 == Backwards Compatibility Changes ==
14 18
15 19 * Mercurial now requires at least Python 2.7.9 or a Python version that
16 20 backported modern SSL/TLS features (as defined in PEP 466), and that Python
17 21 was compiled against an OpenSSL version supporting TLS 1.1 or TLS 1.2
18 22 (likely this requires the OpenSSL version to be at least 1.0.1).
19 23
20 24 * The `hg perfwrite` command from contrib/perf.py was made more flexible and
21 25 changed its default behavior. To get the previous behavior, run `hg perfwrite
22 26 --nlines=100000 --nitems=1 --item='Testing write performance' --batch-line`.
23 27
24 28
25 29 == Internal API Changes ==
26 30
27 31 * logcmdutil.diffordiffstat() now takes contexts instead of nodes.
28 32
29 33 * The `mergestate` class along with some related methods and constants have
30 34 moved from `mercurial.merge` to a new `mercurial.mergestate` module.
31 35
@@ -1,553 +1,638 b''
1 1 #require no-reposimplestore no-chg
2 2
3 3 Set up a server
4 4
5 5 $ hg init server
6 6 $ cd server
7 7 $ cat >> .hg/hgrc << EOF
8 8 > [extensions]
9 9 > clonebundles =
10 10 > EOF
11 11
12 12 $ touch foo
13 13 $ hg -q commit -A -m 'add foo'
14 14 $ touch bar
15 15 $ hg -q commit -A -m 'add bar'
16 16
17 17 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
18 18 $ cat hg.pid >> $DAEMON_PIDS
19 19 $ cd ..
20 20
21 21 Missing manifest should not result in server lookup
22 22
23 23 $ hg --verbose clone -U http://localhost:$HGPORT no-manifest
24 24 requesting all changes
25 25 adding changesets
26 26 adding manifests
27 27 adding file changes
28 28 added 2 changesets with 2 changes to 2 files
29 29 new changesets 53245c60e682:aaff8d2ffbbf
30 30 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
31 31
32 32 $ cat server/access.log
33 33 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
34 34 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
35 35 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
36 36
37 37 Empty manifest file results in retrieval
38 38 (the extension only checks if the manifest file exists)
39 39
40 40 $ touch server/.hg/clonebundles.manifest
41 41 $ hg --verbose clone -U http://localhost:$HGPORT empty-manifest
42 42 no clone bundles available on remote; falling back to regular clone
43 43 requesting all changes
44 44 adding changesets
45 45 adding manifests
46 46 adding file changes
47 47 added 2 changesets with 2 changes to 2 files
48 48 new changesets 53245c60e682:aaff8d2ffbbf
49 49 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
50 50
51 51 Manifest file with invalid URL aborts
52 52
53 53 $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
54 54 $ hg clone http://localhost:$HGPORT 404-url
55 55 applying clone bundle from http://does.not.exist/bundle.hg
56 56 error fetching bundle: (.* not known|(\[Errno -?\d+] )?([Nn]o address associated with (host)?name|Temporary failure in name resolution|Name does not resolve)) (re) (no-windows !)
57 57 error fetching bundle: [Errno 1100*] getaddrinfo failed (glob) (windows !)
58 58 abort: error applying bundle
59 59 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
60 60 [255]
61 61
62 62 Clone aborts when the server is not running
63 63
64 64 $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
65 65 $ hg clone http://localhost:$HGPORT server-not-runner
66 66 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
67 67 error fetching bundle: (.* refused.*|Protocol not supported|(.* )?\$EADDRNOTAVAIL\$|.* No route to host) (re)
68 68 abort: error applying bundle
69 69 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
70 70 [255]
71 71
72 72 Server returns 404
73 73
74 74 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
75 75 $ cat http.pid >> $DAEMON_PIDS
76 76 $ hg clone http://localhost:$HGPORT running-404
77 77 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
78 78 HTTP error fetching bundle: HTTP Error 404: File not found
79 79 abort: error applying bundle
80 80 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
81 81 [255]
82 82
83 83 We can override failure to fall back to regular clone
84 84
85 85 $ hg --config ui.clonebundlefallback=true clone -U http://localhost:$HGPORT 404-fallback
86 86 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
87 87 HTTP error fetching bundle: HTTP Error 404: File not found
88 88 falling back to normal clone
89 89 requesting all changes
90 90 adding changesets
91 91 adding manifests
92 92 adding file changes
93 93 added 2 changesets with 2 changes to 2 files
94 94 new changesets 53245c60e682:aaff8d2ffbbf
95 95
96 96 Bundle with partial content works
97 97
98 98 $ hg -R server bundle --type gzip-v1 --base null -r 53245c60e682 partial.hg
99 99 1 changesets found
100 100
101 101 We verify exact bundle content as an extra check against accidental future
102 102 changes. If this output changes, we could break old clients.
103 103
104 104 $ f --size --hexdump partial.hg
105 105 partial.hg: size=207
106 106 0000: 48 47 31 30 47 5a 78 9c 63 60 60 98 17 ac 12 93 |HG10GZx.c``.....|
107 107 0010: f0 ac a9 23 45 70 cb bf 0d 5f 59 4e 4a 7f 79 21 |...#Ep..._YNJ.y!|
108 108 0020: 9b cc 40 24 20 a0 d7 ce 2c d1 38 25 cd 24 25 d5 |..@$ ...,.8%.$%.|
109 109 0030: d8 c2 22 cd 38 d9 24 cd 22 d5 c8 22 cd 24 cd 32 |..".8.$."..".$.2|
110 110 0040: d1 c2 d0 c4 c8 d2 32 d1 38 39 29 c9 34 cd d4 80 |......2.89).4...|
111 111 0050: ab 24 b5 b8 84 cb 40 c1 80 2b 2d 3f 9f 8b 2b 31 |.$....@..+-?..+1|
112 112 0060: 25 45 01 c8 80 9a d2 9b 65 fb e5 9e 45 bf 8d 7f |%E......e...E...|
113 113 0070: 9f c6 97 9f 2b 44 34 67 d9 ec 8e 0f a0 92 0b 75 |....+D4g.......u|
114 114 0080: 41 d6 24 59 18 a4 a4 9a a6 18 1a 5b 98 9b 5a 98 |A.$Y.......[..Z.|
115 115 0090: 9a 18 26 9b a6 19 98 1a 99 99 26 a6 18 9a 98 24 |..&.......&....$|
116 116 00a0: 26 59 a6 25 5a 98 a5 18 a6 24 71 41 35 b1 43 dc |&Y.%Z....$qA5.C.|
117 117 00b0: 16 b2 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 d7 8a |.....E..V....R..|
118 118 00c0: 78 ed fc d5 76 f1 36 35 dc 05 00 36 ed 5e c7 |x...v.65...6.^.|
119 119
120 120 $ echo "http://localhost:$HGPORT1/partial.hg" > server/.hg/clonebundles.manifest
121 121 $ hg clone -U http://localhost:$HGPORT partial-bundle
122 122 applying clone bundle from http://localhost:$HGPORT1/partial.hg
123 123 adding changesets
124 124 adding manifests
125 125 adding file changes
126 126 added 1 changesets with 1 changes to 1 files
127 127 finished applying clone bundle
128 128 searching for changes
129 129 adding changesets
130 130 adding manifests
131 131 adding file changes
132 132 added 1 changesets with 1 changes to 1 files
133 133 new changesets aaff8d2ffbbf
134 134 1 local changesets published
135 135
136 136 Incremental pull doesn't fetch bundle
137 137
138 138 $ hg clone -r 53245c60e682 -U http://localhost:$HGPORT partial-clone
139 139 adding changesets
140 140 adding manifests
141 141 adding file changes
142 142 added 1 changesets with 1 changes to 1 files
143 143 new changesets 53245c60e682
144 144
145 145 $ cd partial-clone
146 146 $ hg pull
147 147 pulling from http://localhost:$HGPORT/
148 148 searching for changes
149 149 adding changesets
150 150 adding manifests
151 151 adding file changes
152 152 added 1 changesets with 1 changes to 1 files
153 153 new changesets aaff8d2ffbbf
154 154 (run 'hg update' to get a working copy)
155 155 $ cd ..
156 156
157 157 Bundle with full content works
158 158
159 159 $ hg -R server bundle --type gzip-v2 --base null -r tip full.hg
160 160 2 changesets found
161 161
162 162 Again, we perform an extra check against bundle content changes. If this content
163 163 changes, clone bundles produced by new Mercurial versions may not be readable
164 164 by old clients.
165 165
166 166 $ f --size --hexdump full.hg
167 167 full.hg: size=442
168 168 0000: 48 47 32 30 00 00 00 0e 43 6f 6d 70 72 65 73 73 |HG20....Compress|
169 169 0010: 69 6f 6e 3d 47 5a 78 9c 63 60 60 d0 e4 76 f6 70 |ion=GZx.c``..v.p|
170 170 0020: f4 73 77 75 0f f2 0f 0d 60 00 02 46 46 76 26 4e |.swu....`..FFv&N|
171 171 0030: c6 b2 d4 a2 e2 cc fc 3c 03 a3 bc a4 e4 8c c4 bc |.......<........|
172 172 0040: f4 d4 62 23 06 06 e6 19 40 f9 4d c1 2a 31 09 cf |..b#....@.M.*1..|
173 173 0050: 9a 3a 52 04 b7 fc db f0 95 e5 a4 f4 97 17 b2 c9 |.:R.............|
174 174 0060: 0c 14 00 02 e6 d9 99 25 1a a7 a4 99 a4 a4 1a 5b |.......%.......[|
175 175 0070: 58 a4 19 27 9b a4 59 a4 1a 59 a4 99 a4 59 26 5a |X..'..Y..Y...Y&Z|
176 176 0080: 18 9a 18 59 5a 26 1a 27 27 25 99 a6 99 1a 70 95 |...YZ&.''%....p.|
177 177 0090: a4 16 97 70 19 28 18 70 a5 e5 e7 73 71 25 a6 a4 |...p.(.p...sq%..|
178 178 00a0: 28 00 19 20 17 af fa df ab ff 7b 3f fb 92 dc 8b |(.. ......{?....|
179 179 00b0: 1f 62 bb 9e b7 d7 d9 87 3d 5a 44 89 2f b0 99 87 |.b......=ZD./...|
180 180 00c0: ec e2 54 63 43 e3 b4 64 43 73 23 33 43 53 0b 63 |..TcC..dCs#3CS.c|
181 181 00d0: d3 14 23 03 a0 fb 2c 2c 0c d3 80 1e 30 49 49 b1 |..#...,,....0II.|
182 182 00e0: 4c 4a 32 48 33 30 b0 34 42 b8 38 29 b1 08 e2 62 |LJ2H30.4B.8)...b|
183 183 00f0: 20 03 6a ca c2 2c db 2f f7 2c fa 6d fc fb 34 be | .j..,./.,.m..4.|
184 184 0100: fc 5c 21 a2 39 cb 66 77 7c 00 0d c3 59 17 14 58 |.\!.9.fw|...Y..X|
185 185 0110: 49 16 06 29 a9 a6 29 86 c6 16 e6 a6 16 a6 26 86 |I..)..).......&.|
186 186 0120: c9 a6 69 06 a6 46 66 a6 89 29 86 26 26 89 49 96 |..i..Ff..).&&.I.|
187 187 0130: 69 89 16 66 29 86 29 49 5c 20 07 3e 16 fe 23 ae |i..f).)I\ .>..#.|
188 188 0140: 26 da 1c ab 10 1f d1 f8 e3 b3 ef cd dd fc 0c 93 |&...............|
189 189 0150: 88 75 34 36 75 04 82 55 17 14 36 a4 38 10 04 d8 |.u46u..U..6.8...|
190 190 0160: 21 01 9a b1 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 |!......E..V....R|
191 191 0170: d7 8a 78 ed fc d5 76 f1 36 25 81 89 c7 ad ec 90 |..x...v.6%......|
192 192 0180: 54 47 75 2b 89 48 b1 b2 62 c9 89 c9 19 a9 56 45 |TGu+.H..b.....VE|
193 193 0190: a9 65 ba 49 45 89 79 c9 19 ba 60 01 a0 14 23 58 |.e.IE.y...`...#X|
194 194 01a0: 81 35 c8 7d 40 cc 04 e2 a4 a4 a6 25 96 e6 94 60 |.5.}@......%...`|
195 195 01b0: 33 17 5f 54 00 00 d3 1b 0d 4c |3._T.....L|
196 196
197 197 $ echo "http://localhost:$HGPORT1/full.hg" > server/.hg/clonebundles.manifest
198 198 $ hg clone -U http://localhost:$HGPORT full-bundle
199 199 applying clone bundle from http://localhost:$HGPORT1/full.hg
200 200 adding changesets
201 201 adding manifests
202 202 adding file changes
203 203 added 2 changesets with 2 changes to 2 files
204 204 finished applying clone bundle
205 205 searching for changes
206 206 no changes found
207 207 2 local changesets published
208 208
209 209 Feature works over SSH
210 210
211 211 $ hg clone -U -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/server ssh-full-clone
212 212 applying clone bundle from http://localhost:$HGPORT1/full.hg
213 213 adding changesets
214 214 adding manifests
215 215 adding file changes
216 216 added 2 changesets with 2 changes to 2 files
217 217 finished applying clone bundle
218 218 searching for changes
219 219 no changes found
220 220 2 local changesets published
221 221
222 222 Entry with unknown BUNDLESPEC is filtered and not used
223 223
224 224 $ cat > server/.hg/clonebundles.manifest << EOF
225 225 > http://bad.entry1 BUNDLESPEC=UNKNOWN
226 226 > http://bad.entry2 BUNDLESPEC=xz-v1
227 227 > http://bad.entry3 BUNDLESPEC=none-v100
228 228 > http://localhost:$HGPORT1/full.hg BUNDLESPEC=gzip-v2
229 229 > EOF
230 230
231 231 $ hg clone -U http://localhost:$HGPORT filter-unknown-type
232 232 applying clone bundle from http://localhost:$HGPORT1/full.hg
233 233 adding changesets
234 234 adding manifests
235 235 adding file changes
236 236 added 2 changesets with 2 changes to 2 files
237 237 finished applying clone bundle
238 238 searching for changes
239 239 no changes found
240 240 2 local changesets published
241 241
242 242 Automatic fallback when all entries are filtered
243 243
244 244 $ cat > server/.hg/clonebundles.manifest << EOF
245 245 > http://bad.entry BUNDLESPEC=UNKNOWN
246 246 > EOF
247 247
248 248 $ hg clone -U http://localhost:$HGPORT filter-all
249 249 no compatible clone bundles available on server; falling back to regular clone
250 250 (you may want to report this to the server operator)
251 251 requesting all changes
252 252 adding changesets
253 253 adding manifests
254 254 adding file changes
255 255 added 2 changesets with 2 changes to 2 files
256 256 new changesets 53245c60e682:aaff8d2ffbbf
257 257
258 258 We require a Python version that supports SNI. Therefore, URLs requiring SNI
259 259 are not filtered.
260 260
261 261 $ cp full.hg sni.hg
262 262 $ cat > server/.hg/clonebundles.manifest << EOF
263 263 > http://localhost:$HGPORT1/sni.hg REQUIRESNI=true
264 264 > http://localhost:$HGPORT1/full.hg
265 265 > EOF
266 266
267 267 $ hg clone -U http://localhost:$HGPORT sni-supported
268 268 applying clone bundle from http://localhost:$HGPORT1/sni.hg
269 269 adding changesets
270 270 adding manifests
271 271 adding file changes
272 272 added 2 changesets with 2 changes to 2 files
273 273 finished applying clone bundle
274 274 searching for changes
275 275 no changes found
276 276 2 local changesets published
277 277
278 278 Stream clone bundles are supported
279 279
280 280 $ hg -R server debugcreatestreamclonebundle packed.hg
281 281 writing 613 bytes for 4 files
282 282 bundle requirements: generaldelta, revlogv1, sparserevlog
283 283
284 284 An entry with no bundle spec should work
285 285
286 286 $ cat > server/.hg/clonebundles.manifest << EOF
287 287 > http://localhost:$HGPORT1/packed.hg
288 288 > EOF
289 289
290 290 $ hg clone -U http://localhost:$HGPORT stream-clone-no-spec
291 291 applying clone bundle from http://localhost:$HGPORT1/packed.hg
292 292 4 files to transfer, 613 bytes of data
293 293 transferred 613 bytes in *.* seconds (*) (glob)
294 294 finished applying clone bundle
295 295 searching for changes
296 296 no changes found
297 297
298 298 Bundle spec without parameters should work
299 299
300 300 $ cat > server/.hg/clonebundles.manifest << EOF
301 301 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
302 302 > EOF
303 303
304 304 $ hg clone -U http://localhost:$HGPORT stream-clone-vanilla-spec
305 305 applying clone bundle from http://localhost:$HGPORT1/packed.hg
306 306 4 files to transfer, 613 bytes of data
307 307 transferred 613 bytes in *.* seconds (*) (glob)
308 308 finished applying clone bundle
309 309 searching for changes
310 310 no changes found
311 311
312 312 Bundle spec with format requirements should work
313 313
314 314 $ cat > server/.hg/clonebundles.manifest << EOF
315 315 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
316 316 > EOF
317 317
318 318 $ hg clone -U http://localhost:$HGPORT stream-clone-supported-requirements
319 319 applying clone bundle from http://localhost:$HGPORT1/packed.hg
320 320 4 files to transfer, 613 bytes of data
321 321 transferred 613 bytes in *.* seconds (*) (glob)
322 322 finished applying clone bundle
323 323 searching for changes
324 324 no changes found
325 325
326 326 Stream bundle spec with unknown requirements should be filtered out
327 327
328 328 $ cat > server/.hg/clonebundles.manifest << EOF
329 329 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
330 330 > EOF
331 331
332 332 $ hg clone -U http://localhost:$HGPORT stream-clone-unsupported-requirements
333 333 no compatible clone bundles available on server; falling back to regular clone
334 334 (you may want to report this to the server operator)
335 335 requesting all changes
336 336 adding changesets
337 337 adding manifests
338 338 adding file changes
339 339 added 2 changesets with 2 changes to 2 files
340 340 new changesets 53245c60e682:aaff8d2ffbbf
341 341
342 342 Set up manifest for testing preferences
343 343 (Remember, the TYPE does not have to match reality - the URL is
344 344 important)
345 345
346 346 $ cp full.hg gz-a.hg
347 347 $ cp full.hg gz-b.hg
348 348 $ cp full.hg bz2-a.hg
349 349 $ cp full.hg bz2-b.hg
350 350 $ cat > server/.hg/clonebundles.manifest << EOF
351 351 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 extra=a
352 352 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2 extra=a
353 353 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
354 354 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
355 355 > EOF
356 356
357 357 Preferring an undefined attribute will take the first entry
358 358
359 359 $ hg --config ui.clonebundleprefers=foo=bar clone -U http://localhost:$HGPORT prefer-foo
360 360 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
361 361 adding changesets
362 362 adding manifests
363 363 adding file changes
364 364 added 2 changesets with 2 changes to 2 files
365 365 finished applying clone bundle
366 366 searching for changes
367 367 no changes found
368 368 2 local changesets published
369 369
370 370 Preferring bz2 type will download the first entry of that type
371 371
372 372 $ hg --config ui.clonebundleprefers=COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-bz
373 373 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
374 374 adding changesets
375 375 adding manifests
376 376 adding file changes
377 377 added 2 changesets with 2 changes to 2 files
378 378 finished applying clone bundle
379 379 searching for changes
380 380 no changes found
381 381 2 local changesets published
382 382
383 383 Preferring multiple values of an option works
384 384
385 385 $ hg --config ui.clonebundleprefers=COMPRESSION=unknown,COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-multiple-bz
386 386 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
387 387 adding changesets
388 388 adding manifests
389 389 adding file changes
390 390 added 2 changesets with 2 changes to 2 files
391 391 finished applying clone bundle
392 392 searching for changes
393 393 no changes found
394 394 2 local changesets published
395 395
396 396 Sorting multiple values should get us back to the original first entry
397 397
398 398 $ hg --config ui.clonebundleprefers=BUNDLESPEC=unknown,BUNDLESPEC=gzip-v2,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-multiple-gz
399 399 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
400 400 adding changesets
401 401 adding manifests
402 402 adding file changes
403 403 added 2 changesets with 2 changes to 2 files
404 404 finished applying clone bundle
405 405 searching for changes
406 406 no changes found
407 407 2 local changesets published
408 408
409 409 Preferring multiple attributes applies them in the correct order
410 410
411 411 $ hg --config ui.clonebundleprefers=extra=b,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-separate-attributes
412 412 applying clone bundle from http://localhost:$HGPORT1/bz2-b.hg
413 413 adding changesets
414 414 adding manifests
415 415 adding file changes
416 416 added 2 changesets with 2 changes to 2 files
417 417 finished applying clone bundle
418 418 searching for changes
419 419 no changes found
420 420 2 local changesets published
421 421
422 422 Test where attribute is missing from some entries
423 423
424 424 $ cat > server/.hg/clonebundles.manifest << EOF
425 425 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
426 426 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2
427 427 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
428 428 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
429 429 > EOF
430 430
431 431 $ hg --config ui.clonebundleprefers=extra=b clone -U http://localhost:$HGPORT prefer-partially-defined-attribute
432 432 applying clone bundle from http://localhost:$HGPORT1/gz-b.hg
433 433 adding changesets
434 434 adding manifests
435 435 adding file changes
436 436 added 2 changesets with 2 changes to 2 files
437 437 finished applying clone bundle
438 438 searching for changes
439 439 no changes found
440 440 2 local changesets published
441 441
442 442 Test a bad attribute list
443 443
444 444 $ hg --config ui.clonebundleprefers=bad clone -U http://localhost:$HGPORT bad-input
445 445 abort: invalid ui.clonebundleprefers item: bad
446 446 (each comma separated item should be key=value pairs)
447 447 [255]
448 448 $ hg --config ui.clonebundleprefers=key=val,bad,key2=val2 clone \
449 449 > -U http://localhost:$HGPORT bad-input
450 450 abort: invalid ui.clonebundleprefers item: bad
451 451 (each comma separated item should be key=value pairs)
452 452 [255]
453 453
454 454
455 455 Test interaction between clone bundles and --stream
456 456
457 457 A manifest with just a gzip bundle
458 458
459 459 $ cat > server/.hg/clonebundles.manifest << EOF
460 460 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
461 461 > EOF
462 462
463 463 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip
464 464 no compatible clone bundles available on server; falling back to regular clone
465 465 (you may want to report this to the server operator)
466 466 streaming all changes
467 467 9 files to transfer, 816 bytes of data
468 468 transferred 816 bytes in * seconds (*) (glob)
469 469
470 470 A manifest with a stream clone but no BUNDLESPEC
471 471
472 472 $ cat > server/.hg/clonebundles.manifest << EOF
473 473 > http://localhost:$HGPORT1/packed.hg
474 474 > EOF
475 475
476 476 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-no-bundlespec
477 477 no compatible clone bundles available on server; falling back to regular clone
478 478 (you may want to report this to the server operator)
479 479 streaming all changes
480 480 9 files to transfer, 816 bytes of data
481 481 transferred 816 bytes in * seconds (*) (glob)
482 482
483 483 A manifest with a gzip bundle and a stream clone
484 484
485 485 $ cat > server/.hg/clonebundles.manifest << EOF
486 486 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
487 487 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
488 488 > EOF
489 489
490 490 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed
491 491 applying clone bundle from http://localhost:$HGPORT1/packed.hg
492 492 4 files to transfer, 613 bytes of data
493 493 transferred 613 bytes in * seconds (*) (glob)
494 494 finished applying clone bundle
495 495 searching for changes
496 496 no changes found
497 497
498 498 A manifest with a gzip bundle and stream clone with supported requirements
499 499
500 500 $ cat > server/.hg/clonebundles.manifest << EOF
501 501 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
502 502 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
503 503 > EOF
504 504
505 505 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-requirements
506 506 applying clone bundle from http://localhost:$HGPORT1/packed.hg
507 507 4 files to transfer, 613 bytes of data
508 508 transferred 613 bytes in * seconds (*) (glob)
509 509 finished applying clone bundle
510 510 searching for changes
511 511 no changes found
512 512
513 513 A manifest with a gzip bundle and a stream clone with unsupported requirements
514 514
515 515 $ cat > server/.hg/clonebundles.manifest << EOF
516 516 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
517 517 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
518 518 > EOF
519 519
520 520 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-unsupported-requirements
521 521 no compatible clone bundles available on server; falling back to regular clone
522 522 (you may want to report this to the server operator)
523 523 streaming all changes
524 524 9 files to transfer, 816 bytes of data
525 525 transferred 816 bytes in * seconds (*) (glob)
526 526
527 527 Test clone bundle retrieved through bundle2
528 528
529 529 $ cat << EOF >> $HGRCPATH
530 530 > [extensions]
531 531 > largefiles=
532 532 > EOF
533 533 $ killdaemons.py
534 534 $ hg -R server serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
535 535 $ cat hg.pid >> $DAEMON_PIDS
536 536
537 537 $ hg -R server debuglfput gz-a.hg
538 538 1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae
539 539
540 540 $ cat > server/.hg/clonebundles.manifest << EOF
541 541 > largefile://1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae BUNDLESPEC=gzip-v2
542 542 > EOF
543 543
544 544 $ hg clone -U http://localhost:$HGPORT largefile-provided --traceback
545 545 applying clone bundle from largefile://1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae
546 546 adding changesets
547 547 adding manifests
548 548 adding file changes
549 549 added 2 changesets with 2 changes to 2 files
550 550 finished applying clone bundle
551 551 searching for changes
552 552 no changes found
553 553 2 local changesets published
554 $ killdaemons.py
555
556 A manifest with a gzip bundle that requires too much memory for a 16MB system
557 but works on a 32MB system.
558
559 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
560 $ cat http.pid >> $DAEMON_PIDS
561 $ hg -R server serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
562 $ cat hg.pid >> $DAEMON_PIDS
563
564 $ cat > server/.hg/clonebundles.manifest << EOF
565 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 REQUIREDRAM=12MB
566 > EOF
567
568 $ hg clone -U --debug --config ui.available-memory=16MB http://localhost:$HGPORT gzip-too-large
569 using http://localhost:$HGPORT/
570 sending capabilities command
571 sending clonebundles command
572 filtering http://localhost:$HGPORT1/gz-a.hg as it needs more than 2/3 of system memory
573 no compatible clone bundles available on server; falling back to regular clone
574 (you may want to report this to the server operator)
575 query 1; heads
576 sending batch command
577 requesting all changes
578 sending getbundle command
579 bundle2-input-bundle: with-transaction
580 bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
581 adding changesets
582 add changeset 53245c60e682
583 add changeset aaff8d2ffbbf
584 adding manifests
585 adding file changes
586 adding bar revisions
587 adding foo revisions
588 bundle2-input-part: total payload size 920
589 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
590 bundle2-input-part: "phase-heads" supported
591 bundle2-input-part: total payload size 24
592 bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
593 bundle2-input-part: total payload size 59
594 bundle2-input-bundle: 4 parts total
595 checking for updated bookmarks
596 updating the branch cache
597 added 2 changesets with 2 changes to 2 files
598 new changesets 53245c60e682:aaff8d2ffbbf
599 calling hook changegroup.lfiles: hgext.largefiles.reposetup.checkrequireslfiles
600 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
601
602 $ hg clone -U --debug --config ui.available-memory=32MB http://localhost:$HGPORT gzip-too-large2
603 using http://localhost:$HGPORT/
604 sending capabilities command
605 sending clonebundles command
606 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
607 bundle2-input-bundle: 1 params with-transaction
608 bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
609 adding changesets
610 add changeset 53245c60e682
611 add changeset aaff8d2ffbbf
612 adding manifests
613 adding file changes
614 adding bar revisions
615 adding foo revisions
616 bundle2-input-part: total payload size 920
617 bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
618 bundle2-input-part: total payload size 59
619 bundle2-input-bundle: 2 parts total
620 updating the branch cache
621 added 2 changesets with 2 changes to 2 files
622 finished applying clone bundle
623 query 1; heads
624 sending batch command
625 searching for changes
626 all remote heads known locally
627 no changes found
628 sending getbundle command
629 bundle2-input-bundle: with-transaction
630 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
631 bundle2-input-part: "phase-heads" supported
632 bundle2-input-part: total payload size 24
633 bundle2-input-bundle: 2 parts total
634 checking for updated bookmarks
635 2 local changesets published
636 calling hook changegroup.lfiles: hgext.largefiles.reposetup.checkrequireslfiles
637 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
638 $ killdaemons.py