##// END OF EJS Templates
exchange: perform stream clone with clone bundle with --uncompressed...
Gregory Szorc -
r34360:ff406f3e default
parent child Browse files
Show More
@@ -1,2060 +1,2076 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 hex,
17 17 nullid,
18 18 )
19 19 from . import (
20 20 bookmarks as bookmod,
21 21 bundle2,
22 22 changegroup,
23 23 discovery,
24 24 error,
25 25 lock as lockmod,
26 26 obsolete,
27 27 phases,
28 28 pushkey,
29 29 pycompat,
30 30 scmutil,
31 31 sslutil,
32 32 streamclone,
33 33 url as urlmod,
34 34 util,
35 35 )
36 36
# Short aliases for the urllib compatibility shims provided by util.
urlerr = util.urlerr
urlreq = util.urlreq

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                        }

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
49 49
def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        # Split "<version>;k0=v0;k1=v1" into (version, params dict);
        # keys and values are URI decoded.
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
            _('invalid bundle specification; '
              'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                ', '.join(sorted(missingreqs)))

    if not externalnames:
        # Translate human names to internal identifiers: the compression
        # engine's bundle type tag and the changegroup version string.
        engine = util.compengines.forbundlename(compression)
        compression = engine.bundletype()[1]
        version = _bundlespeccgversions[version]
    return compression, version, params
171 171
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the 4-byte magic of ``fh`` and return the matching unbundler.

    ``fname`` is only used for error reporting ("stream" when empty); when
    ``vfs`` is given, the name is joined against the vfs root first.
    Streams starting with a NUL byte are assumed to be headerless,
    uncompressed HG10 changegroups.
    """
    header = changegroup.readexactly(fh, 4)

    compression = None
    if not fname:
        fname = "stream"
    if header.startswith('\0') and not header.startswith('HG'):
        # headerless stream: push the sniffed bytes back and assume HG10UN
        fh = changegroup.headerlessfixup(fh, header)
        header = "HG10"
        compression = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = header[:2]
    version = header[2:]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)

    if version == '10':
        # cg1: the 2-byte compression tag follows the magic unless we
        # already decided on 'UN' for a headerless stream above.
        if compression is None:
            compression = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, compression)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    if version == 'S1':
        return streamclone.streamcloneapplier(fh)
    raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
199 199
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        # Map an internal bundle compression tag (e.g. 'BZ') back to its
        # human-readable bundlespec name; None when unknown.
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            # presumably a bzip2 stream whose header bytes were already
            # consumed; report it as plain BZ -- TODO confirm
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        # Scan the parts for a changegroup to determine the spec version.
        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        # packed1 streams advertise the repo requirements they need.
        requirements = streamclone.readbundle1header(fh)[2]
        params = 'requirements=%s' % ','.join(sorted(requirements))
        return 'none-packed1;%s' % urlreq.quote(params)
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)
252 252
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    changelog = repo.changelog
    if not common:
        # nothing shared yet: everything is outgoing from the null revision
        known = [nullid]
    else:
        # drop common nodes we do not actually know about locally
        known = [node for node in common if changelog.hasnode(node)]
    return discovery.outgoing(repo, known, heads or changelog.heads())
271 271
272 272 def _forcebundle1(op):
273 273 """return true if a pull/push must use bundle1
274 274
275 275 This function is used to allow testing of the older bundle version"""
276 276 ui = op.repo.ui
277 277 forcebundle1 = False
278 278 # The goal is this config is to allow developer to choose the bundle
279 279 # version used during exchanged. This is especially handy during test.
280 280 # Value is a list of bundle version to be picked from, highest version
281 281 # should be used.
282 282 #
283 283 # developer config: devel.legacy.exchange
284 284 exchange = ui.configlist('devel', 'legacy.exchange')
285 285 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
286 286 return forcebundle1 or not op.remote.capable('bundle2')
287 287
class pushoperation(object):
    """A object that represent a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        """Record the parameters of one push; attributes documented inline."""
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
403 403
# mapping of message used when pushing bookmark
# Each value is a (success message, failure message) pair; both take the
# bookmark name via %s.
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }
412 412
413 413
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote.

    Returns the pushoperation object; its ``cgresult`` attribute holds the
    integer changegroup push result:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        # local-to-local push: the destination must support every feature
        # this repo requires.
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except IOError as err:
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        # bundle1 fallback steps; with bundle2 these record their work in
        # pushop.stepsdone and become no-ops here.
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    return pushop
478 478
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for function performing discovery before push

    The decorated function is recorded in ``pushdiscoverymapping`` under
    ``stepname`` and the name is appended to ``pushdiscoveryorder``, so
    steps run in definition order (this may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pushdiscovery dictionary directly."""
    def register(func):
        # a step may only be registered once
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
502 502
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
508 508
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    # first figure out what the remote already has, then compute the
    # outgoing set relative to that common base
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
521 521
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    # restrict the computation to public heads when the remote is not
    # publishing (draft csets stay draft there)
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        # nothing new to push: success and failure phase updates coincide
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
570 570
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect the obsolescence markers relevant to the pushed changesets"""
    repo = pushop.repo
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
581 581
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """compare local and remote bookmarks and fill ``pushop.outbookmarks``

    Entries are (name, old remote value, new value) hex triples; an empty
    old value means an added bookmark, an empty new value a deletion.
    Sets ``pushop.bkresult`` to 2 when an explicitly requested bookmark
    exists on neither side.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # only bookmarks on ancestors of the pushed revs are candidates
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        # None marks a missing bookmark value; hex() would choke on it
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        for b, scid, dcid in bookmarks:
            yield b, safehex(scid), safehex(dcid)

    comp = [hexifycompbookmarks(marks) for marks in comp]
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
        # treat as "deleted locally"
        pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
646 646
def _pushcheckoutgoing(pushop):
    """Sanity check the outgoing changesets before pushing.

    Aborts when an outgoing head is obsolete or unstable (unless --force)
    and runs the remote head checks. Returns True when there is something
    to push, False otherwise.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
681 681
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The decorated function is recorded under ``stepname`` in
    ``b2partsgenmapping`` and the name is appended to ``b2partsgenorder``
    -- or inserted at position ``idx`` when given. Registration order
    matters.

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the b2partsgenmapping dictionary directly."""
    def register(func):
        # each step name may only be registered once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return register
708 708
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' do not check for push race,
    # * if we don't push anything, there are nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            # legacy check: send every remote head we know; the server
            # aborts if its heads differ
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            # only check the heads this push actually affects: remote heads
            # we are discarding (obsoleting) or replacing with new heads
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)
732 732
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    # negotiate the highest changegroup version both sides support
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
772 772
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Emits one pushkey part per outdated remote head to turn it public.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if not 'pushkey' in b2caps:
        return
    pushop.stepsdone.add('phases')
    # (part id, node) pairs, used by both callbacks below
    part2node = []

    def handlefailure(pushop, exc):
        # Map the failing part id back to the node whose phase update failed.
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        # Inspect the server replies and warn about ignored/failed updates.
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
813 813
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsmarkers part when the remote shares a markers version"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no obsmarker format in common with the remote: skip the step
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        bundle2.buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
825 825
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Emits one pushkey part per entry in ``pushop.outbookmarks``.
    """
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    # (part id, bookmark name, action) triples shared with the callbacks
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        # Map the failing part id back to its bookmark and abort with the
        # matching failure message.
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty old -> bookmark is new; empty new -> bookmark is deleted
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
877 877
@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    if not pushop.pushvars:
        return

    # parse KEY=VALUE pairs; an '=' separator is mandatory, the value may
    # be empty ("KEY=")
    shellvars = {}
    for raw in pushop.pushvars:
        key, sep, value = raw.partition('=')
        if not sep:
            msg = ("unable to parse variable '%s', should follow "
                   "'KEY=VALUE' or 'KEY=' format")
            raise error.Abort(msg % raw)
        shellvars[key] = value

    part = bundler.newpart('pushvars')
    for key, value in shellvars.iteritems():
        part.addparam(key, value, mandatory=False)
896 896
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future.

    Runs every registered part generator, streams the resulting bundle to
    the remote, then processes the server's reply bundle and dispatches it
    to the collected reply handlers.
    """
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # allow the server to push data back to us only when we hold a
    # transaction and the feature is enabled
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        # a generator may return a callable to be invoked on the reply
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (only the replycaps part exists)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(
                stream, ['force'], pushop.remote.url())
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
    except bundle2.AbortFromPart as exc:
        # the server aborted from within a part; surface its message/hint
        pushop.ui.status(_('remote: %s\n') % exc)
        if exc.hint is not None:
            pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
        raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        # delegate to the failure callback registered for that part, if any
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
945 945
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Legacy (non-bundle2) path: builds a version '01' changegroup and sends
    it via the remote's ``unbundle`` command. Stores the server's return
    value in ``pushop.cgresult``.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())
985 985
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    Pulls the remote's phase roots, applies the resulting phase movements
    to the local repo, then (when something was pushed and bundle2 did not
    already handle it) pushes outdated draft heads to public on the remote
    via individual pushkey calls.
    """
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            # publishing server: everything common becomes public locally
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
1041 1041
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
        return
    # repo is not locked, do not change any phases!
    # Informs the user that phases should have been moved when applicable.
    actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
    if actualmoves:
        phasestr = phases.phasenames[phase]
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
1058 1058
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
    # reverse sort to ensure we end with dump0
    results = [remote.pushkey('obsolete', key, '', remotedata[key])
               for key in sorted(remotedata, reverse=True)]
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
1077 1077
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for book, old, new in pushop.outbookmarks:
        # classify the change: no previous value -> creation ("export"),
        # no new value -> removal ("delete"), otherwise a plain update
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        if remote.pushkey('bookmarks', book, old, new):
            ui.status(bookmsgmap[action][0] % book)
        else:
            ui.warn(bookmsgmap[action][1] % book)
            # discovery can have set the value form invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
1099 1099
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        # bundle2 is usable unless explicitly disabled/forced back to bundle1
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        # cached bundle2 capabilities advertised by the remote
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1170 1170
class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if self._tr:
            return self._tr
        trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
        tr = self.repo.transaction(trname)
        tr.hookargs['source'] = self.source
        tr.hookargs['url'] = self.url
        self._tr = tr
        return tr

    def close(self):
        """close transaction if created"""
        if self._tr is None:
            return
        self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is None:
            return
        self._tr.release()
1200 1200
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)

    # refuse to pull from a repo whose requirements we cannot honor
    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    wlock = lock = None
    try:
        wlock = pullop.repo.wlock()
        lock = pullop.repo.lock()
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        streamclone.maybeperformlegacystreamclone(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        # each step checks pullop.stepsdone, so steps already covered by
        # bundle2 become no-ops here
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        lockmod.release(pullop.trmanager, lock, wlock)

    return pullop
1258 1258
# list of names of steps to perform discovery before pull, order matters
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
1266 1266
def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated function will be added in order (this
    may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pulldiscovery dictionary directly."""
    def dec(func):
        # each step name may only be registered once
        assert stepname not in pulldiscoverymapping
        pulldiscoveryorder.append(stepname)
        pulldiscoverymapping[stepname] = func
        return func
    return dec
1282 1282
def _pulldiscovery(pullop):
    """Run all registered discovery steps, in registration order"""
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
1288 1288
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # caller already supplied bookmark data
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice with
        # new implementation.
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1302 1302
1303 1303
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point.

    Stores the results (``common``, ``fetch``, ``rheads``) on ``pullop``.
    """
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is known but filtered locally, put it back in
        # common.
        #
        # This is a hackish solution to catch most of the "common but
        # locally hidden" situations. We do not perform discovery on the
        # unfiltered repository because that ends up doing a pathological
        # amount of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but none of them is a remote head, we will not be able to
        # detect it.
        scommon = set(common)
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            # every remote head is already known: nothing left to fetch
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1336 1336
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup.

    Builds the ``getbundle`` argument dict from the discovery results on
    ``pullop``, requests the bundle from the remote, processes it, and
    records which pull steps were already covered by bundle2 parts so the
    legacy fallback steps become no-ops.
    """
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = False

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch

    ui = pullop.repo.ui
    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    # The remote may not advertise a 'phases' capability at all; default to
    # an empty tuple so the membership test cannot raise TypeError on None.
    hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
    if not legacyphase and hasbinaryphase:
        kwargs['phases'] = True
        pullop.stepsdone.add('phases')

    if 'listkeys' in pullop.remotebundle2caps:
        if 'phases' not in pullop.stepsdone:
            kwargs['listkeys'] = ['phases']
        if pullop.remotebookmarks is None:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
        raise error.Abort(_('pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1416 1416
1417 1417 def _pullbundle2extraprepare(pullop, kwargs):
1418 1418 """hook function so that extensions can extend the getbundle call"""
1419 1419 pass
1420 1420
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Legacy (non-bundle2) path: fetches a changegroup through the best
    command the remote supports (``getbundle``, ``changegroup`` or
    ``changegroupsubset``) and applies it locally.
    """
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        # no head restriction: plain full changegroup
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1455 1455
def _pullphase(pullop):
    """fetch phase data from the remote and apply it, unless already done"""
    if 'phases' in pullop.stepsdone:
        return
    _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
1462 1462
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the pushkey-style dict returned by the remote's
    'phases' listkeys namespace. Advances local public/draft boundaries
    accordingly; phases only ever move forward here.
    """
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1497 1497
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    # wire bookmark values arrive hex-encoded; convert before updating
    remotemarks = bookmod.unhexlifybookmarks(pullop.remotebookmarks)
    bookmod.updatefromremote(repo.ui, repo, remotemarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1510 1510
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        # 'dump0' is always present when the remote has any markers
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    # payloads are base85-encoded marker streams
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr
1538 1538
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    # advertise bundle2 support plus our url-quoted capability blob
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return {'HG20', 'bundle2=' + urlreq.quote(capsblob)}
1545 1545
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
1553 1553
def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is registered in the step -> function mapping and inserted
    into the ordered step list: appended at the end when ``idx`` is None,
    inserted at position ``idx`` otherwise.

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        # inserting at len() is equivalent to appending
        pos = len(getbundle2partsorder) if idx is None else idx
        getbundle2partsorder.insert(pos, stepname)
        return func
    return dec
1572 1572
def bundle2requested(bundlecaps):
    """Tell whether the client capabilities request a bundle2 (HG2x) stream."""
    if bundlecaps is None:
        return False
    return any(cap.startswith('HG2') for cap in bundlecaps)
1577 1577
def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        # bundle10 understands nothing but a changegroup; any leftover
        # getbundle argument is an error
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        return changegroup.makestream(repo, outgoing, '01', source,
                                      bundlecaps=bundlecaps)

    # bundle20 case
    # decode the client's url-quoted bundle2 capability blob, if present
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # let every registered part generator contribute to the bundle
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    return bundler.getchunks()
1618 1618
1619 1619 @getbundle2partsgenerator('changegroup')
1620 1620 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1621 1621 b2caps=None, heads=None, common=None, **kwargs):
1622 1622 """add a changegroup part to the requested bundle"""
1623 1623 cgstream = None
1624 1624 if kwargs.get('cg', True):
1625 1625 # build changegroup bundle here.
1626 1626 version = '01'
1627 1627 cgversions = b2caps.get('changegroup')
1628 1628 if cgversions: # 3.1 and 3.2 ship with an empty value
1629 1629 cgversions = [v for v in cgversions
1630 1630 if v in changegroup.supportedoutgoingversions(repo)]
1631 1631 if not cgversions:
1632 1632 raise ValueError(_('no common changegroup version'))
1633 1633 version = max(cgversions)
1634 1634 outgoing = _computeoutgoing(repo, heads, common)
1635 1635 if outgoing.missing:
1636 1636 cgstream = changegroup.makestream(repo, outgoing, version, source,
1637 1637 bundlecaps=bundlecaps)
1638 1638
1639 1639 if cgstream:
1640 1640 part = bundler.newpart('changegroup', data=cgstream)
1641 1641 if cgversions:
1642 1642 part.addparam('version', version)
1643 1643 part.addparam('nbchanges', '%d' % len(outgoing.missing),
1644 1644 mandatory=False)
1645 1645 if 'treemanifest' in repo.requirements:
1646 1646 part.addparam('treemanifest', '1')
1647 1647
1648 1648 @getbundle2partsgenerator('listkeys')
1649 1649 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1650 1650 b2caps=None, **kwargs):
1651 1651 """add parts containing listkeys namespaces to the requested bundle"""
1652 1652 listkeys = kwargs.get('listkeys', ())
1653 1653 for namespace in listkeys:
1654 1654 part = bundler.newpart('listkeys')
1655 1655 part.addparam('namespace', namespace)
1656 1656 keys = repo.listkeys(namespace).items()
1657 1657 part.data = pushkey.encodekeys(keys)
1658 1658
1659 1659 @getbundle2partsgenerator('obsmarkers')
1660 1660 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1661 1661 b2caps=None, heads=None, **kwargs):
1662 1662 """add an obsolescence markers part to the requested bundle"""
1663 1663 if kwargs.get('obsmarkers', False):
1664 1664 if heads is None:
1665 1665 heads = repo.heads()
1666 1666 subset = [c.node() for c in repo.set('::%ln', heads)]
1667 1667 markers = repo.obsstore.relevantmarkers(subset)
1668 1668 markers = sorted(markers)
1669 1669 bundle2.buildobsmarkerspart(bundler, markers)
1670 1670
1671 1671 @getbundle2partsgenerator('phases')
1672 1672 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
1673 1673 b2caps=None, heads=None, **kwargs):
1674 1674 """add phase heads part to the requested bundle"""
1675 1675 if kwargs.get('phases', False):
1676 1676 if not 'heads' in b2caps.get('phases'):
1677 1677 raise ValueError(_('no common phases exchange method'))
1678 1678 if heads is None:
1679 1679 heads = repo.heads()
1680 1680
1681 1681 headsbyphase = collections.defaultdict(set)
1682 1682 if repo.publishing():
1683 1683 headsbyphase[phases.public] = heads
1684 1684 else:
1685 1685 # find the appropriate heads to move
1686 1686
1687 1687 phase = repo._phasecache.phase
1688 1688 node = repo.changelog.node
1689 1689 rev = repo.changelog.rev
1690 1690 for h in heads:
1691 1691 headsbyphase[phase(repo, rev(h))].add(h)
1692 1692 seenphases = list(headsbyphase.keys())
1693 1693
1694 1694 # We do not handle anything but public and draft phases for now
1695 1695 if seenphases:
1696 1696 assert max(seenphases) <= phases.draft
1697 1697
1698 1698 # if client is pulling non-public changesets, we need to find
1699 1699 # intermediate public heads.
1700 1700 draftheads = headsbyphase.get(phases.draft, set())
1701 1701 if draftheads:
1702 1702 publicheads = headsbyphase.get(phases.public, set())
1703 1703
1704 1704 revset = 'heads(only(%ln, %ln) and public())'
1705 1705 extraheads = repo.revs(revset, draftheads, publicheads)
1706 1706 for r in extraheads:
1707 1707 headsbyphase[phases.public].add(node(r))
1708 1708
1709 1709 # transform data in a format used by the encoding function
1710 1710 phasemapping = []
1711 1711 for phase in phases.allphases:
1712 1712 phasemapping.append(sorted(headsbyphase[phase]))
1713 1713
1714 1714 # generate the actual part
1715 1715 phasedata = phases.binaryencode(phasemapping)
1716 1716 bundler.newpart('phase-heads', data=phasedata)
1717 1717
1718 1718 @getbundle2partsgenerator('hgtagsfnodes')
1719 1719 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1720 1720 b2caps=None, heads=None, common=None,
1721 1721 **kwargs):
1722 1722 """Transfer the .hgtags filenodes mapping.
1723 1723
1724 1724 Only values for heads in this bundle will be transferred.
1725 1725
1726 1726 The part data consists of pairs of 20 byte changeset node and .hgtags
1727 1727 filenodes raw values.
1728 1728 """
1729 1729 # Don't send unless:
1730 1730 # - changeset are being exchanged,
1731 1731 # - the client supports it.
1732 1732 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1733 1733 return
1734 1734
1735 1735 outgoing = _computeoutgoing(repo, heads, common)
1736 1736 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1737 1737
1738 1738 def _getbookmarks(repo, **kwargs):
1739 1739 """Returns bookmark to node mapping.
1740 1740
1741 1741 This function is primarily used to generate `bookmarks` bundle2 part.
1742 1742 It is a separate function in order to make it easy to wrap it
1743 1743 in extensions. Passing `kwargs` to the function makes it easy to
1744 1744 add new parameters in extensions.
1745 1745 """
1746 1746
1747 1747 return dict(bookmod.listbinbookmarks(repo))
1748 1748
1749 1749 def check_heads(repo, their_heads, context):
1750 1750 """check if the heads of a repo have been modified
1751 1751
1752 1752 Used by peer for unbundling.
1753 1753 """
1754 1754 heads = repo.heads()
1755 1755 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
1756 1756 if not (their_heads == ['force'] or their_heads == heads or
1757 1757 their_heads == ['hashed', heads_hash]):
1758 1758 # someone else committed/pushed/unbundled while we
1759 1759 # were transferring data
1760 1760 raise error.PushRaced('repository changed while %s - '
1761 1761 'please try again' % context)
1762 1762
1763 1763 def unbundle(repo, cg, heads, source, url):
1764 1764 """Apply a bundle to a repo.
1765 1765
1766 1766 this function makes sure the repo is locked during the application and has
1767 1767 a mechanism to check that no push race occurred between the creation of the
1768 1768 bundle and its application.
1769 1769
1770 1770 If the push was raced, a PushRaced exception is raised."""
1771 1771 r = 0
1772 1772 # need a transaction when processing a bundle2 stream
1773 1773 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1774 1774 lockandtr = [None, None, None]
1775 1775 recordout = None
1776 1776 # quick fix for output mismatch with bundle2 in 3.4
1777 1777 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
1778 1778 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1779 1779 captureoutput = True
1780 1780 try:
1781 1781 # note: outside bundle1, 'heads' is expected to be empty and this
1782 1782 # 'check_heads' call will be a no-op
1783 1783 check_heads(repo, heads, 'uploading changes')
1784 1784 # push can proceed
1785 1785 if not isinstance(cg, bundle2.unbundle20):
1786 1786 # legacy case: bundle1 (changegroup 01)
1787 1787 txnname = "\n".join([source, util.hidepassword(url)])
1788 1788 with repo.lock(), repo.transaction(txnname) as tr:
1789 1789 op = bundle2.applybundle(repo, cg, tr, source, url)
1790 1790 r = bundle2.combinechangegroupresults(op)
1791 1791 else:
1792 1792 r = None
1793 1793 try:
1794 1794 def gettransaction():
1795 1795 if not lockandtr[2]:
1796 1796 lockandtr[0] = repo.wlock()
1797 1797 lockandtr[1] = repo.lock()
1798 1798 lockandtr[2] = repo.transaction(source)
1799 1799 lockandtr[2].hookargs['source'] = source
1800 1800 lockandtr[2].hookargs['url'] = url
1801 1801 lockandtr[2].hookargs['bundle2'] = '1'
1802 1802 return lockandtr[2]
1803 1803
1804 1804 # Do greedy locking by default until we're satisfied with lazy
1805 1805 # locking.
1806 1806 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1807 1807 gettransaction()
1808 1808
1809 1809 op = bundle2.bundleoperation(repo, gettransaction,
1810 1810 captureoutput=captureoutput)
1811 1811 try:
1812 1812 op = bundle2.processbundle(repo, cg, op=op)
1813 1813 finally:
1814 1814 r = op.reply
1815 1815 if captureoutput and r is not None:
1816 1816 repo.ui.pushbuffer(error=True, subproc=True)
1817 1817 def recordout(output):
1818 1818 r.newpart('output', data=output, mandatory=False)
1819 1819 if lockandtr[2] is not None:
1820 1820 lockandtr[2].close()
1821 1821 except BaseException as exc:
1822 1822 exc.duringunbundle2 = True
1823 1823 if captureoutput and r is not None:
1824 1824 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1825 1825 def recordout(output):
1826 1826 part = bundle2.bundlepart('output', data=output,
1827 1827 mandatory=False)
1828 1828 parts.append(part)
1829 1829 raise
1830 1830 finally:
1831 1831 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1832 1832 if recordout is not None:
1833 1833 recordout(repo.ui.popbuffer())
1834 1834 return r
1835 1835
1836 1836 def _maybeapplyclonebundle(pullop):
1837 1837 """Apply a clone bundle from a remote, if possible."""
1838 1838
1839 1839 repo = pullop.repo
1840 1840 remote = pullop.remote
1841 1841
1842 1842 if not repo.ui.configbool('ui', 'clonebundles'):
1843 1843 return
1844 1844
1845 1845 # Only run if local repo is empty.
1846 1846 if len(repo):
1847 1847 return
1848 1848
1849 1849 if pullop.heads:
1850 1850 return
1851 1851
1852 1852 if not remote.capable('clonebundles'):
1853 1853 return
1854 1854
1855 1855 res = remote._call('clonebundles')
1856 1856
1857 1857 # If we call the wire protocol command, that's good enough to record the
1858 1858 # attempt.
1859 1859 pullop.clonebundleattempted = True
1860 1860
1861 1861 entries = parseclonebundlesmanifest(repo, res)
1862 1862 if not entries:
1863 1863 repo.ui.note(_('no clone bundles available on remote; '
1864 1864 'falling back to regular clone\n'))
1865 1865 return
1866 1866
1867 entries = filterclonebundleentries(repo, entries)
1867 entries = filterclonebundleentries(
1868 repo, entries, streamclonerequested=pullop.streamclonerequested)
1869
1868 1870 if not entries:
1869 1871 # There is a thundering herd concern here. However, if a server
1870 1872 # operator doesn't advertise bundles appropriate for its clients,
1871 1873 # they deserve what's coming. Furthermore, from a client's
1872 1874 # perspective, no automatic fallback would mean not being able to
1873 1875 # clone!
1874 1876 repo.ui.warn(_('no compatible clone bundles available on server; '
1875 1877 'falling back to regular clone\n'))
1876 1878 repo.ui.warn(_('(you may want to report this to the server '
1877 1879 'operator)\n'))
1878 1880 return
1879 1881
1880 1882 entries = sortclonebundleentries(repo.ui, entries)
1881 1883
1882 1884 url = entries[0]['URL']
1883 1885 repo.ui.status(_('applying clone bundle from %s\n') % url)
1884 1886 if trypullbundlefromurl(repo.ui, repo, url):
1885 1887 repo.ui.status(_('finished applying clone bundle\n'))
1886 1888 # Bundle failed.
1887 1889 #
1888 1890 # We abort by default to avoid the thundering herd of
1889 1891 # clients flooding a server that was expecting expensive
1890 1892 # clone load to be offloaded.
1891 1893 elif repo.ui.configbool('ui', 'clonebundlefallback'):
1892 1894 repo.ui.warn(_('falling back to normal clone\n'))
1893 1895 else:
1894 1896 raise error.Abort(_('error applying bundle'),
1895 1897 hint=_('if this error persists, consider contacting '
1896 1898 'the server operator or disable clone '
1897 1899 'bundles via '
1898 1900 '"--config ui.clonebundles=false"'))
1899 1901
1900 1902 def parseclonebundlesmanifest(repo, s):
1901 1903 """Parses the raw text of a clone bundles manifest.
1902 1904
1903 1905 Returns a list of dicts. The dicts have a ``URL`` key corresponding
1904 1906 to the URL and other keys are the attributes for the entry.
1905 1907 """
1906 1908 m = []
1907 1909 for line in s.splitlines():
1908 1910 fields = line.split()
1909 1911 if not fields:
1910 1912 continue
1911 1913 attrs = {'URL': fields[0]}
1912 1914 for rawattr in fields[1:]:
1913 1915 key, value = rawattr.split('=', 1)
1914 1916 key = urlreq.unquote(key)
1915 1917 value = urlreq.unquote(value)
1916 1918 attrs[key] = value
1917 1919
1918 1920 # Parse BUNDLESPEC into components. This makes client-side
1919 1921 # preferences easier to specify since you can prefer a single
1920 1922 # component of the BUNDLESPEC.
1921 1923 if key == 'BUNDLESPEC':
1922 1924 try:
1923 1925 comp, version, params = parsebundlespec(repo, value,
1924 1926 externalnames=True)
1925 1927 attrs['COMPRESSION'] = comp
1926 1928 attrs['VERSION'] = version
1927 1929 except error.InvalidBundleSpecification:
1928 1930 pass
1929 1931 except error.UnsupportedBundleSpecification:
1930 1932 pass
1931 1933
1932 1934 m.append(attrs)
1933 1935
1934 1936 return m
1935 1937
1936 def filterclonebundleentries(repo, entries):
1938 def filterclonebundleentries(repo, entries, streamclonerequested=False):
1937 1939 """Remove incompatible clone bundle manifest entries.
1938 1940
1939 1941 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
1940 1942 and returns a new list consisting of only the entries that this client
1941 1943 should be able to apply.
1942 1944
1943 1945 There is no guarantee we'll be able to apply all returned entries because
1944 1946 the metadata we use to filter on may be missing or wrong.
1945 1947 """
1946 1948 newentries = []
1947 1949 for entry in entries:
1948 1950 spec = entry.get('BUNDLESPEC')
1949 1951 if spec:
1950 1952 try:
1951 parsebundlespec(repo, spec, strict=True)
1953 comp, version, params = parsebundlespec(repo, spec, strict=True)
1954
1955 # If a stream clone was requested, filter out non-streamclone
1956 # entries.
1957 if streamclonerequested and (comp != 'UN' or version != 's1'):
1958 repo.ui.debug('filtering %s because not a stream clone\n' %
1959 entry['URL'])
1960 continue
1961
1952 1962 except error.InvalidBundleSpecification as e:
1953 1963 repo.ui.debug(str(e) + '\n')
1954 1964 continue
1955 1965 except error.UnsupportedBundleSpecification as e:
1956 1966 repo.ui.debug('filtering %s because unsupported bundle '
1957 1967 'spec: %s\n' % (entry['URL'], str(e)))
1958 1968 continue
1969 # If we don't have a spec and requested a stream clone, we don't know
1970 # what the entry is so don't attempt to apply it.
1971 elif streamclonerequested:
1972 repo.ui.debug('filtering %s because cannot determine if a stream '
1973 'clone bundle\n' % entry['URL'])
1974 continue
1959 1975
1960 1976 if 'REQUIRESNI' in entry and not sslutil.hassni:
1961 1977 repo.ui.debug('filtering %s because SNI not supported\n' %
1962 1978 entry['URL'])
1963 1979 continue
1964 1980
1965 1981 newentries.append(entry)
1966 1982
1967 1983 return newentries
1968 1984
1969 1985 class clonebundleentry(object):
1970 1986 """Represents an item in a clone bundles manifest.
1971 1987
1972 1988 This rich class is needed to support sorting since sorted() in Python 3
1973 1989 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
1974 1990 won't work.
1975 1991 """
1976 1992
1977 1993 def __init__(self, value, prefers):
1978 1994 self.value = value
1979 1995 self.prefers = prefers
1980 1996
1981 1997 def _cmp(self, other):
1982 1998 for prefkey, prefvalue in self.prefers:
1983 1999 avalue = self.value.get(prefkey)
1984 2000 bvalue = other.value.get(prefkey)
1985 2001
1986 2002 # Special case for b missing attribute and a matches exactly.
1987 2003 if avalue is not None and bvalue is None and avalue == prefvalue:
1988 2004 return -1
1989 2005
1990 2006 # Special case for a missing attribute and b matches exactly.
1991 2007 if bvalue is not None and avalue is None and bvalue == prefvalue:
1992 2008 return 1
1993 2009
1994 2010 # We can't compare unless attribute present on both.
1995 2011 if avalue is None or bvalue is None:
1996 2012 continue
1997 2013
1998 2014 # Same values should fall back to next attribute.
1999 2015 if avalue == bvalue:
2000 2016 continue
2001 2017
2002 2018 # Exact matches come first.
2003 2019 if avalue == prefvalue:
2004 2020 return -1
2005 2021 if bvalue == prefvalue:
2006 2022 return 1
2007 2023
2008 2024 # Fall back to next attribute.
2009 2025 continue
2010 2026
2011 2027 # If we got here we couldn't sort by attributes and prefers. Fall
2012 2028 # back to index order.
2013 2029 return 0
2014 2030
2015 2031 def __lt__(self, other):
2016 2032 return self._cmp(other) < 0
2017 2033
2018 2034 def __gt__(self, other):
2019 2035 return self._cmp(other) > 0
2020 2036
2021 2037 def __eq__(self, other):
2022 2038 return self._cmp(other) == 0
2023 2039
2024 2040 def __le__(self, other):
2025 2041 return self._cmp(other) <= 0
2026 2042
2027 2043 def __ge__(self, other):
2028 2044 return self._cmp(other) >= 0
2029 2045
2030 2046 def __ne__(self, other):
2031 2047 return self._cmp(other) != 0
2032 2048
2033 2049 def sortclonebundleentries(ui, entries):
2034 2050 prefers = ui.configlist('ui', 'clonebundleprefers')
2035 2051 if not prefers:
2036 2052 return list(entries)
2037 2053
2038 2054 prefers = [p.split('=', 1) for p in prefers]
2039 2055
2040 2056 items = sorted(clonebundleentry(v, prefers) for v in entries)
2041 2057 return [i.value for i in items]
2042 2058
2043 2059 def trypullbundlefromurl(ui, repo, url):
2044 2060 """Attempt to apply a bundle from a URL."""
2045 2061 with repo.lock(), repo.transaction('bundleurl') as tr:
2046 2062 try:
2047 2063 fh = urlmod.open(ui, url)
2048 2064 cg = readbundle(ui, fh, 'stream')
2049 2065
2050 2066 if isinstance(cg, streamclone.streamcloneapplier):
2051 2067 cg.apply(repo)
2052 2068 else:
2053 2069 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2054 2070 return True
2055 2071 except urlerr.httperror as e:
2056 2072 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
2057 2073 except urlerr.urlerror as e:
2058 2074 ui.warn(_('error fetching bundle: %s\n') % e.reason)
2059 2075
2060 2076 return False
@@ -1,503 +1,511 b''
1 1 Set up a server
2 2
3 3 $ cat >> $HGRCPATH << EOF
4 4 > [format]
5 5 > usegeneraldelta=yes
6 6 > EOF
7 7 $ hg init server
8 8 $ cd server
9 9 $ cat >> .hg/hgrc << EOF
10 10 > [extensions]
11 11 > clonebundles =
12 12 > EOF
13 13
14 14 $ touch foo
15 15 $ hg -q commit -A -m 'add foo'
16 16 $ touch bar
17 17 $ hg -q commit -A -m 'add bar'
18 18
19 19 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
20 20 $ cat hg.pid >> $DAEMON_PIDS
21 21 $ cd ..
22 22
23 23 Missing manifest should not result in server lookup
24 24
25 25 $ hg --verbose clone -U http://localhost:$HGPORT no-manifest
26 26 requesting all changes
27 27 adding changesets
28 28 adding manifests
29 29 adding file changes
30 30 added 2 changesets with 2 changes to 2 files
31 31
32 32 $ cat server/access.log
33 33 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
34 34 * - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
35 35 * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=*,zlib,none,bzip2 (glob)
36 36
37 37 Empty manifest file results in retrieval
38 38 (the extension only checks if the manifest file exists)
39 39
40 40 $ touch server/.hg/clonebundles.manifest
41 41 $ hg --verbose clone -U http://localhost:$HGPORT empty-manifest
42 42 no clone bundles available on remote; falling back to regular clone
43 43 requesting all changes
44 44 adding changesets
45 45 adding manifests
46 46 adding file changes
47 47 added 2 changesets with 2 changes to 2 files
48 48
49 49 Manifest file with invalid URL aborts
50 50
51 51 $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
52 52 $ hg clone http://localhost:$HGPORT 404-url
53 53 applying clone bundle from http://does.not.exist/bundle.hg
54 54 error fetching bundle: (.* not known|No address associated with hostname) (re) (no-windows !)
55 55 error fetching bundle: [Errno 11004] getaddrinfo failed (windows !)
56 56 abort: error applying bundle
57 57 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
58 58 [255]
59 59
60 60 Server is not running aborts
61 61
62 62 $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
63 63 $ hg clone http://localhost:$HGPORT server-not-runner
64 64 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
65 65 error fetching bundle: (.* refused.*|Protocol not supported|(.* )?Cannot assign requested address) (re)
66 66 abort: error applying bundle
67 67 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
68 68 [255]
69 69
70 70 Server returns 404
71 71
72 72 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
73 73 $ cat http.pid >> $DAEMON_PIDS
74 74 $ hg clone http://localhost:$HGPORT running-404
75 75 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
76 76 HTTP error fetching bundle: HTTP Error 404: File not found
77 77 abort: error applying bundle
78 78 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
79 79 [255]
80 80
81 81 We can override failure to fall back to regular clone
82 82
83 83 $ hg --config ui.clonebundlefallback=true clone -U http://localhost:$HGPORT 404-fallback
84 84 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
85 85 HTTP error fetching bundle: HTTP Error 404: File not found
86 86 falling back to normal clone
87 87 requesting all changes
88 88 adding changesets
89 89 adding manifests
90 90 adding file changes
91 91 added 2 changesets with 2 changes to 2 files
92 92
93 93 Bundle with partial content works
94 94
95 95 $ hg -R server bundle --type gzip-v1 --base null -r 53245c60e682 partial.hg
96 96 1 changesets found
97 97
98 98 We verify exact bundle content as an extra check against accidental future
99 99 changes. If this output changes, we could break old clients.
100 100
101 101 $ f --size --hexdump partial.hg
102 102 partial.hg: size=207
103 103 0000: 48 47 31 30 47 5a 78 9c 63 60 60 98 17 ac 12 93 |HG10GZx.c``.....|
104 104 0010: f0 ac a9 23 45 70 cb bf 0d 5f 59 4e 4a 7f 79 21 |...#Ep..._YNJ.y!|
105 105 0020: 9b cc 40 24 20 a0 d7 ce 2c d1 38 25 cd 24 25 d5 |..@$ ...,.8%.$%.|
106 106 0030: d8 c2 22 cd 38 d9 24 cd 22 d5 c8 22 cd 24 cd 32 |..".8.$."..".$.2|
107 107 0040: d1 c2 d0 c4 c8 d2 32 d1 38 39 29 c9 34 cd d4 80 |......2.89).4...|
108 108 0050: ab 24 b5 b8 84 cb 40 c1 80 2b 2d 3f 9f 8b 2b 31 |.$....@..+-?..+1|
109 109 0060: 25 45 01 c8 80 9a d2 9b 65 fb e5 9e 45 bf 8d 7f |%E......e...E...|
110 110 0070: 9f c6 97 9f 2b 44 34 67 d9 ec 8e 0f a0 92 0b 75 |....+D4g.......u|
111 111 0080: 41 d6 24 59 18 a4 a4 9a a6 18 1a 5b 98 9b 5a 98 |A.$Y.......[..Z.|
112 112 0090: 9a 18 26 9b a6 19 98 1a 99 99 26 a6 18 9a 98 24 |..&.......&....$|
113 113 00a0: 26 59 a6 25 5a 98 a5 18 a6 24 71 41 35 b1 43 dc |&Y.%Z....$qA5.C.|
114 114 00b0: 16 b2 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 d7 8a |.....E..V....R..|
115 115 00c0: 78 ed fc d5 76 f1 36 35 dc 05 00 36 ed 5e c7 |x...v.65...6.^.|
116 116
117 117 $ echo "http://localhost:$HGPORT1/partial.hg" > server/.hg/clonebundles.manifest
118 118 $ hg clone -U http://localhost:$HGPORT partial-bundle
119 119 applying clone bundle from http://localhost:$HGPORT1/partial.hg
120 120 adding changesets
121 121 adding manifests
122 122 adding file changes
123 123 added 1 changesets with 1 changes to 1 files
124 124 finished applying clone bundle
125 125 searching for changes
126 126 adding changesets
127 127 adding manifests
128 128 adding file changes
129 129 added 1 changesets with 1 changes to 1 files
130 130
131 131 Incremental pull doesn't fetch bundle
132 132
133 133 $ hg clone -r 53245c60e682 -U http://localhost:$HGPORT partial-clone
134 134 adding changesets
135 135 adding manifests
136 136 adding file changes
137 137 added 1 changesets with 1 changes to 1 files
138 138
139 139 $ cd partial-clone
140 140 $ hg pull
141 141 pulling from http://localhost:$HGPORT/
142 142 searching for changes
143 143 adding changesets
144 144 adding manifests
145 145 adding file changes
146 146 added 1 changesets with 1 changes to 1 files
147 147 (run 'hg update' to get a working copy)
148 148 $ cd ..
149 149
150 150 Bundle with full content works
151 151
152 152 $ hg -R server bundle --type gzip-v2 --base null -r tip full.hg
153 153 2 changesets found
154 154
155 155 Again, we perform an extra check against bundle content changes. If this content
156 156 changes, clone bundles produced by new Mercurial versions may not be readable
157 157 by old clients.
158 158
159 159 $ f --size --hexdump full.hg
160 160 full.hg: size=396
161 161 0000: 48 47 32 30 00 00 00 0e 43 6f 6d 70 72 65 73 73 |HG20....Compress|
162 162 0010: 69 6f 6e 3d 47 5a 78 9c 63 60 60 d0 e4 76 f6 70 |ion=GZx.c``..v.p|
163 163 0020: f4 73 77 75 0f f2 0f 0d 60 00 02 46 46 76 26 4e |.swu....`..FFv&N|
164 164 0030: c6 b2 d4 a2 e2 cc fc 3c 03 a3 bc a4 e4 8c c4 bc |.......<........|
165 165 0040: f4 d4 62 23 06 06 e6 19 40 f9 4d c1 2a 31 09 cf |..b#....@.M.*1..|
166 166 0050: 9a 3a 52 04 b7 fc db f0 95 e5 a4 f4 97 17 b2 c9 |.:R.............|
167 167 0060: 0c 14 00 02 e6 d9 99 25 1a a7 a4 99 a4 a4 1a 5b |.......%.......[|
168 168 0070: 58 a4 19 27 9b a4 59 a4 1a 59 a4 99 a4 59 26 5a |X..'..Y..Y...Y&Z|
169 169 0080: 18 9a 18 59 5a 26 1a 27 27 25 99 a6 99 1a 70 95 |...YZ&.''%....p.|
170 170 0090: a4 16 97 70 19 28 18 70 a5 e5 e7 73 71 25 a6 a4 |...p.(.p...sq%..|
171 171 00a0: 28 00 19 20 17 af fa df ab ff 7b 3f fb 92 dc 8b |(.. ......{?....|
172 172 00b0: 1f 62 bb 9e b7 d7 d9 87 3d 5a 44 89 2f b0 99 87 |.b......=ZD./...|
173 173 00c0: ec e2 54 63 43 e3 b4 64 43 73 23 33 43 53 0b 63 |..TcC..dCs#3CS.c|
174 174 00d0: d3 14 23 03 a0 fb 2c 2c 0c d3 80 1e 30 49 49 b1 |..#...,,....0II.|
175 175 00e0: 4c 4a 32 48 33 30 b0 34 42 b8 38 29 b1 08 e2 62 |LJ2H30.4B.8)...b|
176 176 00f0: 20 03 6a ca c2 2c db 2f f7 2c fa 6d fc fb 34 be | .j..,./.,.m..4.|
177 177 0100: fc 5c 21 a2 39 cb 66 77 7c 00 0d c3 59 17 14 58 |.\!.9.fw|...Y..X|
178 178 0110: 49 16 06 29 a9 a6 29 86 c6 16 e6 a6 16 a6 26 86 |I..)..).......&.|
179 179 0120: c9 a6 69 06 a6 46 66 a6 89 29 86 26 26 89 49 96 |..i..Ff..).&&.I.|
180 180 0130: 69 89 16 66 29 86 29 49 5c 20 07 3e 16 fe 23 ae |i..f).)I\ .>..#.|
181 181 0140: 26 da 1c ab 10 1f d1 f8 e3 b3 ef cd dd fc 0c 93 |&...............|
182 182 0150: 88 75 34 36 75 04 82 55 17 14 36 a4 38 10 04 d8 |.u46u..U..6.8...|
183 183 0160: 21 01 9a b1 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 |!......E..V....R|
184 184 0170: d7 8a 78 ed fc d5 76 f1 36 25 81 89 c7 ad ec 90 |..x...v.6%......|
185 185 0180: 54 47 75 2b 89 49 b1 00 d2 8a eb 92 |TGu+.I......|
186 186
187 187 $ echo "http://localhost:$HGPORT1/full.hg" > server/.hg/clonebundles.manifest
188 188 $ hg clone -U http://localhost:$HGPORT full-bundle
189 189 applying clone bundle from http://localhost:$HGPORT1/full.hg
190 190 adding changesets
191 191 adding manifests
192 192 adding file changes
193 193 added 2 changesets with 2 changes to 2 files
194 194 finished applying clone bundle
195 195 searching for changes
196 196 no changes found
197 197
198 198 Feature works over SSH
199 199
200 200 $ hg clone -U -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/server ssh-full-clone
201 201 applying clone bundle from http://localhost:$HGPORT1/full.hg
202 202 adding changesets
203 203 adding manifests
204 204 adding file changes
205 205 added 2 changesets with 2 changes to 2 files
206 206 finished applying clone bundle
207 207 searching for changes
208 208 no changes found
209 209
210 210 Entry with unknown BUNDLESPEC is filtered and not used
211 211
212 212 $ cat > server/.hg/clonebundles.manifest << EOF
213 213 > http://bad.entry1 BUNDLESPEC=UNKNOWN
214 214 > http://bad.entry2 BUNDLESPEC=xz-v1
215 215 > http://bad.entry3 BUNDLESPEC=none-v100
216 216 > http://localhost:$HGPORT1/full.hg BUNDLESPEC=gzip-v2
217 217 > EOF
218 218
219 219 $ hg clone -U http://localhost:$HGPORT filter-unknown-type
220 220 applying clone bundle from http://localhost:$HGPORT1/full.hg
221 221 adding changesets
222 222 adding manifests
223 223 adding file changes
224 224 added 2 changesets with 2 changes to 2 files
225 225 finished applying clone bundle
226 226 searching for changes
227 227 no changes found
228 228
229 229 Automatic fallback when all entries are filtered
230 230
231 231 $ cat > server/.hg/clonebundles.manifest << EOF
232 232 > http://bad.entry BUNDLESPEC=UNKNOWN
233 233 > EOF
234 234
235 235 $ hg clone -U http://localhost:$HGPORT filter-all
236 236 no compatible clone bundles available on server; falling back to regular clone
237 237 (you may want to report this to the server operator)
238 238 requesting all changes
239 239 adding changesets
240 240 adding manifests
241 241 adding file changes
242 242 added 2 changesets with 2 changes to 2 files
243 243
244 244 URLs requiring SNI are filtered in Python <2.7.9
245 245
246 246 $ cp full.hg sni.hg
247 247 $ cat > server/.hg/clonebundles.manifest << EOF
248 248 > http://localhost:$HGPORT1/sni.hg REQUIRESNI=true
249 249 > http://localhost:$HGPORT1/full.hg
250 250 > EOF
251 251
252 252 #if sslcontext
253 253 Python 2.7.9+ support SNI
254 254
255 255 $ hg clone -U http://localhost:$HGPORT sni-supported
256 256 applying clone bundle from http://localhost:$HGPORT1/sni.hg
257 257 adding changesets
258 258 adding manifests
259 259 adding file changes
260 260 added 2 changesets with 2 changes to 2 files
261 261 finished applying clone bundle
262 262 searching for changes
263 263 no changes found
264 264 #else
265 265 Python <2.7.9 will filter SNI URLs
266 266
267 267 $ hg clone -U http://localhost:$HGPORT sni-unsupported
268 268 applying clone bundle from http://localhost:$HGPORT1/full.hg
269 269 adding changesets
270 270 adding manifests
271 271 adding file changes
272 272 added 2 changesets with 2 changes to 2 files
273 273 finished applying clone bundle
274 274 searching for changes
275 275 no changes found
276 276 #endif
277 277
278 278 Stream clone bundles are supported
279 279
280 280 $ hg -R server debugcreatestreamclonebundle packed.hg
281 281 writing 613 bytes for 4 files
282 282 bundle requirements: generaldelta, revlogv1
283 283
284 284 No bundle spec should work
285 285
286 286 $ cat > server/.hg/clonebundles.manifest << EOF
287 287 > http://localhost:$HGPORT1/packed.hg
288 288 > EOF
289 289
290 290 $ hg clone -U http://localhost:$HGPORT stream-clone-no-spec
291 291 applying clone bundle from http://localhost:$HGPORT1/packed.hg
292 292 4 files to transfer, 613 bytes of data
293 293 transferred 613 bytes in *.* seconds (*) (glob)
294 294 finished applying clone bundle
295 295 searching for changes
296 296 no changes found
297 297
298 298 Bundle spec without parameters should work
299 299
300 300 $ cat > server/.hg/clonebundles.manifest << EOF
301 301 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
302 302 > EOF
303 303
304 304 $ hg clone -U http://localhost:$HGPORT stream-clone-vanilla-spec
305 305 applying clone bundle from http://localhost:$HGPORT1/packed.hg
306 306 4 files to transfer, 613 bytes of data
307 307 transferred 613 bytes in *.* seconds (*) (glob)
308 308 finished applying clone bundle
309 309 searching for changes
310 310 no changes found
311 311
312 312 Bundle spec with format requirements should work
313 313
314 314 $ cat > server/.hg/clonebundles.manifest << EOF
315 315 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
316 316 > EOF
317 317
318 318 $ hg clone -U http://localhost:$HGPORT stream-clone-supported-requirements
319 319 applying clone bundle from http://localhost:$HGPORT1/packed.hg
320 320 4 files to transfer, 613 bytes of data
321 321 transferred 613 bytes in *.* seconds (*) (glob)
322 322 finished applying clone bundle
323 323 searching for changes
324 324 no changes found
325 325
326 326 Stream bundle spec with unknown requirements should be filtered out
327 327
328 328 $ cat > server/.hg/clonebundles.manifest << EOF
329 329 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
330 330 > EOF
331 331
332 332 $ hg clone -U http://localhost:$HGPORT stream-clone-unsupported-requirements
333 333 no compatible clone bundles available on server; falling back to regular clone
334 334 (you may want to report this to the server operator)
335 335 requesting all changes
336 336 adding changesets
337 337 adding manifests
338 338 adding file changes
339 339 added 2 changesets with 2 changes to 2 files
340 340
341 341 Set up manifest for testing preferences
342 342 (Remember, the TYPE does not have to match reality - the URL is
343 343 important)
344 344
345 345 $ cp full.hg gz-a.hg
346 346 $ cp full.hg gz-b.hg
347 347 $ cp full.hg bz2-a.hg
348 348 $ cp full.hg bz2-b.hg
349 349 $ cat > server/.hg/clonebundles.manifest << EOF
350 350 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 extra=a
351 351 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2 extra=a
352 352 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
353 353 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
354 354 > EOF
355 355
356 356 Preferring an undefined attribute will take the first entry
357 357
358 358 $ hg --config ui.clonebundleprefers=foo=bar clone -U http://localhost:$HGPORT prefer-foo
359 359 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
360 360 adding changesets
361 361 adding manifests
362 362 adding file changes
363 363 added 2 changesets with 2 changes to 2 files
364 364 finished applying clone bundle
365 365 searching for changes
366 366 no changes found
367 367
368 368 Preferring bz2 type will download first entry of that type
369 369
370 370 $ hg --config ui.clonebundleprefers=COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-bz
371 371 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
372 372 adding changesets
373 373 adding manifests
374 374 adding file changes
375 375 added 2 changesets with 2 changes to 2 files
376 376 finished applying clone bundle
377 377 searching for changes
378 378 no changes found
379 379
380 380 Preferring multiple values of an option works
381 381
382 382 $ hg --config ui.clonebundleprefers=COMPRESSION=unknown,COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-multiple-bz
383 383 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
384 384 adding changesets
385 385 adding manifests
386 386 adding file changes
387 387 added 2 changesets with 2 changes to 2 files
388 388 finished applying clone bundle
389 389 searching for changes
390 390 no changes found
391 391
392 392 Sorting multiple values should get us back to the original first entry
393 393
394 394 $ hg --config ui.clonebundleprefers=BUNDLESPEC=unknown,BUNDLESPEC=gzip-v2,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-multiple-gz
395 395 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
396 396 adding changesets
397 397 adding manifests
398 398 adding file changes
399 399 added 2 changesets with 2 changes to 2 files
400 400 finished applying clone bundle
401 401 searching for changes
402 402 no changes found
403 403
404 404 Preferring multiple attributes has correct order
405 405
406 406 $ hg --config ui.clonebundleprefers=extra=b,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-separate-attributes
407 407 applying clone bundle from http://localhost:$HGPORT1/bz2-b.hg
408 408 adding changesets
409 409 adding manifests
410 410 adding file changes
411 411 added 2 changesets with 2 changes to 2 files
412 412 finished applying clone bundle
413 413 searching for changes
414 414 no changes found
415 415
416 416 Test where attribute is missing from some entries
417 417
418 418 $ cat > server/.hg/clonebundles.manifest << EOF
419 419 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
420 420 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2
421 421 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
422 422 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
423 423 > EOF
424 424
425 425 $ hg --config ui.clonebundleprefers=extra=b clone -U http://localhost:$HGPORT prefer-partially-defined-attribute
426 426 applying clone bundle from http://localhost:$HGPORT1/gz-b.hg
427 427 adding changesets
428 428 adding manifests
429 429 adding file changes
430 430 added 2 changesets with 2 changes to 2 files
431 431 finished applying clone bundle
432 432 searching for changes
433 433 no changes found
434 434
435 435 Test interaction between clone bundles and --uncompressed
436 436
437 437 A manifest with just a gzip bundle
438 438
439 439 $ cat > server/.hg/clonebundles.manifest << EOF
440 440 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
441 441 > EOF
442 442
443 443 $ hg clone -U --uncompressed http://localhost:$HGPORT uncompressed-gzip
444 no compatible clone bundles available on server; falling back to regular clone
445 (you may want to report this to the server operator)
444 446 streaming all changes
445 447 4 files to transfer, 613 bytes of data
446 448 transferred 613 bytes in * seconds (*) (glob)
447 449 searching for changes
448 450 no changes found
449 451
450 452 A manifest with a stream clone but no BUNDLESPEC
451 453
452 454 $ cat > server/.hg/clonebundles.manifest << EOF
453 455 > http://localhost:$HGPORT1/packed.hg
454 456 > EOF
455 457
456 458 $ hg clone -U --uncompressed http://localhost:$HGPORT uncompressed-no-bundlespec
459 no compatible clone bundles available on server; falling back to regular clone
460 (you may want to report this to the server operator)
457 461 streaming all changes
458 462 4 files to transfer, 613 bytes of data
459 463 transferred 613 bytes in * seconds (*) (glob)
460 464 searching for changes
461 465 no changes found
462 466
463 467 A manifest with a gzip bundle and a stream clone
464 468
465 469 $ cat > server/.hg/clonebundles.manifest << EOF
466 470 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
467 471 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
468 472 > EOF
469 473
470 474 $ hg clone -U --uncompressed http://localhost:$HGPORT uncompressed-gzip-packed
471 streaming all changes
475 applying clone bundle from http://localhost:$HGPORT1/packed.hg
472 476 4 files to transfer, 613 bytes of data
473 477 transferred 613 bytes in * seconds (*) (glob)
478 finished applying clone bundle
474 479 searching for changes
475 480 no changes found
476 481
477 482 A manifest with a gzip bundle and stream clone with supported requirements
478 483
479 484 $ cat > server/.hg/clonebundles.manifest << EOF
480 485 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
481 486 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
482 487 > EOF
483 488
484 489 $ hg clone -U --uncompressed http://localhost:$HGPORT uncompressed-gzip-packed-requirements
485 streaming all changes
490 applying clone bundle from http://localhost:$HGPORT1/packed.hg
486 491 4 files to transfer, 613 bytes of data
487 492 transferred 613 bytes in * seconds (*) (glob)
493 finished applying clone bundle
488 494 searching for changes
489 495 no changes found
490 496
491 497 A manifest with a gzip bundle and a stream clone with unsupported requirements
492 498
493 499 $ cat > server/.hg/clonebundles.manifest << EOF
494 500 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
495 501 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
496 502 > EOF
497 503
498 504 $ hg clone -U --uncompressed http://localhost:$HGPORT uncompressed-gzip-packed-unsupported-requirements
505 no compatible clone bundles available on server; falling back to regular clone
506 (you may want to report this to the server operator)
499 507 streaming all changes
500 508 4 files to transfer, 613 bytes of data
501 509 transferred 613 bytes in * seconds (*) (glob)
502 510 searching for changes
503 511 no changes found
General Comments 0
You need to be logged in to leave comments. Login now