bundle2: don't check for whether we can do stream clones...
Siddharth Agarwal
r32257:205bd393 default
@@ -1,1998 +1,2000 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullid,
17 17 )
18 18 from . import (
19 19 bookmarks as bookmod,
20 20 bundle2,
21 21 changegroup,
22 22 discovery,
23 23 error,
24 24 lock as lockmod,
25 25 obsolete,
26 26 phases,
27 27 pushkey,
28 28 scmutil,
29 29 sslutil,
30 30 streamclone,
31 31 url as urlmod,
32 32 util,
33 33 )
34 34
35 35 urlerr = util.urlerr
36 36 urlreq = util.urlreq
37 37
38 38 # Maps bundle version human names to changegroup versions.
39 39 _bundlespeccgversions = {'v1': '01',
40 40 'v2': '02',
41 41 'packed1': 's1',
42 42 'bundle2': '02', #legacy
43 43 }
44 44
45 45 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
46 46 _bundlespecv1compengines = set(['gzip', 'bzip2', 'none'])
47 47
48 48 def parsebundlespec(repo, spec, strict=True, externalnames=False):
49 49 """Parse a bundle string specification into parts.
50 50
51 51 Bundle specifications denote a well-defined bundle/exchange format.
52 52 The content of a given specification should not change over time in
53 53 order to ensure that bundles produced by a newer version of Mercurial are
54 54 readable from an older version.
55 55
56 56 The string currently has the form:
57 57
58 58 <compression>-<type>[;<parameter0>[;<parameter1>]]
59 59
60 60 Where <compression> is one of the supported compression formats
61 61 and <type> is (currently) a version string. A ";" can follow the type and
62 62 all text afterwards is interpreted as URI encoded, ";" delimited key=value
63 63 pairs.
64 64
65 65 If ``strict`` is True (the default) <compression> is required. Otherwise,
66 66 it is optional.
67 67
68 68 If ``externalnames`` is False (the default), the human-centric names will
69 69 be converted to their internal representation.
70 70
71 71 Returns a 3-tuple of (compression, version, parameters). Compression will
72 72 be ``None`` if not in strict mode and a compression isn't defined.
73 73
74 74 An ``InvalidBundleSpecification`` is raised when the specification is
75 75 not syntactically well formed.
76 76
77 77 An ``UnsupportedBundleSpecification`` is raised when the compression or
78 78 bundle type/version is not recognized.
79 79
80 80 Note: this function will likely eventually return a more complex data
81 81 structure, including bundle2 part information.
82 82 """
83 83 def parseparams(s):
84 84 if ';' not in s:
85 85 return s, {}
86 86
87 87 params = {}
88 88 version, paramstr = s.split(';', 1)
89 89
90 90 for p in paramstr.split(';'):
91 91 if '=' not in p:
92 92 raise error.InvalidBundleSpecification(
93 93 _('invalid bundle specification: '
94 94 'missing "=" in parameter: %s') % p)
95 95
96 96 key, value = p.split('=', 1)
97 97 key = urlreq.unquote(key)
98 98 value = urlreq.unquote(value)
99 99 params[key] = value
100 100
101 101 return version, params
102 102
103 103
104 104 if strict and '-' not in spec:
105 105 raise error.InvalidBundleSpecification(
106 106 _('invalid bundle specification; '
107 107 'must be prefixed with compression: %s') % spec)
108 108
109 109 if '-' in spec:
110 110 compression, version = spec.split('-', 1)
111 111
112 112 if compression not in util.compengines.supportedbundlenames:
113 113 raise error.UnsupportedBundleSpecification(
114 114 _('%s compression is not supported') % compression)
115 115
116 116 version, params = parseparams(version)
117 117
118 118 if version not in _bundlespeccgversions:
119 119 raise error.UnsupportedBundleSpecification(
120 120 _('%s is not a recognized bundle version') % version)
121 121 else:
122 122 # Value could be just the compression or just the version, in which
123 123 # case some defaults are assumed (but only when not in strict mode).
124 124 assert not strict
125 125
126 126 spec, params = parseparams(spec)
127 127
128 128 if spec in util.compengines.supportedbundlenames:
129 129 compression = spec
130 130 version = 'v1'
131 131 # Generaldelta repos require v2.
132 132 if 'generaldelta' in repo.requirements:
133 133 version = 'v2'
134 134 # Modern compression engines require v2.
135 135 if compression not in _bundlespecv1compengines:
136 136 version = 'v2'
137 137 elif spec in _bundlespeccgversions:
138 138 if spec == 'packed1':
139 139 compression = 'none'
140 140 else:
141 141 compression = 'bzip2'
142 142 version = spec
143 143 else:
144 144 raise error.UnsupportedBundleSpecification(
145 145 _('%s is not a recognized bundle specification') % spec)
146 146
147 147 # Bundle version 1 only supports a known set of compression engines.
148 148 if version == 'v1' and compression not in _bundlespecv1compengines:
149 149 raise error.UnsupportedBundleSpecification(
150 150 _('compression engine %s is not supported on v1 bundles') %
151 151 compression)
152 152
153 153 # The specification for packed1 can optionally declare the data formats
154 154 # required to apply it. If we see this metadata, compare against what the
155 155 # repo supports and error if the bundle isn't compatible.
156 156 if version == 'packed1' and 'requirements' in params:
157 157 requirements = set(params['requirements'].split(','))
158 158 missingreqs = requirements - repo.supportedformats
159 159 if missingreqs:
160 160 raise error.UnsupportedBundleSpecification(
161 161 _('missing support for repository features: %s') %
162 162 ', '.join(sorted(missingreqs)))
163 163
164 164 if not externalnames:
165 165 engine = util.compengines.forbundlename(compression)
166 166 compression = engine.bundletype()[1]
167 167 version = _bundlespeccgversions[version]
168 168 return compression, version, params
169 169
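# Illustrative sketch (not part of the original changeset): how a caller
# might use parsebundlespec(). The concrete values assume the stock
# compression engine table, where the 'gzip' engine's internal bundle type
# is 'GZ' and bundle version 'v2' maps to changegroup version '02':
#
#   parsebundlespec(repo, 'gzip-v2')
#   # -> ('GZ', '02', {})
#   parsebundlespec(repo, 'none-packed1;requirements=generaldelta',
#                   externalnames=True)
#   # -> ('none', 'packed1', {'requirements': 'generaldelta'})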
170 170 def readbundle(ui, fh, fname, vfs=None):
171 171 header = changegroup.readexactly(fh, 4)
172 172
173 173 alg = None
174 174 if not fname:
175 175 fname = "stream"
176 176 if not header.startswith('HG') and header.startswith('\0'):
177 177 fh = changegroup.headerlessfixup(fh, header)
178 178 header = "HG10"
179 179 alg = 'UN'
180 180 elif vfs:
181 181 fname = vfs.join(fname)
182 182
183 183 magic, version = header[0:2], header[2:4]
184 184
185 185 if magic != 'HG':
186 186 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
187 187 if version == '10':
188 188 if alg is None:
189 189 alg = changegroup.readexactly(fh, 2)
190 190 return changegroup.cg1unpacker(fh, alg)
191 191 elif version.startswith('2'):
192 192 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
193 193 elif version == 'S1':
194 194 return streamclone.streamcloneapplier(fh)
195 195 else:
196 196 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
197 197
198 198 def getbundlespec(ui, fh):
199 199 """Infer the bundlespec from a bundle file handle.
200 200
201 201 The input file handle is seeked and the original seek position is not
202 202 restored.
203 203 """
204 204 def speccompression(alg):
205 205 try:
206 206 return util.compengines.forbundletype(alg).bundletype()[0]
207 207 except KeyError:
208 208 return None
209 209
210 210 b = readbundle(ui, fh, None)
211 211 if isinstance(b, changegroup.cg1unpacker):
212 212 alg = b._type
213 213 if alg == '_truncatedBZ':
214 214 alg = 'BZ'
215 215 comp = speccompression(alg)
216 216 if not comp:
217 217 raise error.Abort(_('unknown compression algorithm: %s') % alg)
218 218 return '%s-v1' % comp
219 219 elif isinstance(b, bundle2.unbundle20):
220 220 if 'Compression' in b.params:
221 221 comp = speccompression(b.params['Compression'])
222 222 if not comp:
223 223 raise error.Abort(_('unknown compression algorithm: %s') % comp)
224 224 else:
225 225 comp = 'none'
226 226
227 227 version = None
228 228 for part in b.iterparts():
229 229 if part.type == 'changegroup':
230 230 version = part.params['version']
231 231 if version in ('01', '02'):
232 232 version = 'v2'
233 233 else:
234 234 raise error.Abort(_('changegroup version %s does not have '
235 235 'a known bundlespec') % version,
236 236 hint=_('try upgrading your Mercurial '
237 237 'client'))
238 238
239 239 if not version:
240 240 raise error.Abort(_('could not identify changegroup version in '
241 241 'bundle'))
242 242
243 243 return '%s-%s' % (comp, version)
244 244 elif isinstance(b, streamclone.streamcloneapplier):
245 245 requirements = streamclone.readbundle1header(fh)[2]
246 246 params = 'requirements=%s' % ','.join(sorted(requirements))
247 247 return 'none-packed1;%s' % urlreq.quote(params)
248 248 else:
249 249 raise error.Abort(_('unknown bundle type: %s') % b)
250 250
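# Illustrative sketch (not in the original file): inferring the spec of an
# on-disk bundle; 'bundle.hg' is a hypothetical file produced by `hg bundle`
# and `ui` is assumed to be an existing mercurial.ui.ui instance:
#
#   from mercurial import exchange
#   with open('bundle.hg', 'rb') as fh:
#       spec = exchange.getbundlespec(ui, fh)
#   # e.g. 'bzip2-v1' for a legacy bundle, 'gzip-v2' for a bundle2 file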
251 251 def buildobsmarkerspart(bundler, markers):
252 252 """add an obsmarker part to the bundler with <markers>
253 253
254 254 No part is created if markers is empty.
255 255 Raises ValueError if the bundler doesn't support any known obsmarker format.
256 256 """
257 257 if markers:
258 258 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
259 259 version = obsolete.commonversion(remoteversions)
260 260 if version is None:
261 261 raise ValueError('bundler does not support common obsmarker format')
262 262 stream = obsolete.encodemarkers(markers, True, version=version)
263 263 return bundler.newpart('obsmarkers', data=stream)
264 264 return None
265 265
266 266 def _computeoutgoing(repo, heads, common):
267 267 """Computes which revs are outgoing given a set of common
268 268 and a set of heads.
269 269
270 270 This is a separate function so extensions can have access to
271 271 the logic.
272 272
273 273 Returns a discovery.outgoing object.
274 274 """
275 275 cl = repo.changelog
276 276 if common:
277 277 hasnode = cl.hasnode
278 278 common = [n for n in common if hasnode(n)]
279 279 else:
280 280 common = [nullid]
281 281 if not heads:
282 282 heads = cl.heads()
283 283 return discovery.outgoing(repo, common, heads)
284 284
285 285 def _forcebundle1(op):
286 286 """return true if a pull/push must use bundle1
287 287
288 288 This function is used to allow testing of the older bundle version"""
289 289 ui = op.repo.ui
290 290 forcebundle1 = False
291 291 # The goal of this config is to allow developers to choose the bundle
292 292 # version used during exchange. This is especially handy during tests.
293 293 # Value is a list of bundle versions to pick from; the highest version
294 294 # should be used.
295 295 #
296 296 # developer config: devel.legacy.exchange
297 297 exchange = ui.configlist('devel', 'legacy.exchange')
298 298 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
299 299 return forcebundle1 or not op.remote.capable('bundle2')
300 300
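# Illustrative sketch (not in the original file): the developer knob read
# above comes from the config file; forcing bundle1 in a test looks like:
#
#   [devel]
#   legacy.exchange = bundle1
#
# With 'bundle1' listed and 'bundle2' absent, _forcebundle1() returns True
# and the push/pull paths below fall back to the bundle1 protocol.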
301 301 class pushoperation(object):
302 302 """A object that represent a single push operation
303 303
304 304 Its purpose is to carry push related state and very common operations.
305 305
306 306 A new pushoperation should be created at the beginning of each push and
307 307 discarded afterward.
308 308 """
309 309
310 310 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
311 311 bookmarks=()):
312 312 # repo we push from
313 313 self.repo = repo
314 314 self.ui = repo.ui
315 315 # repo we push to
316 316 self.remote = remote
317 317 # force option provided
318 318 self.force = force
319 319 # revs to be pushed (None is "all")
320 320 self.revs = revs
321 321 # bookmarks explicitly pushed
322 322 self.bookmarks = bookmarks
323 323 # allow push of new branch
324 324 self.newbranch = newbranch
325 325 # did a local lock get acquired?
326 326 self.locallocked = None
327 327 # steps already performed
328 328 # (used to check what steps have already been performed through bundle2)
329 329 self.stepsdone = set()
330 330 # Integer version of the changegroup push result
331 331 # - None means nothing to push
332 332 # - 0 means HTTP error
333 333 # - 1 means we pushed and remote head count is unchanged *or*
334 334 # we have outgoing changesets but refused to push
335 335 # - other values as described by addchangegroup()
336 336 self.cgresult = None
337 337 # Boolean value for the bookmark push
338 338 self.bkresult = None
339 339 # discovery.outgoing object (contains common and outgoing data)
340 340 self.outgoing = None
341 341 # all remote heads before the push
342 342 self.remoteheads = None
343 343 # testable as a boolean indicating if any nodes are missing locally.
344 344 self.incoming = None
345 345 # phase changes that must be pushed alongside the changesets
346 346 self.outdatedphases = None
347 347 # phase changes that must be pushed if the changeset push fails
348 348 self.fallbackoutdatedphases = None
349 349 # outgoing obsmarkers
350 350 self.outobsmarkers = set()
351 351 # outgoing bookmarks
352 352 self.outbookmarks = []
353 353 # transaction manager
354 354 self.trmanager = None
355 355 # map { pushkey partid -> callback handling failure}
356 356 # used to handle exception from mandatory pushkey part failure
357 357 self.pkfailcb = {}
358 358
359 359 @util.propertycache
360 360 def futureheads(self):
361 361 """future remote heads if the changeset push succeeds"""
362 362 return self.outgoing.missingheads
363 363
364 364 @util.propertycache
365 365 def fallbackheads(self):
366 366 """future remote heads if the changeset push fails"""
367 367 if self.revs is None:
368 368 # no targets to push, all common heads are relevant
369 369 return self.outgoing.commonheads
370 370 unfi = self.repo.unfiltered()
371 371 # I want cheads = heads(::missingheads and ::commonheads)
372 372 # (missingheads is revs with secret changeset filtered out)
373 373 #
374 374 # This can be expressed as:
375 375 # cheads = ( (missingheads and ::commonheads)
376 376 # + (commonheads and ::missingheads)
377 377 # )
378 378 #
379 379 # while trying to push we already computed the following:
380 380 # common = (::commonheads)
381 381 # missing = ((commonheads::missingheads) - commonheads)
382 382 #
383 383 # We can pick:
384 384 # * missingheads part of common (::commonheads)
385 385 common = self.outgoing.common
386 386 nm = self.repo.changelog.nodemap
387 387 cheads = [node for node in self.revs if nm[node] in common]
388 388 # and
389 389 # * commonheads parents on missing
390 390 revset = unfi.set('%ln and parents(roots(%ln))',
391 391 self.outgoing.commonheads,
392 392 self.outgoing.missing)
393 393 cheads.extend(c.node() for c in revset)
394 394 return cheads
395 395
396 396 @property
397 397 def commonheads(self):
398 398 """set of all common heads after changeset bundle push"""
399 399 if self.cgresult:
400 400 return self.futureheads
401 401 else:
402 402 return self.fallbackheads
403 403
404 404 # mapping of message used when pushing bookmark
405 405 bookmsgmap = {'update': (_("updating bookmark %s\n"),
406 406 _('updating bookmark %s failed!\n')),
407 407 'export': (_("exporting bookmark %s\n"),
408 408 _('exporting bookmark %s failed!\n')),
409 409 'delete': (_("deleting remote bookmark %s\n"),
410 410 _('deleting remote bookmark %s failed!\n')),
411 411 }
412 412
413 413
414 414 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
415 415 opargs=None):
416 416 '''Push outgoing changesets (limited by revs) from a local
417 417 repository to remote. Return an integer:
418 418 - None means nothing to push
419 419 - 0 means HTTP error
420 420 - 1 means we pushed and remote head count is unchanged *or*
421 421 we have outgoing changesets but refused to push
422 422 - other values as described by addchangegroup()
423 423 '''
424 424 if opargs is None:
425 425 opargs = {}
426 426 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
427 427 **opargs)
428 428 if pushop.remote.local():
429 429 missing = (set(pushop.repo.requirements)
430 430 - pushop.remote.local().supported)
431 431 if missing:
432 432 msg = _("required features are not"
433 433 " supported in the destination:"
434 434 " %s") % (', '.join(sorted(missing)))
435 435 raise error.Abort(msg)
436 436
437 437 # there are two ways to push to remote repo:
438 438 #
439 439 # addchangegroup assumes local user can lock remote
440 440 # repo (local filesystem, old ssh servers).
441 441 #
442 442 # unbundle assumes local user cannot lock remote repo (new ssh
443 443 # servers, http servers).
444 444
445 445 if not pushop.remote.canpush():
446 446 raise error.Abort(_("destination does not support push"))
447 447 # get local lock as we might write phase data
448 448 localwlock = locallock = None
449 449 try:
450 450 # bundle2 push may receive a reply bundle touching bookmarks or other
451 451 # things requiring the wlock. Take it now to ensure proper ordering.
452 452 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
453 453 if (not _forcebundle1(pushop)) and maypushback:
454 454 localwlock = pushop.repo.wlock()
455 455 locallock = pushop.repo.lock()
456 456 pushop.locallocked = True
457 457 except IOError as err:
458 458 pushop.locallocked = False
459 459 if err.errno != errno.EACCES:
460 460 raise
461 461 # source repo cannot be locked.
462 462 # We do not abort the push, but just disable the local phase
463 463 # synchronisation.
464 464 msg = 'cannot lock source repository: %s\n' % err
465 465 pushop.ui.debug(msg)
466 466 try:
467 467 if pushop.locallocked:
468 468 pushop.trmanager = transactionmanager(pushop.repo,
469 469 'push-response',
470 470 pushop.remote.url())
471 471 pushop.repo.checkpush(pushop)
472 472 lock = None
473 473 unbundle = pushop.remote.capable('unbundle')
474 474 if not unbundle:
475 475 lock = pushop.remote.lock()
476 476 try:
477 477 _pushdiscovery(pushop)
478 478 if not _forcebundle1(pushop):
479 479 _pushbundle2(pushop)
480 480 _pushchangeset(pushop)
481 481 _pushsyncphase(pushop)
482 482 _pushobsolete(pushop)
483 483 _pushbookmark(pushop)
484 484 finally:
485 485 if lock is not None:
486 486 lock.release()
487 487 if pushop.trmanager:
488 488 pushop.trmanager.close()
489 489 finally:
490 490 if pushop.trmanager:
491 491 pushop.trmanager.release()
492 492 if locallock is not None:
493 493 locallock.release()
494 494 if localwlock is not None:
495 495 localwlock.release()
496 496
497 497 return pushop
498 498
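# Illustrative usage sketch (not in the original file), assuming `repo` is a
# localrepository and `dest` a peer obtained via hg.peer():
#
#   from mercurial import exchange
#   pushop = exchange.push(repo, dest, force=False, revs=None)
#   if pushop.cgresult == 0:
#       # per the docstring above, 0 indicates an HTTP error
#       repo.ui.warn('push failed\n')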
499 499 # list of steps to perform discovery before push
500 500 pushdiscoveryorder = []
501 501
502 502 # Mapping between step name and function
503 503 #
504 504 # This exists to help extensions wrap steps if necessary
505 505 pushdiscoverymapping = {}
506 506
507 507 def pushdiscovery(stepname):
508 508 """decorator for function performing discovery before push
509 509
510 510 The function is added to the step -> function mapping and appended to the
511 511 list of steps. Beware that decorated functions will be added in order (this
512 512 may matter).
513 513
514 514 You can only use this decorator for a new step; if you want to wrap a step
515 515 from an extension, change the pushdiscoverymapping dictionary directly."""
516 516 def dec(func):
517 517 assert stepname not in pushdiscoverymapping
518 518 pushdiscoverymapping[stepname] = func
519 519 pushdiscoveryorder.append(stepname)
520 520 return func
521 521 return dec
522 522
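# Illustrative sketch of registering a discovery step with the decorator
# above; the step name and helper function are hypothetical:
#
#   @pushdiscovery('mydata')
#   def _pushdiscoverymydata(pushop):
#       # stash extra state on the pushop for later steps to consume
#       pushop.mydata = _computemydata(pushop.repo)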
523 523 def _pushdiscovery(pushop):
524 524 """Run all discovery steps"""
525 525 for stepname in pushdiscoveryorder:
526 526 step = pushdiscoverymapping[stepname]
527 527 step(pushop)
528 528
529 529 @pushdiscovery('changeset')
530 530 def _pushdiscoverychangeset(pushop):
531 531 """discover the changeset that need to be pushed"""
532 532 fci = discovery.findcommonincoming
533 533 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
534 534 common, inc, remoteheads = commoninc
535 535 fco = discovery.findcommonoutgoing
536 536 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
537 537 commoninc=commoninc, force=pushop.force)
538 538 pushop.outgoing = outgoing
539 539 pushop.remoteheads = remoteheads
540 540 pushop.incoming = inc
541 541
542 542 @pushdiscovery('phase')
543 543 def _pushdiscoveryphase(pushop):
544 544 """discover the phase that needs to be pushed
545 545
546 546 (computed for both success and failure case for changesets push)"""
547 547 outgoing = pushop.outgoing
548 548 unfi = pushop.repo.unfiltered()
549 549 remotephases = pushop.remote.listkeys('phases')
550 550 publishing = remotephases.get('publishing', False)
551 551 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
552 552 and remotephases # server supports phases
553 553 and not pushop.outgoing.missing # no changesets to be pushed
554 554 and publishing):
555 555 # When:
556 556 # - this is a subrepo push
557 557 # - and the remote supports phases
558 558 # - and no changesets are to be pushed
559 559 # - and the remote is publishing
560 560 # We may be in the issue 3871 case!
561 561 # We drop the possible phase synchronisation done as a courtesy,
562 562 # which would publish changesets possibly still draft
563 563 # on the remote.
564 564 remotephases = {'publishing': 'True'}
565 565 ana = phases.analyzeremotephases(pushop.repo,
566 566 pushop.fallbackheads,
567 567 remotephases)
568 568 pheads, droots = ana
569 569 extracond = ''
570 570 if not publishing:
571 571 extracond = ' and public()'
572 572 revset = 'heads((%%ln::%%ln) %s)' % extracond
573 573 # Get the list of all revs draft on remote but public here.
574 574 # XXX Beware that the revset breaks if droots is not strictly
575 575 # XXX roots; we may want to ensure it is, but that is costly
576 576 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
577 577 if not outgoing.missing:
578 578 future = fallback
579 579 else:
580 580 # add the changesets we are going to push as drafts
581 581 #
582 582 # should not be necessary for a publishing server, but because of an
583 583 # issue fixed in xxxxx we have to do it anyway.
584 584 fdroots = list(unfi.set('roots(%ln + %ln::)',
585 585 outgoing.missing, droots))
586 586 fdroots = [f.node() for f in fdroots]
587 587 future = list(unfi.set(revset, fdroots, pushop.futureheads))
588 588 pushop.outdatedphases = future
589 589 pushop.fallbackoutdatedphases = fallback
590 590
591 591 @pushdiscovery('obsmarker')
592 592 def _pushdiscoveryobsmarkers(pushop):
593 593 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
594 594 and pushop.repo.obsstore
595 595 and 'obsolete' in pushop.remote.listkeys('namespaces')):
596 596 repo = pushop.repo
597 597 # very naive computation that can be quite expensive on big repos.
598 598 # However, evolution is currently slow on them anyway.
599 599 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
600 600 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
601 601
602 602 @pushdiscovery('bookmarks')
603 603 def _pushdiscoverybookmarks(pushop):
604 604 ui = pushop.ui
605 605 repo = pushop.repo.unfiltered()
606 606 remote = pushop.remote
607 607 ui.debug("checking for updated bookmarks\n")
608 608 ancestors = ()
609 609 if pushop.revs:
610 610 revnums = map(repo.changelog.rev, pushop.revs)
611 611 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
612 612 remotebookmark = remote.listkeys('bookmarks')
613 613
614 614 explicit = set([repo._bookmarks.expandname(bookmark)
615 615 for bookmark in pushop.bookmarks])
616 616
617 617 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
618 618 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
619 619
620 620 def safehex(x):
621 621 if x is None:
622 622 return x
623 623 return hex(x)
624 624
625 625 def hexifycompbookmarks(bookmarks):
626 626 for b, scid, dcid in bookmarks:
627 627 yield b, safehex(scid), safehex(dcid)
628 628
629 629 comp = [hexifycompbookmarks(marks) for marks in comp]
630 630 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
631 631
632 632 for b, scid, dcid in advsrc:
633 633 if b in explicit:
634 634 explicit.remove(b)
635 635 if not ancestors or repo[scid].rev() in ancestors:
636 636 pushop.outbookmarks.append((b, dcid, scid))
637 637 # search for added bookmarks
638 638 for b, scid, dcid in addsrc:
639 639 if b in explicit:
640 640 explicit.remove(b)
641 641 pushop.outbookmarks.append((b, '', scid))
642 642 # search for overwritten bookmarks
643 643 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
644 644 if b in explicit:
645 645 explicit.remove(b)
646 646 pushop.outbookmarks.append((b, dcid, scid))
647 647 # search for bookmarks to delete
648 648 for b, scid, dcid in adddst:
649 649 if b in explicit:
650 650 explicit.remove(b)
651 651 # treat as "deleted locally"
652 652 pushop.outbookmarks.append((b, dcid, ''))
653 653 # identical bookmarks shouldn't get reported
654 654 for b, scid, dcid in same:
655 655 if b in explicit:
656 656 explicit.remove(b)
657 657
658 658 if explicit:
659 659 explicit = sorted(explicit)
660 660 # we should probably list all of them
661 661 ui.warn(_('bookmark %s does not exist on the local '
662 662 'or remote repository!\n') % explicit[0])
663 663 pushop.bkresult = 2
664 664
665 665 pushop.outbookmarks.sort()
666 666
667 667 def _pushcheckoutgoing(pushop):
668 668 outgoing = pushop.outgoing
669 669 unfi = pushop.repo.unfiltered()
670 670 if not outgoing.missing:
671 671 # nothing to push
672 672 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
673 673 return False
674 674 # something to push
675 675 if not pushop.force:
676 676 # if repo.obsstore is empty --> no obsolete markers;
677 677 # checking it first saves the iteration
678 678 if unfi.obsstore:
679 679 # these messages are defined here for line-length (80 char) reasons
680 680 mso = _("push includes obsolete changeset: %s!")
681 681 mst = {"unstable": _("push includes unstable changeset: %s!"),
682 682 "bumped": _("push includes bumped changeset: %s!"),
683 683 "divergent": _("push includes divergent changeset: %s!")}
684 684 # If we are pushing and there is at least one
685 685 # obsolete or unstable changeset in missing, at
686 686 # least one of the missing heads will be obsolete or
687 687 # unstable. So checking heads only is ok
688 688 for node in outgoing.missingheads:
689 689 ctx = unfi[node]
690 690 if ctx.obsolete():
691 691 raise error.Abort(mso % ctx)
692 692 elif ctx.troubled():
693 693 raise error.Abort(mst[ctx.troubles()[0]] % ctx)
694 694
695 695 discovery.checkheads(pushop)
696 696 return True
697 697
698 698 # List of names of steps to perform for an outgoing bundle2, order matters.
699 699 b2partsgenorder = []
700 700
701 701 # Mapping between step name and function
702 702 #
703 703 # This exists to help extensions wrap steps if necessary
704 704 b2partsgenmapping = {}
705 705
706 706 def b2partsgenerator(stepname, idx=None):
707 707 """decorator for function generating bundle2 part
708 708
709 709 The function is added to the step -> function mapping and appended to the
710 710 list of steps. Beware that decorated functions will be added in order
711 711 (this may matter).
712 712
713 713 You can only use this decorator for new steps; if you want to wrap a step
714 714 from an extension, change the b2partsgenmapping dictionary directly."""
715 715 def dec(func):
716 716 assert stepname not in b2partsgenmapping
717 717 b2partsgenmapping[stepname] = func
718 718 if idx is None:
719 719 b2partsgenorder.append(stepname)
720 720 else:
721 721 b2partsgenorder.insert(idx, stepname)
722 722 return func
723 723 return dec
724 724
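# Illustrative sketch of a part generator registered via the decorator above
# (the part name is hypothetical). A generator may return a callable; it is
# collected by _pushbundle2() below and invoked with the reply operation:
#
#   @b2partsgenerator('mypart')
#   def _pushb2mypart(pushop, bundler):
#       part = bundler.newpart('mypart', data='payload')
#       def handlereply(op):
#           # inspect op.records for replies matching part.id
#           pass
#       return handlereply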
725 725 def _pushb2ctxcheckheads(pushop, bundler):
726 726 """Generate race condition checking parts
727 727
728 728 Exists as an independent function to aid extensions
729 729 """
730 730 if not pushop.force:
731 731 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
732 732
733 733 @b2partsgenerator('changeset')
734 734 def _pushb2ctx(pushop, bundler):
735 735 """handle changegroup push through bundle2
736 736
737 737 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
738 738 """
739 739 if 'changesets' in pushop.stepsdone:
740 740 return
741 741 pushop.stepsdone.add('changesets')
742 742 # Send known heads to the server for race detection.
743 743 if not _pushcheckoutgoing(pushop):
744 744 return
745 745 pushop.repo.prepushoutgoinghooks(pushop)
746 746
747 747 _pushb2ctxcheckheads(pushop, bundler)
748 748
749 749 b2caps = bundle2.bundle2caps(pushop.remote)
750 750 version = '01'
751 751 cgversions = b2caps.get('changegroup')
752 752 if cgversions: # 3.1 and 3.2 ship with an empty value
753 753 cgversions = [v for v in cgversions
754 754 if v in changegroup.supportedoutgoingversions(
755 755 pushop.repo)]
756 756 if not cgversions:
757 757 raise ValueError(_('no common changegroup version'))
758 758 version = max(cgversions)
759 759 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
760 760 pushop.outgoing,
761 761 version=version)
762 762 cgpart = bundler.newpart('changegroup', data=cg)
763 763 if cgversions:
764 764 cgpart.addparam('version', version)
765 765 if 'treemanifest' in pushop.repo.requirements:
766 766 cgpart.addparam('treemanifest', '1')
767 767 def handlereply(op):
768 768 """extract addchangegroup returns from server reply"""
769 769 cgreplies = op.records.getreplies(cgpart.id)
770 770 assert len(cgreplies['changegroup']) == 1
771 771 pushop.cgresult = cgreplies['changegroup'][0]['return']
772 772 return handlereply
773 773
774 774 @b2partsgenerator('phase')
775 775 def _pushb2phases(pushop, bundler):
776 776 """handle phase push through bundle2"""
777 777 if 'phases' in pushop.stepsdone:
778 778 return
779 779 b2caps = bundle2.bundle2caps(pushop.remote)
780 780 if not 'pushkey' in b2caps:
781 781 return
782 782 pushop.stepsdone.add('phases')
783 783 part2node = []
784 784
785 785 def handlefailure(pushop, exc):
786 786 targetid = int(exc.partid)
787 787 for partid, node in part2node:
788 788 if partid == targetid:
789 789 raise error.Abort(_('updating %s to public failed') % node)
790 790
791 791 enc = pushkey.encode
792 792 for newremotehead in pushop.outdatedphases:
793 793 part = bundler.newpart('pushkey')
794 794 part.addparam('namespace', enc('phases'))
795 795 part.addparam('key', enc(newremotehead.hex()))
796 796 part.addparam('old', enc(str(phases.draft)))
797 797 part.addparam('new', enc(str(phases.public)))
798 798 part2node.append((part.id, newremotehead))
799 799 pushop.pkfailcb[part.id] = handlefailure
800 800
801 801 def handlereply(op):
802 802 for partid, node in part2node:
803 803 partrep = op.records.getreplies(partid)
804 804 results = partrep['pushkey']
805 805 assert len(results) <= 1
806 806 msg = None
807 807 if not results:
808 808 msg = _('server ignored update of %s to public!\n') % node
809 809 elif not int(results[0]['return']):
810 810 msg = _('updating %s to public failed!\n') % node
811 811 if msg is not None:
812 812 pushop.ui.warn(msg)
813 813 return handlereply
814 814
815 815 @b2partsgenerator('obsmarkers')
816 816 def _pushb2obsmarkers(pushop, bundler):
817 817 if 'obsmarkers' in pushop.stepsdone:
818 818 return
819 819 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
820 820 if obsolete.commonversion(remoteversions) is None:
821 821 return
822 822 pushop.stepsdone.add('obsmarkers')
823 823 if pushop.outobsmarkers:
824 824 markers = sorted(pushop.outobsmarkers)
825 825 buildobsmarkerspart(bundler, markers)
826 826
827 827 @b2partsgenerator('bookmarks')
828 828 def _pushb2bookmarks(pushop, bundler):
829 829 """handle bookmark push through bundle2"""
830 830 if 'bookmarks' in pushop.stepsdone:
831 831 return
832 832 b2caps = bundle2.bundle2caps(pushop.remote)
833 833 if 'pushkey' not in b2caps:
834 834 return
835 835 pushop.stepsdone.add('bookmarks')
836 836 part2book = []
837 837 enc = pushkey.encode
838 838
839 839 def handlefailure(pushop, exc):
840 840 targetid = int(exc.partid)
841 841 for partid, book, action in part2book:
842 842 if partid == targetid:
843 843 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
844 844 # we should not be called for parts we did not generate
845 845 assert False
846 846
847 847 for book, old, new in pushop.outbookmarks:
848 848 part = bundler.newpart('pushkey')
849 849 part.addparam('namespace', enc('bookmarks'))
850 850 part.addparam('key', enc(book))
851 851 part.addparam('old', enc(old))
852 852 part.addparam('new', enc(new))
853 853 action = 'update'
854 854 if not old:
855 855 action = 'export'
856 856 elif not new:
857 857 action = 'delete'
858 858 part2book.append((part.id, book, action))
859 859 pushop.pkfailcb[part.id] = handlefailure
860 860
861 861 def handlereply(op):
862 862 ui = pushop.ui
863 863 for partid, book, action in part2book:
864 864 partrep = op.records.getreplies(partid)
865 865 results = partrep['pushkey']
866 866 assert len(results) <= 1
867 867 if not results:
868 868 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
869 869 else:
870 870 ret = int(results[0]['return'])
871 871 if ret:
872 872 ui.status(bookmsgmap[action][0] % book)
873 873 else:
874 874 ui.warn(bookmsgmap[action][1] % book)
875 875 if pushop.bkresult is not None:
876 876 pushop.bkresult = 1
877 877 return handlereply
878 878
879 879
880 880 def _pushbundle2(pushop):
881 881 """push data to the remote using bundle2
882 882
883 883 The only currently supported type of data is changegroup but this will
884 884 evolve in the future."""
885 885 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
886 886 pushback = (pushop.trmanager
887 887 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
888 888
889 889 # create reply capability
890 890 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
891 891 allowpushback=pushback))
892 892 bundler.newpart('replycaps', data=capsblob)
893 893 replyhandlers = []
894 894 for partgenname in b2partsgenorder:
895 895 partgen = b2partsgenmapping[partgenname]
896 896 ret = partgen(pushop, bundler)
897 897 if callable(ret):
898 898 replyhandlers.append(ret)
899 899 # do not push if nothing to push
900 900 if bundler.nbparts <= 1:
901 901 return
902 902 stream = util.chunkbuffer(bundler.getchunks())
903 903 try:
904 904 try:
905 905 reply = pushop.remote.unbundle(
906 906 stream, ['force'], pushop.remote.url())
907 907 except error.BundleValueError as exc:
908 908 raise error.Abort(_('missing support for %s') % exc)
909 909 try:
910 910 trgetter = None
911 911 if pushback:
912 912 trgetter = pushop.trmanager.transaction
913 913 op = bundle2.processbundle(pushop.repo, reply, trgetter)
914 914 except error.BundleValueError as exc:
915 915 raise error.Abort(_('missing support for %s') % exc)
916 916 except bundle2.AbortFromPart as exc:
917 917 pushop.ui.status(_('remote: %s\n') % exc)
918 918 if exc.hint is not None:
919 919 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
920 920 raise error.Abort(_('push failed on remote'))
921 921 except error.PushkeyFailed as exc:
922 922 partid = int(exc.partid)
923 923 if partid not in pushop.pkfailcb:
924 924 raise
925 925 pushop.pkfailcb[partid](pushop, exc)
926 926 for rephand in replyhandlers:
927 927 rephand(op)
928 928
929 929 def _pushchangeset(pushop):
930 930 """Make the actual push of changeset bundle to remote repo"""
931 931 if 'changesets' in pushop.stepsdone:
932 932 return
933 933 pushop.stepsdone.add('changesets')
934 934 if not _pushcheckoutgoing(pushop):
935 935 return
936 936 pushop.repo.prepushoutgoinghooks(pushop)
937 937 outgoing = pushop.outgoing
938 938 unbundle = pushop.remote.capable('unbundle')
939 939 # create a changegroup from local
940 940 if pushop.revs is None and not (outgoing.excluded
941 941 or pushop.repo.changelog.filteredrevs):
942 942 # push everything,
943 943 # use the fast path, no race possible on push
944 944 bundler = changegroup.cg1packer(pushop.repo)
945 945 cg = changegroup.getsubset(pushop.repo,
946 946 outgoing,
947 947 bundler,
948 948 'push',
949 949 fastpath=True)
950 950 else:
951 951 cg = changegroup.getchangegroup(pushop.repo, 'push', outgoing)
952 952
953 953 # apply changegroup to remote
954 954 if unbundle:
955 955 # local repo finds heads on server, finds out what
956 956 # revs it must push. once revs transferred, if server
957 957 # finds it has different heads (someone else won
958 958 # commit/push race), server aborts.
959 959 if pushop.force:
960 960 remoteheads = ['force']
961 961 else:
962 962 remoteheads = pushop.remoteheads
963 963 # ssh: return remote's addchangegroup()
964 964 # http: return remote's addchangegroup() or 0 for error
965 965 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
966 966 pushop.repo.url())
967 967 else:
968 968 # we return an integer indicating remote head count
969 969 # change
970 970 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
971 971 pushop.repo.url())
972 972
973 973 def _pushsyncphase(pushop):
974 974 """synchronise phase information locally and remotely"""
975 975 cheads = pushop.commonheads
976 976 # even when we don't push, exchanging phase data is useful
977 977 remotephases = pushop.remote.listkeys('phases')
978 978 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
979 979 and remotephases # server supports phases
980 980 and pushop.cgresult is None # nothing was pushed
981 981 and remotephases.get('publishing', False)):
982 982 # When:
983 983 # - this is a subrepo push
984 984 # - and the remote supports phases
985 985 # - and no changesets were pushed
986 986 # - and the remote is publishing
987 987 # We may be in the issue 3871 case!
988 988 # We drop the possible phase synchronisation done as a courtesy,
989 989 # which would publish changesets possibly still draft
990 990 # on the remote.
991 991 remotephases = {'publishing': 'True'}
992 992 if not remotephases: # old server or public-only reply from non-publishing
993 993 _localphasemove(pushop, cheads)
994 994 # don't push any phase data as there is nothing to push
995 995 else:
996 996 ana = phases.analyzeremotephases(pushop.repo, cheads,
997 997 remotephases)
998 998 pheads, droots = ana
999 999 ### Apply remote phase on local
1000 1000 if remotephases.get('publishing', False):
1001 1001 _localphasemove(pushop, cheads)
1002 1002 else: # publish = False
1003 1003 _localphasemove(pushop, pheads)
1004 1004 _localphasemove(pushop, cheads, phases.draft)
1005 1005 ### Apply local phase on remote
1006 1006
1007 1007 if pushop.cgresult:
1008 1008 if 'phases' in pushop.stepsdone:
1009 1009 # phases already pushed through bundle2
1010 1010 return
1011 1011 outdated = pushop.outdatedphases
1012 1012 else:
1013 1013 outdated = pushop.fallbackoutdatedphases
1014 1014
1015 1015 pushop.stepsdone.add('phases')
1016 1016
1017 1017 # filter heads already turned public by the push
1018 1018 outdated = [c for c in outdated if c.node() not in pheads]
1019 1019 # fallback to independent pushkey command
1020 1020 for newremotehead in outdated:
1021 1021 r = pushop.remote.pushkey('phases',
1022 1022 newremotehead.hex(),
1023 1023 str(phases.draft),
1024 1024 str(phases.public))
1025 1025 if not r:
1026 1026 pushop.ui.warn(_('updating %s to public failed!\n')
1027 1027 % newremotehead)
1028 1028
1029 1029 def _localphasemove(pushop, nodes, phase=phases.public):
1030 1030 """move <nodes> to <phase> in the local source repo"""
1031 1031 if pushop.trmanager:
1032 1032 phases.advanceboundary(pushop.repo,
1033 1033 pushop.trmanager.transaction(),
1034 1034 phase,
1035 1035 nodes)
1036 1036 else:
1037 1037 # repo is not locked, do not change any phases!
1038 1038 # Informs the user that phases should have been moved when
1039 1039 # applicable.
1040 1040 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1041 1041 phasestr = phases.phasenames[phase]
1042 1042 if actualmoves:
1043 1043 pushop.ui.status(_('cannot lock source repo, skipping '
1044 1044 'local %s phase update\n') % phasestr)
1045 1045
1046 1046 def _pushobsolete(pushop):
1047 1047 """utility function to push obsolete markers to a remote"""
1048 1048 if 'obsmarkers' in pushop.stepsdone:
1049 1049 return
1050 1050 repo = pushop.repo
1051 1051 remote = pushop.remote
1052 1052 pushop.stepsdone.add('obsmarkers')
1053 1053 if pushop.outobsmarkers:
1054 1054 pushop.ui.debug('try to push obsolete markers to remote\n')
1055 1055 rslts = []
1056 1056 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1057 1057 for key in sorted(remotedata, reverse=True):
1058 1058 # reverse sort to ensure we end with dump0
1059 1059 data = remotedata[key]
1060 1060 rslts.append(remote.pushkey('obsolete', key, '', data))
1061 1061 if [r for r in rslts if not r]:
1062 1062 msg = _('failed to push some obsolete markers!\n')
1063 1063 repo.ui.warn(msg)
1064 1064
1065 1065 def _pushbookmark(pushop):
1066 1066 """Update bookmark position on remote"""
1067 1067 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1068 1068 return
1069 1069 pushop.stepsdone.add('bookmarks')
1070 1070 ui = pushop.ui
1071 1071 remote = pushop.remote
1072 1072
1073 1073 for b, old, new in pushop.outbookmarks:
1074 1074 action = 'update'
1075 1075 if not old:
1076 1076 action = 'export'
1077 1077 elif not new:
1078 1078 action = 'delete'
1079 1079 if remote.pushkey('bookmarks', b, old, new):
1080 1080 ui.status(bookmsgmap[action][0] % b)
1081 1081 else:
1082 1082 ui.warn(bookmsgmap[action][1] % b)
1083 1083 # discovery can have set the value from an invalid entry
1084 1084 if pushop.bkresult is not None:
1085 1085 pushop.bkresult = 1
1086 1086
1087 1087 class pulloperation(object):
1088 1088 """A object that represent a single pull operation
1089 1089
1090 1090 It purpose is to carry pull related state and very common operation.
1091 1091
1092 1092 A new should be created at the beginning of each pull and discarded
1093 1093 afterward.
1094 1094 """
1095 1095
1096 1096 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1097 1097 remotebookmarks=None, streamclonerequested=None):
1098 1098 # repo we pull into
1099 1099 self.repo = repo
1100 1100 # repo we pull from
1101 1101 self.remote = remote
1102 1102 # revision we try to pull (None is "all")
1103 1103 self.heads = heads
1104 1104 # bookmarks pulled explicitly
1105 1105 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1106 1106 for bookmark in bookmarks]
1107 1107 # do we force pull?
1108 1108 self.force = force
1109 1109 # whether a streaming clone was requested
1110 1110 self.streamclonerequested = streamclonerequested
1111 1111 # transaction manager
1112 1112 self.trmanager = None
1113 1113 # set of common changesets between local and remote before pull
1114 1114 self.common = None
1115 1115 # set of pulled heads
1116 1116 self.rheads = None
1117 1117 # list of missing changesets to fetch remotely
1118 1118 self.fetch = None
1119 1119 # remote bookmarks data
1120 1120 self.remotebookmarks = remotebookmarks
1121 1121 # result of changegroup pulling (used as return code by pull)
1122 1122 self.cgresult = None
1123 1123 # list of steps already done
1124 1124 self.stepsdone = set()
1125 1125 # Whether we attempted a clone from pre-generated bundles.
1126 1126 self.clonebundleattempted = False
1127 1127
1128 1128 @util.propertycache
1129 1129 def pulledsubset(self):
1130 1130 """heads of the set of changeset target by the pull"""
1131 1131 # compute target subset
1132 1132 if self.heads is None:
1133 1133 # We pulled everything possible
1134 1134 # sync on everything common
1135 1135 c = set(self.common)
1136 1136 ret = list(self.common)
1137 1137 for n in self.rheads:
1138 1138 if n not in c:
1139 1139 ret.append(n)
1140 1140 return ret
1141 1141 else:
1142 1142 # We pulled a specific subset
1143 1143 # sync on this subset
1144 1144 return self.heads
1145 1145
1146 1146 @util.propertycache
1147 1147 def canusebundle2(self):
1148 1148 return not _forcebundle1(self)
1149 1149
1150 1150 @util.propertycache
1151 1151 def remotebundle2caps(self):
1152 1152 return bundle2.bundle2caps(self.remote)
1153 1153
1154 1154 def gettransaction(self):
1155 1155 # deprecated; talk to trmanager directly
1156 1156 return self.trmanager.transaction()
1157 1157
1158 1158 class transactionmanager(object):
1159 1159 """An object to manage the life cycle of a transaction
1160 1160
1161 1161 It creates the transaction on demand and calls the appropriate hooks when
1162 1162 closing the transaction."""
1163 1163 def __init__(self, repo, source, url):
1164 1164 self.repo = repo
1165 1165 self.source = source
1166 1166 self.url = url
1167 1167 self._tr = None
1168 1168
1169 1169 def transaction(self):
1170 1170 """Return an open transaction object, constructing if necessary"""
1171 1171 if not self._tr:
1172 1172 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1173 1173 self._tr = self.repo.transaction(trname)
1174 1174 self._tr.hookargs['source'] = self.source
1175 1175 self._tr.hookargs['url'] = self.url
1176 1176 return self._tr
1177 1177
1178 1178 def close(self):
1179 1179 """close transaction if created"""
1180 1180 if self._tr is not None:
1181 1181 self._tr.close()
1182 1182
1183 1183 def release(self):
1184 1184 """release transaction if created"""
1185 1185 if self._tr is not None:
1186 1186 self._tr.release()
1187 1187
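# Illustrative sketch of the transactionmanager life cycle, mirroring how
# pull() below drives it (`repo` and `url` are assumed to exist):
#
#   trmanager = transactionmanager(repo, 'pull', url)
#   try:
#       tr = trmanager.transaction()  # created lazily on first use
#       # ... apply incoming data under tr ...
#       trmanager.close()    # commits the transaction, if one was opened
#   finally:
#       trmanager.release()  # aborts it if close() was never reached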
1188 1188 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1189 1189 streamclonerequested=None):
1190 1190 """Fetch repository data from a remote.
1191 1191
1192 1192 This is the main function used to retrieve data from a remote repository.
1193 1193
1194 1194 ``repo`` is the local repository to clone into.
1195 1195 ``remote`` is a peer instance.
1196 1196 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1197 1197 default) means to pull everything from the remote.
1198 1198 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1199 1199 default, all remote bookmarks are pulled.
1200 1200 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1201 1201 initialization.
1202 1202 ``streamclonerequested`` is a boolean indicating whether a "streaming
1203 1203 clone" is requested. A "streaming clone" is essentially a raw file copy
1204 1204 of revlogs from the server. This only works when the local repository is
1205 1205 empty. The default value of ``None`` means to respect the server
1206 1206 configuration for preferring stream clones.
1207 1207
1208 1208 Returns the ``pulloperation`` created for this pull.
1209 1209 """
1210 1210 if opargs is None:
1211 1211 opargs = {}
1212 1212 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1213 1213 streamclonerequested=streamclonerequested, **opargs)
1214 1214 if pullop.remote.local():
1215 1215 missing = set(pullop.remote.requirements) - pullop.repo.supported
1216 1216 if missing:
1217 1217 msg = _("required features are not"
1218 1218 " supported in the destination:"
1219 1219 " %s") % (', '.join(sorted(missing)))
1220 1220 raise error.Abort(msg)
1221 1221
1222 1222 wlock = lock = None
1223 1223 try:
1224 1224 wlock = pullop.repo.wlock()
1225 1225 lock = pullop.repo.lock()
1226 1226 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1227 1227 streamclone.maybeperformlegacystreamclone(pullop)
1228 1228 # This should ideally be in _pullbundle2(). However, it needs to run
1229 1229 # before discovery to avoid extra work.
1230 1230 _maybeapplyclonebundle(pullop)
1231 1231 _pulldiscovery(pullop)
1232 1232 if pullop.canusebundle2:
1233 1233 _pullbundle2(pullop)
1234 1234 _pullchangeset(pullop)
1235 1235 _pullphase(pullop)
1236 1236 _pullbookmarks(pullop)
1237 1237 _pullobsolete(pullop)
1238 1238 pullop.trmanager.close()
1239 1239 finally:
1240 1240 lockmod.release(pullop.trmanager, lock, wlock)
1241 1241
1242 1242 return pullop
1243 1243
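# Illustrative usage sketch (not in the original file), assuming `repo` is a
# localrepository and `other` a peer:
#
#   from mercurial import exchange
#   pullop = exchange.pull(repo, other, heads=None,
#                          streamclonerequested=None)
#   # pullop.cgresult carries the changegroup application result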
1244 1244 # list of steps to perform discovery before pull
1245 1245 pulldiscoveryorder = []
1246 1246
1247 1247 # Mapping between step name and function
1248 1248 #
1249 1249 # This exists to help extensions wrap steps if necessary
1250 1250 pulldiscoverymapping = {}
1251 1251
1252 1252 def pulldiscovery(stepname):
1253 1253 """decorator for function performing discovery before pull
1254 1254
1255 1255 The function is added to the step -> function mapping and appended to the
1256 1256 list of steps. Beware that decorated functions will be added in order (this
1257 1257 may matter).
1258 1258
1259 1259 You can only use this decorator for a new step; if you want to wrap a step
1260 1260 from an extension, change the pulldiscoverymapping dictionary directly."""
1261 1261 def dec(func):
1262 1262 assert stepname not in pulldiscoverymapping
1263 1263 pulldiscoverymapping[stepname] = func
1264 1264 pulldiscoveryorder.append(stepname)
1265 1265 return func
1266 1266 return dec
1267 1267
1268 1268 def _pulldiscovery(pullop):
1269 1269 """Run all discovery steps"""
1270 1270 for stepname in pulldiscoveryorder:
1271 1271 step = pulldiscoverymapping[stepname]
1272 1272 step(pullop)
1273 1273
1274 1274 @pulldiscovery('b1:bookmarks')
1275 1275 def _pullbookmarkbundle1(pullop):
1276 1276 """fetch bookmark data in bundle1 case
1277 1277
1278 1278 If not using bundle2, we have to fetch bookmarks before changeset
1279 1279 discovery to reduce the chance and impact of race conditions."""
1280 1280 if pullop.remotebookmarks is not None:
1281 1281 return
1282 1282 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1283 1283 # all known bundle2 servers now support listkeys, but let's be nice with
1284 1284 # new implementations.
1285 1285 return
1286 1286 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1287 1287
1288 1288
1289 1289 @pulldiscovery('changegroup')
1290 1290 def _pulldiscoverychangegroup(pullop):
1291 1291 """discovery phase for the pull
1292 1292
1293 1293 Currently handles changeset discovery only; will change to handle all
1294 1294 discovery at some point."""
1295 1295 tmp = discovery.findcommonincoming(pullop.repo,
1296 1296 pullop.remote,
1297 1297 heads=pullop.heads,
1298 1298 force=pullop.force)
1299 1299 common, fetch, rheads = tmp
1300 1300 nm = pullop.repo.unfiltered().changelog.nodemap
1301 1301 if fetch and rheads:
1302 1302 # If a remote head is filtered locally, let's drop it from the unknown
1303 1303 # remote heads and put it back in common.
1304 1304 #
1305 1305 # This is a hackish solution to catch most "common but locally
1306 1306 # hidden" situations. We do not perform discovery on the unfiltered
1307 1307 # repository because it ends up doing a pathological number of round
1308 1308 # trips for a huge number of changesets we do not care about.
1309 1309 #
1310 1310 # If a set of such "common but filtered" changesets exists on the server
1311 1311 # but does not include a remote head, we'll not be able to detect it,
1312 1312 scommon = set(common)
1313 1313 filteredrheads = []
1314 1314 for n in rheads:
1315 1315 if n in nm:
1316 1316 if n not in scommon:
1317 1317 common.append(n)
1318 1318 else:
1319 1319 filteredrheads.append(n)
1320 1320 if not filteredrheads:
1321 1321 fetch = []
1322 1322 rheads = filteredrheads
1323 1323 pullop.common = common
1324 1324 pullop.fetch = fetch
1325 1325 pullop.rheads = rheads
1326 1326
1327 1327 def _pullbundle2(pullop):
1328 1328 """pull data using bundle2
1329 1329
1330 1330 For now, the only supported data is the changegroup."""
1331 1331 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1332 1332
1333 streaming, streamreqs = streamclone.canperformstreamclone(pullop)
1333 # At the moment we don't do stream clones over bundle2. If that is
1334 # implemented then here's where the check for that will go.
1335 streaming = False
1334 1336
1335 1337 # pulling changegroup
1336 1338 pullop.stepsdone.add('changegroup')
1337 1339
1338 1340 kwargs['common'] = pullop.common
1339 1341 kwargs['heads'] = pullop.heads or pullop.rheads
1340 1342 kwargs['cg'] = pullop.fetch
1341 1343 if 'listkeys' in pullop.remotebundle2caps:
1342 1344 kwargs['listkeys'] = ['phases']
1343 1345 if pullop.remotebookmarks is None:
1344 1346 # make sure to always include bookmark data when migrating
1345 1347 # `hg incoming --bundle` to using this function.
1346 1348 kwargs['listkeys'].append('bookmarks')
1347 1349
1348 1350 # If this is a full pull / clone and the server supports the clone bundles
1349 1351 # feature, tell the server whether we attempted a clone bundle. The
1350 1352 # presence of this flag indicates the client supports clone bundles. This
1351 1353 # will enable the server to treat clients that support clone bundles
1352 1354 # differently from those that don't.
1353 1355 if (pullop.remote.capable('clonebundles')
1354 1356 and pullop.heads is None and list(pullop.common) == [nullid]):
1355 1357 kwargs['cbattempted'] = pullop.clonebundleattempted
1356 1358
1357 1359 if streaming:
1358 1360 pullop.repo.ui.status(_('streaming all changes\n'))
1359 1361 elif not pullop.fetch:
1360 1362 pullop.repo.ui.status(_("no changes found\n"))
1361 1363 pullop.cgresult = 0
1362 1364 else:
1363 1365 if pullop.heads is None and list(pullop.common) == [nullid]:
1364 1366 pullop.repo.ui.status(_("requesting all changes\n"))
1365 1367 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1366 1368 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1367 1369 if obsolete.commonversion(remoteversions) is not None:
1368 1370 kwargs['obsmarkers'] = True
1369 1371 pullop.stepsdone.add('obsmarkers')
1370 1372 _pullbundle2extraprepare(pullop, kwargs)
1371 1373 bundle = pullop.remote.getbundle('pull', **kwargs)
1372 1374 try:
1373 1375 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1374 1376 except bundle2.AbortFromPart as exc:
1375 1377 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1376 1378 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1377 1379 except error.BundleValueError as exc:
1378 1380 raise error.Abort(_('missing support for %s') % exc)
1379 1381
1380 1382 if pullop.fetch:
1381 1383 results = [cg['return'] for cg in op.records['changegroup']]
1382 1384 pullop.cgresult = changegroup.combineresults(results)
1383 1385
1384 1386 # processing phases change
1385 1387 for namespace, value in op.records['listkeys']:
1386 1388 if namespace == 'phases':
1387 1389 _pullapplyphases(pullop, value)
1388 1390
1389 1391 # processing bookmark update
1390 1392 for namespace, value in op.records['listkeys']:
1391 1393 if namespace == 'bookmarks':
1392 1394 pullop.remotebookmarks = value
1393 1395
1394 1396 # bookmark data were either already there or pulled in the bundle
1395 1397 if pullop.remotebookmarks is not None:
1396 1398 _pullbookmarks(pullop)
1397 1399
1398 1400 def _pullbundle2extraprepare(pullop, kwargs):
1399 1401 """hook function so that extensions can extend the getbundle call"""
1400 1402 pass
1401 1403
1402 1404 def _pullchangeset(pullop):
1403 1405 """pull changeset from unbundle into the local repo"""
1406 1408 # We delay opening the transaction as long as possible so we
1407 1409 # don't open a transaction for nothing and don't break future useful
1408 1410 # rollback calls
1407 1409 if 'changegroup' in pullop.stepsdone:
1408 1410 return
1409 1411 pullop.stepsdone.add('changegroup')
1410 1412 if not pullop.fetch:
1411 1413 pullop.repo.ui.status(_("no changes found\n"))
1412 1414 pullop.cgresult = 0
1413 1415 return
1414 1416 pullop.gettransaction()
1415 1417 if pullop.heads is None and list(pullop.common) == [nullid]:
1416 1418 pullop.repo.ui.status(_("requesting all changes\n"))
1417 1419 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1418 1420 # issue1320, avoid a race if remote changed after discovery
1419 1421 pullop.heads = pullop.rheads
1420 1422
1421 1423 if pullop.remote.capable('getbundle'):
1422 1424 # TODO: get bundlecaps from remote
1423 1425 cg = pullop.remote.getbundle('pull', common=pullop.common,
1424 1426 heads=pullop.heads or pullop.rheads)
1425 1427 elif pullop.heads is None:
1426 1428 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1427 1429 elif not pullop.remote.capable('changegroupsubset'):
1428 1430 raise error.Abort(_("partial pull cannot be done because "
1429 1431 "other repository doesn't support "
1430 1432 "changegroupsubset."))
1431 1433 else:
1432 1434 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1433 1435 pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
1434 1436
1435 1437 def _pullphase(pullop):
1436 1438 # Get remote phases data from remote
1437 1439 if 'phases' in pullop.stepsdone:
1438 1440 return
1439 1441 remotephases = pullop.remote.listkeys('phases')
1440 1442 _pullapplyphases(pullop, remotephases)
1441 1443
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

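# Illustrative shapes of ``remotephases`` as returned by
# ``listkeys('phases')`` (hex node abbreviated; exact payloads depend on
# the server):
#
#     {'publishing': 'True'}   # publishing server: pulled subset turns public
#     {'a9c125...': '1'}       # non-publishing: draft roots, analyzed above
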
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr

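# Illustrative layout of the 'obsolete' pushkey namespace consumed above:
# markers arrive as base85-encoded blobs under keys 'dump0', 'dump1', ...,
# so the 'dump0' membership test is effectively "does the remote have any
# markers at all":
#
#     {'dump0': '<base85 data>', 'dump1': '<base85 data>'}
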
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = set(['HG20'])
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps

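# The result looks roughly like the following (capability blob assumed and
# abbreviated; it is the URL-quoted output of bundle2.encodecaps):
#
#     set(['HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02'])
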
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, modify the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec

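# A hedged sketch of registering a new step with the decorator above; the
# step name 'example' and its payload are hypothetical:
#
#     @getbundle2partsgenerator('example')
#     def _getbundleexamplepart(bundler, repo, source, bundlecaps=None,
#                               b2caps=None, **kwargs):
#         if kwargs.get('example', False):
#             bundler.newpart('example', data='payload')
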
def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False

def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns an iterator over raw chunks (of varying sizes).
    """
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        bundler = changegroup.getbundler('01', repo)
        return changegroup.getsubsetraw(repo, outgoing, bundler, source)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return bundler.getchunks()

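# Illustrative call (argument choices assumed): with no bundlecaps the
# bundle10 path is taken and every changeset is emitted as raw chunks:
#
#     chunks = getbundlechunks(repo, 'pull', heads=repo.heads(),
#                              common=[nullid])
#     data = ''.join(chunks)
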
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions: # 3.1 and 3.2 ship with an empty value
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                                version=version)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if cgversions:
            part.addparam('version', version)
        part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

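# For example, a bundle2 pull typically requests
# ``listkeys=('phases', 'bookmarks')``; each namespace becomes its own
# 'listkeys' part, which the puller decodes through the
# op.records['listkeys'] entries seen in _pullbundle2 above.
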
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)

def _getbookmarks(repo, **kwargs):
    """Returns bookmark to node mapping.

    This function is primarily used to generate the `bookmarks` bundle2 part.
    It is a separate function in order to make it easy to wrap it
    in extensions. Passing `kwargs` to the function makes it easy to
    add new parameters in extensions.
    """

    return dict(bookmod.listbinbookmarks(repo))

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

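# A minimal sketch (hypothetical helper, not part of the module) of how a
# client would build the 'hashed' form accepted above, mirroring the
# sha1-over-sorted-heads scheme:
#
#     def hashedheads(heads):
#         return ['hashed', hashlib.sha1(''.join(sorted(heads))).digest()]
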
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not util.safehasattr(cg, 'params'):
            # legacy case: bundle1 (changegroup 01)
            lockandtr[1] = repo.lock()
            r = cg.apply(repo, source, url)
        else:
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles', True):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(repo, entries)
    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback', False):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

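# Illustrative manifest input/output for the parser above (URL and
# attribute values made up; BUNDLESPEC follows the real
# <compression>-<type> syntax handled by parsebundlespec):
#
#     https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# parses to:
#
#     [{'URL': 'https://example.com/full.hg', 'BUNDLESPEC': 'gzip-v2',
#       'COMPRESSION': 'gzip', 'VERSION': 'v2', 'REQUIRESNI': 'true'}]
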
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers', default=[])
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]

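# Illustrative configuration for the sort above: with
#
#     [ui]
#     clonebundleprefers = COMPRESSION=gzip, VERSION=v2
#
# entries carrying COMPRESSION=gzip sort first, ties are broken by
# VERSION=v2, and anything still tied keeps its manifest order (_cmp
# returns 0).
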
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    lock = repo.lock()
    try:
        tr = repo.transaction('bundleurl')
        try:
            try:
                fh = urlmod.open(ui, url)
                cg = readbundle(ui, fh, 'stream')

                if isinstance(cg, bundle2.unbundle20):
                    bundle2.processbundle(repo, cg, lambda: tr)
                elif isinstance(cg, streamclone.streamcloneapplier):
                    cg.apply(repo)
                else:
                    cg.apply(repo, 'clonebundles', url)
                tr.close()
                return True
            except urlerr.httperror as e:
                ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
            except urlerr.urlerror as e:
                ui.warn(_('error fetching bundle: %s\n') % e.reason[1])

            return False
        finally:
            tr.release()
    finally:
        lock.release()