unbundle: swap conditional branches for clarity...
Pierre-Yves David
r30870:c5bee6aa default
@@ -1,2009 +1,2009
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullid,
17 17 )
18 18 from . import (
19 19 base85,
20 20 bookmarks as bookmod,
21 21 bundle2,
22 22 changegroup,
23 23 discovery,
24 24 error,
25 25 lock as lockmod,
26 26 obsolete,
27 27 phases,
28 28 pushkey,
29 29 scmutil,
30 30 sslutil,
31 31 streamclone,
32 32 tags,
33 33 url as urlmod,
34 34 util,
35 35 )
36 36
37 37 urlerr = util.urlerr
38 38 urlreq = util.urlreq
39 39
40 40 # Maps bundle version human names to changegroup versions.
41 41 _bundlespeccgversions = {'v1': '01',
42 42 'v2': '02',
43 43 'packed1': 's1',
44 44 'bundle2': '02', #legacy
45 45 }
46 46
47 47 def parsebundlespec(repo, spec, strict=True, externalnames=False):
48 48 """Parse a bundle string specification into parts.
49 49
50 50 Bundle specifications denote a well-defined bundle/exchange format.
51 51 The content of a given specification should not change over time in
52 52 order to ensure that bundles produced by a newer version of Mercurial are
53 53 readable from an older version.
54 54
55 55 The string currently has the form:
56 56
57 57 <compression>-<type>[;<parameter0>[;<parameter1>]]
58 58
59 59 Where <compression> is one of the supported compression formats
60 60 and <type> is (currently) a version string. A ";" can follow the type and
61 61 all text afterwards is interpreted as URI encoded, ";" delimited key=value
62 62 pairs.
63 63
64 64 If ``strict`` is True (the default) <compression> is required. Otherwise,
65 65 it is optional.
66 66
67 67 If ``externalnames`` is False (the default), the human-centric names will
68 68 be converted to their internal representation.
69 69
70 70 Returns a 3-tuple of (compression, version, parameters). Compression will
71 71 be ``None`` if not in strict mode and a compression isn't defined.
72 72
73 73 An ``InvalidBundleSpecification`` is raised when the specification is
74 74 not syntactically well formed.
75 75
76 76 An ``UnsupportedBundleSpecification`` is raised when the compression or
77 77 bundle type/version is not recognized.
78 78
79 79 Note: this function will likely eventually return a more complex data
80 80 structure, including bundle2 part information.
81 81 """
82 82 def parseparams(s):
83 83 if ';' not in s:
84 84 return s, {}
85 85
86 86 params = {}
87 87 version, paramstr = s.split(';', 1)
88 88
89 89 for p in paramstr.split(';'):
90 90 if '=' not in p:
91 91 raise error.InvalidBundleSpecification(
92 92 _('invalid bundle specification: '
93 93 'missing "=" in parameter: %s') % p)
94 94
95 95 key, value = p.split('=', 1)
96 96 key = urlreq.unquote(key)
97 97 value = urlreq.unquote(value)
98 98 params[key] = value
99 99
100 100 return version, params
101 101
102 102
103 103 if strict and '-' not in spec:
104 104 raise error.InvalidBundleSpecification(
105 105 _('invalid bundle specification; '
106 106 'must be prefixed with compression: %s') % spec)
107 107
108 108 if '-' in spec:
109 109 compression, version = spec.split('-', 1)
110 110
111 111 if compression not in util.compengines.supportedbundlenames:
112 112 raise error.UnsupportedBundleSpecification(
113 113 _('%s compression is not supported') % compression)
114 114
115 115 version, params = parseparams(version)
116 116
117 117 if version not in _bundlespeccgversions:
118 118 raise error.UnsupportedBundleSpecification(
119 119 _('%s is not a recognized bundle version') % version)
120 120 else:
121 121 # Value could be just the compression or just the version, in which
122 122 # case some defaults are assumed (but only when not in strict mode).
123 123 assert not strict
124 124
125 125 spec, params = parseparams(spec)
126 126
127 127 if spec in util.compengines.supportedbundlenames:
128 128 compression = spec
129 129 version = 'v1'
130 130 if 'generaldelta' in repo.requirements:
131 131 version = 'v2'
132 132 elif spec in _bundlespeccgversions:
133 133 if spec == 'packed1':
134 134 compression = 'none'
135 135 else:
136 136 compression = 'bzip2'
137 137 version = spec
138 138 else:
139 139 raise error.UnsupportedBundleSpecification(
140 140 _('%s is not a recognized bundle specification') % spec)
141 141
142 142 # The specification for packed1 can optionally declare the data formats
143 143 # required to apply it. If we see this metadata, compare against what the
144 144 # repo supports and error if the bundle isn't compatible.
145 145 if version == 'packed1' and 'requirements' in params:
146 146 requirements = set(params['requirements'].split(','))
147 147 missingreqs = requirements - repo.supportedformats
148 148 if missingreqs:
149 149 raise error.UnsupportedBundleSpecification(
150 150 _('missing support for repository features: %s') %
151 151 ', '.join(sorted(missingreqs)))
152 152
153 153 if not externalnames:
154 154 engine = util.compengines.forbundlename(compression)
155 155 compression = engine.bundletype()[1]
156 156 version = _bundlespeccgversions[version]
157 157 return compression, version, params
158 158
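# Editorial sketch (not part of upstream exchange.py): how parsebundlespec()
# behaves on a few representative specs. The internal names assume the stock
# compression engines (gzip -> 'GZ', bzip2 -> 'BZ', none -> 'UN'); treat the
# exact values as illustrative rather than authoritative.
#
#   >>> parsebundlespec(repo, 'gzip-v2')
#   ('GZ', '02', {})
#   >>> parsebundlespec(repo, 'none-packed1;requirements=generaldelta%2Crevlogv1')
#   ('UN', 's1', {'requirements': 'generaldelta,revlogv1'})
#   >>> parsebundlespec(repo, 'v2', strict=False)  # defaults to bzip2 compression
#   ('BZ', '02', {})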
159 159 def readbundle(ui, fh, fname, vfs=None):
160 160 header = changegroup.readexactly(fh, 4)
161 161
162 162 alg = None
163 163 if not fname:
164 164 fname = "stream"
165 165 if not header.startswith('HG') and header.startswith('\0'):
166 166 fh = changegroup.headerlessfixup(fh, header)
167 167 header = "HG10"
168 168 alg = 'UN'
169 169 elif vfs:
170 170 fname = vfs.join(fname)
171 171
172 172 magic, version = header[0:2], header[2:4]
173 173
174 174 if magic != 'HG':
175 175 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
176 176 if version == '10':
177 177 if alg is None:
178 178 alg = changegroup.readexactly(fh, 2)
179 179 return changegroup.cg1unpacker(fh, alg)
180 180 elif version.startswith('2'):
181 181 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
182 182 elif version == 'S1':
183 183 return streamclone.streamcloneapplier(fh)
184 184 else:
185 185 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
186 186
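# Editorial sketch: typical use of readbundle() from a script or extension.
# 'ui' and 'bundlepath' are assumed to come from the caller. Depending on the
# 4-byte header parsed above, the returned object is a cg1unpacker ('HG10'),
# a bundle2 unbundler ('HG2x'), or a streamclone applier ('HGS1').
#
#   fh = open(bundlepath, 'rb')
#   gen = readbundle(ui, fh, bundlepath)
#   if isinstance(gen, bundle2.unbundle20):
#       for part in gen.iterparts():
#           ...  # inspect part.type, part.params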
187 187 def getbundlespec(ui, fh):
188 188 """Infer the bundlespec from a bundle file handle.
189 189
190 190 The input file handle is seeked and the original seek position is not
191 191 restored.
192 192 """
193 193 def speccompression(alg):
194 194 try:
195 195 return util.compengines.forbundletype(alg).bundletype()[0]
196 196 except KeyError:
197 197 return None
198 198
199 199 b = readbundle(ui, fh, None)
200 200 if isinstance(b, changegroup.cg1unpacker):
201 201 alg = b._type
202 202 if alg == '_truncatedBZ':
203 203 alg = 'BZ'
204 204 comp = speccompression(alg)
205 205 if not comp:
206 206 raise error.Abort(_('unknown compression algorithm: %s') % alg)
207 207 return '%s-v1' % comp
208 208 elif isinstance(b, bundle2.unbundle20):
209 209 if 'Compression' in b.params:
210 210 comp = speccompression(b.params['Compression'])
211 211 if not comp:
212 212 raise error.Abort(_('unknown compression algorithm: %s') % b.params['Compression'])
213 213 else:
214 214 comp = 'none'
215 215
216 216 version = None
217 217 for part in b.iterparts():
218 218 if part.type == 'changegroup':
219 219 version = part.params['version']
220 220 if version in ('01', '02'):
221 221 version = 'v2'
222 222 else:
223 223 raise error.Abort(_('changegroup version %s does not have '
224 224 'a known bundlespec') % version,
225 225 hint=_('try upgrading your Mercurial '
226 226 'client'))
227 227
228 228 if not version:
229 229 raise error.Abort(_('could not identify changegroup version in '
230 230 'bundle'))
231 231
232 232 return '%s-%s' % (comp, version)
233 233 elif isinstance(b, streamclone.streamcloneapplier):
234 234 requirements = streamclone.readbundle1header(fh)[2]
235 235 params = 'requirements=%s' % ','.join(sorted(requirements))
236 236 return 'none-packed1;%s' % urlreq.quote(params)
237 237 else:
238 238 raise error.Abort(_('unknown bundle type: %s') % b)
239 239
240 240 def buildobsmarkerspart(bundler, markers):
241 241 """add an obsmarker part to the bundler with <markers>
242 242
243 243 No part is created if markers is empty.
244 244 Raises ValueError if the bundler doesn't support any known obsmarker format.
245 245 """
246 246 if markers:
247 247 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
248 248 version = obsolete.commonversion(remoteversions)
249 249 if version is None:
250 250 raise ValueError('bundler does not support common obsmarker format')
251 251 stream = obsolete.encodemarkers(markers, True, version=version)
252 252 return bundler.newpart('obsmarkers', data=stream)
253 253 return None
254 254
255 255 def _computeoutgoing(repo, heads, common):
256 256 """Computes which revs are outgoing given a set of common
257 257 and a set of heads.
258 258
259 259 This is a separate function so extensions can have access to
260 260 the logic.
261 261
262 262 Returns a discovery.outgoing object.
263 263 """
264 264 cl = repo.changelog
265 265 if common:
266 266 hasnode = cl.hasnode
267 267 common = [n for n in common if hasnode(n)]
268 268 else:
269 269 common = [nullid]
270 270 if not heads:
271 271 heads = cl.heads()
272 272 return discovery.outgoing(repo, common, heads)
273 273
274 274 def _forcebundle1(op):
275 275 """return true if a pull/push must use bundle1
276 276
277 277 This function is used to allow testing of the older bundle version"""
278 278 ui = op.repo.ui
279 279 forcebundle1 = False
280 280 # The goal of this config is to allow developers to choose the bundle
281 281 # version used during exchange. This is especially handy during tests.
282 282 # Value is a list of bundle versions to pick from; the highest supported
283 283 # version should be used.
284 284 #
285 285 # developer config: devel.legacy.exchange
286 286 exchange = ui.configlist('devel', 'legacy.exchange')
287 287 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
288 288 return forcebundle1 or not op.remote.capable('bundle2')
289 289
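# Editorial note: a minimal hgrc snippet exercising the developer config read
# above. Listing only 'bundle1' forces the legacy exchange path during tests
# (derived from the configlist() call above; shown as an illustration).
#
#   [devel]
#   legacy.exchange = bundle1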
290 290 class pushoperation(object):
291 291 """A object that represent a single push operation
292 292
293 293 Its purpose is to carry push related state and very common operations.
294 294
295 295 A new pushoperation should be created at the beginning of each push and
296 296 discarded afterward.
297 297 """
298 298
299 299 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
300 300 bookmarks=()):
301 301 # repo we push from
302 302 self.repo = repo
303 303 self.ui = repo.ui
304 304 # repo we push to
305 305 self.remote = remote
306 306 # force option provided
307 307 self.force = force
308 308 # revs to be pushed (None is "all")
309 309 self.revs = revs
310 310 # bookmarks explicitly pushed
311 311 self.bookmarks = bookmarks
312 312 # allow push of new branch
313 313 self.newbranch = newbranch
314 314 # did a local lock get acquired?
315 315 self.locallocked = None
316 316 # steps already performed
317 317 # (used to check what steps have already been performed through bundle2)
318 318 self.stepsdone = set()
319 319 # Integer version of the changegroup push result
320 320 # - None means nothing to push
321 321 # - 0 means HTTP error
322 322 # - 1 means we pushed and remote head count is unchanged *or*
323 323 # we have outgoing changesets but refused to push
324 324 # - other values as described by addchangegroup()
325 325 self.cgresult = None
326 326 # Boolean value for the bookmark push
327 327 self.bkresult = None
328 328 # discovery.outgoing object (contains common and outgoing data)
329 329 self.outgoing = None
330 330 # all remote heads before the push
331 331 self.remoteheads = None
332 332 # testable as a boolean indicating if any nodes are missing locally.
333 333 self.incoming = None
334 334 # phase changes that must be pushed alongside the changesets
335 335 self.outdatedphases = None
336 336 # phase changes that must be pushed if the changeset push fails
337 337 self.fallbackoutdatedphases = None
338 338 # outgoing obsmarkers
339 339 self.outobsmarkers = set()
340 340 # outgoing bookmarks
341 341 self.outbookmarks = []
342 342 # transaction manager
343 343 self.trmanager = None
344 344 # map { pushkey partid -> callback handling failure}
345 345 # used to handle exception from mandatory pushkey part failure
346 346 self.pkfailcb = {}
347 347
348 348 @util.propertycache
349 349 def futureheads(self):
350 350 """future remote heads if the changeset push succeeds"""
351 351 return self.outgoing.missingheads
352 352
353 353 @util.propertycache
354 354 def fallbackheads(self):
355 355 """future remote heads if the changeset push fails"""
356 356 if self.revs is None:
357 357 # no target to push, all common heads are relevant
358 358 return self.outgoing.commonheads
359 359 unfi = self.repo.unfiltered()
360 360 # I want cheads = heads(::missingheads and ::commonheads)
361 361 # (missingheads is revs with secret changesets filtered out)
362 362 #
363 363 # This can be expressed as:
364 364 # cheads = ( (missingheads and ::commonheads)
365 365 # + (commonheads and ::missingheads)
366 366 # )
367 367 #
368 368 # while trying to push we already computed the following:
369 369 # common = (::commonheads)
370 370 # missing = ((commonheads::missingheads) - commonheads)
371 371 #
372 372 # We can pick:
373 373 # * missingheads part of common (::commonheads)
374 374 common = self.outgoing.common
375 375 nm = self.repo.changelog.nodemap
376 376 cheads = [node for node in self.revs if nm[node] in common]
377 377 # and
378 378 # * commonheads parents on missing
379 379 revset = unfi.set('%ln and parents(roots(%ln))',
380 380 self.outgoing.commonheads,
381 381 self.outgoing.missing)
382 382 cheads.extend(c.node() for c in revset)
383 383 return cheads
384 384
385 385 @property
386 386 def commonheads(self):
387 387 """set of all common heads after changeset bundle push"""
388 388 if self.cgresult:
389 389 return self.futureheads
390 390 else:
391 391 return self.fallbackheads
392 392
393 393 # mapping of messages used when pushing bookmarks
394 394 bookmsgmap = {'update': (_("updating bookmark %s\n"),
395 395 _('updating bookmark %s failed!\n')),
396 396 'export': (_("exporting bookmark %s\n"),
397 397 _('exporting bookmark %s failed!\n')),
398 398 'delete': (_("deleting remote bookmark %s\n"),
399 399 _('deleting remote bookmark %s failed!\n')),
400 400 }
401 401
402 402
403 403 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
404 404 opargs=None):
405 405 '''Push outgoing changesets (limited by revs) from a local
406 406 repository to remote. Return an integer:
407 407 - None means nothing to push
408 408 - 0 means HTTP error
409 409 - 1 means we pushed and remote head count is unchanged *or*
410 410 we have outgoing changesets but refused to push
411 411 - other values as described by addchangegroup()
412 412 '''
413 413 if opargs is None:
414 414 opargs = {}
415 415 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
416 416 **opargs)
417 417 if pushop.remote.local():
418 418 missing = (set(pushop.repo.requirements)
419 419 - pushop.remote.local().supported)
420 420 if missing:
421 421 msg = _("required features are not"
422 422 " supported in the destination:"
423 423 " %s") % (', '.join(sorted(missing)))
424 424 raise error.Abort(msg)
425 425
426 426 # there are two ways to push to remote repo:
427 427 #
428 428 # addchangegroup assumes local user can lock remote
429 429 # repo (local filesystem, old ssh servers).
430 430 #
431 431 # unbundle assumes local user cannot lock remote repo (new ssh
432 432 # servers, http servers).
433 433
434 434 if not pushop.remote.canpush():
435 435 raise error.Abort(_("destination does not support push"))
436 436 # get local lock as we might write phase data
437 437 localwlock = locallock = None
438 438 try:
439 439 # bundle2 push may receive a reply bundle touching bookmarks or other
440 440 # things requiring the wlock. Take it now to ensure proper ordering.
441 441 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
442 442 if (not _forcebundle1(pushop)) and maypushback:
443 443 localwlock = pushop.repo.wlock()
444 444 locallock = pushop.repo.lock()
445 445 pushop.locallocked = True
446 446 except IOError as err:
447 447 pushop.locallocked = False
448 448 if err.errno != errno.EACCES:
449 449 raise
450 450 # source repo cannot be locked.
451 451 # We do not abort the push, but just disable the local phase
452 452 # synchronisation.
453 453 msg = 'cannot lock source repository: %s\n' % err
454 454 pushop.ui.debug(msg)
455 455 try:
456 456 if pushop.locallocked:
457 457 pushop.trmanager = transactionmanager(pushop.repo,
458 458 'push-response',
459 459 pushop.remote.url())
460 460 pushop.repo.checkpush(pushop)
461 461 lock = None
462 462 unbundle = pushop.remote.capable('unbundle')
463 463 if not unbundle:
464 464 lock = pushop.remote.lock()
465 465 try:
466 466 _pushdiscovery(pushop)
467 467 if not _forcebundle1(pushop):
468 468 _pushbundle2(pushop)
469 469 _pushchangeset(pushop)
470 470 _pushsyncphase(pushop)
471 471 _pushobsolete(pushop)
472 472 _pushbookmark(pushop)
473 473 finally:
474 474 if lock is not None:
475 475 lock.release()
476 476 if pushop.trmanager:
477 477 pushop.trmanager.close()
478 478 finally:
479 479 if pushop.trmanager:
480 480 pushop.trmanager.release()
481 481 if locallock is not None:
482 482 locallock.release()
483 483 if localwlock is not None:
484 484 localwlock.release()
485 485
486 486 return pushop
487 487
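# Editorial sketch: driving push() programmatically. 'repo' and 'remote' are
# assumed to be a local repository and a peer (e.g. from hg.peer()); the
# meaning of cgresult follows the integer codes in the docstring above.
#
#   pushop = push(repo, remote, revs=[repo['tip'].node()])
#   if pushop.cgresult == 0:
#       repo.ui.warn('push failed\n')
#   elif pushop.cgresult is None:
#       repo.ui.status('nothing to push\n')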
488 488 # list of steps to perform discovery before push
489 489 pushdiscoveryorder = []
490 490
491 491 # Mapping between step name and function
492 492 #
493 493 # This exists to help extensions wrap steps if necessary
494 494 pushdiscoverymapping = {}
495 495
496 496 def pushdiscovery(stepname):
497 497 """decorator for function performing discovery before push
498 498
499 499 The function is added to the step -> function mapping and appended to the
500 500 list of steps. Beware that decorated functions will be added in order (this
501 501 may matter).
502 502
503 503 You can only use this decorator for a new step; if you want to wrap a step
504 504 from an extension, change the pushdiscovery dictionary directly."""
505 505 def dec(func):
506 506 assert stepname not in pushdiscoverymapping
507 507 pushdiscoverymapping[stepname] = func
508 508 pushdiscoveryorder.append(stepname)
509 509 return func
510 510 return dec
511 511
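# Editorial sketch: how an extension could register an extra discovery step
# with the decorator above. The step name and helper are hypothetical.
#
#   @pushdiscovery('mydata')
#   def _pushdiscoverymydata(pushop):
#       # stash extension-specific state on the push operation
#       pushop.mydata = computemydata(pushop.repo)  # hypothetical helper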
512 512 def _pushdiscovery(pushop):
513 513 """Run all discovery steps"""
514 514 for stepname in pushdiscoveryorder:
515 515 step = pushdiscoverymapping[stepname]
516 516 step(pushop)
517 517
518 518 @pushdiscovery('changeset')
519 519 def _pushdiscoverychangeset(pushop):
520 520 """discover the changeset that need to be pushed"""
521 521 fci = discovery.findcommonincoming
522 522 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
523 523 common, inc, remoteheads = commoninc
524 524 fco = discovery.findcommonoutgoing
525 525 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
526 526 commoninc=commoninc, force=pushop.force)
527 527 pushop.outgoing = outgoing
528 528 pushop.remoteheads = remoteheads
529 529 pushop.incoming = inc
530 530
531 531 @pushdiscovery('phase')
532 532 def _pushdiscoveryphase(pushop):
533 533 """discover the phase that needs to be pushed
534 534
535 535 (computed for both the success and failure cases of the changeset push)"""
536 536 outgoing = pushop.outgoing
537 537 unfi = pushop.repo.unfiltered()
538 538 remotephases = pushop.remote.listkeys('phases')
539 539 publishing = remotephases.get('publishing', False)
540 540 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
541 541 and remotephases # server supports phases
542 542 and not pushop.outgoing.missing # no changesets to be pushed
543 543 and publishing):
544 544 # When:
545 545 # - this is a subrepo push
546 546 # - and remote supports phases
547 547 # - and no changesets are to be pushed
548 548 # - and remote is publishing
549 549 # We may be in the issue 3871 case!
550 550 # We drop the phase synchronisation that would otherwise be done as a
551 551 # courtesy, to avoid publishing changesets that may still be draft
552 552 # locally but public on the remote.
553 553 remotephases = {'publishing': 'True'}
554 554 ana = phases.analyzeremotephases(pushop.repo,
555 555 pushop.fallbackheads,
556 556 remotephases)
557 557 pheads, droots = ana
558 558 extracond = ''
559 559 if not publishing:
560 560 extracond = ' and public()'
561 561 revset = 'heads((%%ln::%%ln) %s)' % extracond
562 562 # Get the list of all revs draft on remote but public here.
563 563 # XXX Beware that the revset breaks if droots is not strictly
564 564 # XXX roots; we may want to ensure it is, but that is costly
565 565 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
566 566 if not outgoing.missing:
567 567 future = fallback
568 568 else:
569 569 # add changesets we are going to push as draft
570 570 #
571 571 # should not be necessary for a publishing server, but because of an
572 572 # issue fixed in xxxxx we have to do it anyway.
573 573 fdroots = list(unfi.set('roots(%ln + %ln::)',
574 574 outgoing.missing, droots))
575 575 fdroots = [f.node() for f in fdroots]
576 576 future = list(unfi.set(revset, fdroots, pushop.futureheads))
577 577 pushop.outdatedphases = future
578 578 pushop.fallbackoutdatedphases = fallback
579 579
580 580 @pushdiscovery('obsmarker')
581 581 def _pushdiscoveryobsmarkers(pushop):
582 582 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
583 583 and pushop.repo.obsstore
584 584 and 'obsolete' in pushop.remote.listkeys('namespaces')):
585 585 repo = pushop.repo
586 586 # very naive computation that can be quite expensive on big repos.
587 587 # However, evolution is currently slow on them anyway.
588 588 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
589 589 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
590 590
591 591 @pushdiscovery('bookmarks')
592 592 def _pushdiscoverybookmarks(pushop):
593 593 ui = pushop.ui
594 594 repo = pushop.repo.unfiltered()
595 595 remote = pushop.remote
596 596 ui.debug("checking for updated bookmarks\n")
597 597 ancestors = ()
598 598 if pushop.revs:
599 599 revnums = map(repo.changelog.rev, pushop.revs)
600 600 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
601 601 remotebookmark = remote.listkeys('bookmarks')
602 602
603 603 explicit = set([repo._bookmarks.expandname(bookmark)
604 604 for bookmark in pushop.bookmarks])
605 605
606 606 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
607 607 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
608 608
609 609 def safehex(x):
610 610 if x is None:
611 611 return x
612 612 return hex(x)
613 613
614 614 def hexifycompbookmarks(bookmarks):
615 615 for b, scid, dcid in bookmarks:
616 616 yield b, safehex(scid), safehex(dcid)
617 617
618 618 comp = [hexifycompbookmarks(marks) for marks in comp]
619 619 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
620 620
621 621 for b, scid, dcid in advsrc:
622 622 if b in explicit:
623 623 explicit.remove(b)
624 624 if not ancestors or repo[scid].rev() in ancestors:
625 625 pushop.outbookmarks.append((b, dcid, scid))
626 626 # search for added bookmarks
627 627 for b, scid, dcid in addsrc:
628 628 if b in explicit:
629 629 explicit.remove(b)
630 630 pushop.outbookmarks.append((b, '', scid))
631 631 # search for overwritten bookmarks
632 632 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
633 633 if b in explicit:
634 634 explicit.remove(b)
635 635 pushop.outbookmarks.append((b, dcid, scid))
636 636 # search for bookmarks to delete
637 637 for b, scid, dcid in adddst:
638 638 if b in explicit:
639 639 explicit.remove(b)
640 640 # treat as "deleted locally"
641 641 pushop.outbookmarks.append((b, dcid, ''))
642 642 # identical bookmarks shouldn't get reported
643 643 for b, scid, dcid in same:
644 644 if b in explicit:
645 645 explicit.remove(b)
646 646
647 647 if explicit:
648 648 explicit = sorted(explicit)
649 649 # we should probably list all of them
650 650 ui.warn(_('bookmark %s does not exist on the local '
651 651 'or remote repository!\n') % explicit[0])
652 652 pushop.bkresult = 2
653 653
654 654 pushop.outbookmarks.sort()
655 655
656 656 def _pushcheckoutgoing(pushop):
657 657 outgoing = pushop.outgoing
658 658 unfi = pushop.repo.unfiltered()
659 659 if not outgoing.missing:
660 660 # nothing to push
661 661 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
662 662 return False
663 663 # something to push
664 664 if not pushop.force:
665 665 # if repo.obsstore is false --> no obsolete markers,
666 666 # so we can skip the iteration entirely
667 667 if unfi.obsstore:
668 668 # these messages are here for 80 char limit reasons
669 669 mso = _("push includes obsolete changeset: %s!")
670 670 mst = {"unstable": _("push includes unstable changeset: %s!"),
671 671 "bumped": _("push includes bumped changeset: %s!"),
672 672 "divergent": _("push includes divergent changeset: %s!")}
673 673 # If we are pushing and there is at least one
674 674 # obsolete or unstable changeset in missing, at
675 675 # least one of the missingheads will be obsolete or
676 676 # unstable. So checking heads only is ok
677 677 for node in outgoing.missingheads:
678 678 ctx = unfi[node]
679 679 if ctx.obsolete():
680 680 raise error.Abort(mso % ctx)
681 681 elif ctx.troubled():
682 682 raise error.Abort(mst[ctx.troubles()[0]] % ctx)
683 683
684 684 discovery.checkheads(pushop)
685 685 return True
686 686
687 687 # List of names of steps to perform for an outgoing bundle2, order matters.
688 688 b2partsgenorder = []
689 689
690 690 # Mapping between step name and function
691 691 #
692 692 # This exists to help extensions wrap steps if necessary
693 693 b2partsgenmapping = {}
694 694
695 695 def b2partsgenerator(stepname, idx=None):
696 696 """decorator for function generating bundle2 part
697 697
698 698 The function is added to the step -> function mapping and appended to the
699 699 list of steps. Beware that decorated functions will be added in order
700 700 (this may matter).
701 701
702 702 You can only use this decorator for new steps; if you want to wrap a step
703 703 from an extension, change the b2partsgenmapping dictionary directly."""
704 704 def dec(func):
705 705 assert stepname not in b2partsgenmapping
706 706 b2partsgenmapping[stepname] = func
707 707 if idx is None:
708 708 b2partsgenorder.append(stepname)
709 709 else:
710 710 b2partsgenorder.insert(idx, stepname)
711 711 return func
712 712 return dec
713 713
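# Editorial sketch: registering a bundle2 part generator. idx=0 would place
# the step first; returning a callable registers it as a reply handler (see
# _pushbundle2 below). The part name and payload are hypothetical.
#
#   @b2partsgenerator('mypart', idx=0)
#   def _pushb2mypart(pushop, bundler):
#       part = bundler.newpart('x-mypart', data='payload')
#       def handlereply(op):
#           pass  # inspect op.records for the server's answer
#       return handlereply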
714 714 def _pushb2ctxcheckheads(pushop, bundler):
715 715 """Generate race condition checking parts
716 716
717 717 Exists as an independent function to aid extensions
718 718 """
719 719 if not pushop.force:
720 720 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
721 721
722 722 @b2partsgenerator('changeset')
723 723 def _pushb2ctx(pushop, bundler):
724 724 """handle changegroup push through bundle2
725 725
726 726 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
727 727 """
728 728 if 'changesets' in pushop.stepsdone:
729 729 return
730 730 pushop.stepsdone.add('changesets')
731 731 # Send known heads to the server for race detection.
732 732 if not _pushcheckoutgoing(pushop):
733 733 return
734 734 pushop.repo.prepushoutgoinghooks(pushop)
735 735
736 736 _pushb2ctxcheckheads(pushop, bundler)
737 737
738 738 b2caps = bundle2.bundle2caps(pushop.remote)
739 739 version = '01'
740 740 cgversions = b2caps.get('changegroup')
741 741 if cgversions: # 3.1 and 3.2 ship with an empty value
742 742 cgversions = [v for v in cgversions
743 743 if v in changegroup.supportedoutgoingversions(
744 744 pushop.repo)]
745 745 if not cgversions:
746 746 raise ValueError(_('no common changegroup version'))
747 747 version = max(cgversions)
748 748 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
749 749 pushop.outgoing,
750 750 version=version)
751 751 cgpart = bundler.newpart('changegroup', data=cg)
752 752 if cgversions:
753 753 cgpart.addparam('version', version)
754 754 if 'treemanifest' in pushop.repo.requirements:
755 755 cgpart.addparam('treemanifest', '1')
756 756 def handlereply(op):
757 757 """extract addchangegroup returns from server reply"""
758 758 cgreplies = op.records.getreplies(cgpart.id)
759 759 assert len(cgreplies['changegroup']) == 1
760 760 pushop.cgresult = cgreplies['changegroup'][0]['return']
761 761 return handlereply
762 762
763 763 @b2partsgenerator('phase')
764 764 def _pushb2phases(pushop, bundler):
765 765 """handle phase push through bundle2"""
766 766 if 'phases' in pushop.stepsdone:
767 767 return
768 768 b2caps = bundle2.bundle2caps(pushop.remote)
769 769 if 'pushkey' not in b2caps:
770 770 return
771 771 pushop.stepsdone.add('phases')
772 772 part2node = []
773 773
774 774 def handlefailure(pushop, exc):
775 775 targetid = int(exc.partid)
776 776 for partid, node in part2node:
777 777 if partid == targetid:
778 778 raise error.Abort(_('updating %s to public failed') % node)
779 779
780 780 enc = pushkey.encode
781 781 for newremotehead in pushop.outdatedphases:
782 782 part = bundler.newpart('pushkey')
783 783 part.addparam('namespace', enc('phases'))
784 784 part.addparam('key', enc(newremotehead.hex()))
785 785 part.addparam('old', enc(str(phases.draft)))
786 786 part.addparam('new', enc(str(phases.public)))
787 787 part2node.append((part.id, newremotehead))
788 788 pushop.pkfailcb[part.id] = handlefailure
789 789
790 790 def handlereply(op):
791 791 for partid, node in part2node:
792 792 partrep = op.records.getreplies(partid)
793 793 results = partrep['pushkey']
794 794 assert len(results) <= 1
795 795 msg = None
796 796 if not results:
797 797 msg = _('server ignored update of %s to public!\n') % node
798 798 elif not int(results[0]['return']):
799 799 msg = _('updating %s to public failed!\n') % node
800 800 if msg is not None:
801 801 pushop.ui.warn(msg)
802 802 return handlereply
803 803
804 804 @b2partsgenerator('obsmarkers')
805 805 def _pushb2obsmarkers(pushop, bundler):
806 806 if 'obsmarkers' in pushop.stepsdone:
807 807 return
808 808 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
809 809 if obsolete.commonversion(remoteversions) is None:
810 810 return
811 811 pushop.stepsdone.add('obsmarkers')
812 812 if pushop.outobsmarkers:
813 813 markers = sorted(pushop.outobsmarkers)
814 814 buildobsmarkerspart(bundler, markers)
815 815
816 816 @b2partsgenerator('bookmarks')
817 817 def _pushb2bookmarks(pushop, bundler):
818 818 """handle bookmark push through bundle2"""
819 819 if 'bookmarks' in pushop.stepsdone:
820 820 return
821 821 b2caps = bundle2.bundle2caps(pushop.remote)
822 822 if 'pushkey' not in b2caps:
823 823 return
824 824 pushop.stepsdone.add('bookmarks')
825 825 part2book = []
826 826 enc = pushkey.encode
827 827
828 828 def handlefailure(pushop, exc):
829 829 targetid = int(exc.partid)
830 830 for partid, book, action in part2book:
831 831 if partid == targetid:
832 832 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
833 833 # we should not be called for parts we did not generate
834 834 assert False
835 835
836 836 for book, old, new in pushop.outbookmarks:
837 837 part = bundler.newpart('pushkey')
838 838 part.addparam('namespace', enc('bookmarks'))
839 839 part.addparam('key', enc(book))
840 840 part.addparam('old', enc(old))
841 841 part.addparam('new', enc(new))
842 842 action = 'update'
843 843 if not old:
844 844 action = 'export'
845 845 elif not new:
846 846 action = 'delete'
847 847 part2book.append((part.id, book, action))
848 848 pushop.pkfailcb[part.id] = handlefailure
849 849
850 850 def handlereply(op):
851 851 ui = pushop.ui
852 852 for partid, book, action in part2book:
853 853 partrep = op.records.getreplies(partid)
854 854 results = partrep['pushkey']
855 855 assert len(results) <= 1
856 856 if not results:
857 857 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
858 858 else:
859 859 ret = int(results[0]['return'])
860 860 if ret:
861 861 ui.status(bookmsgmap[action][0] % book)
862 862 else:
863 863 ui.warn(bookmsgmap[action][1] % book)
864 864 if pushop.bkresult is not None:
865 865 pushop.bkresult = 1
866 866 return handlereply
867 867
868 868
869 869 def _pushbundle2(pushop):
870 870 """push data to the remote using bundle2
871 871
872 872 The only currently supported type of data is changegroup but this will
873 873 evolve in the future."""
874 874 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
875 875 pushback = (pushop.trmanager
876 876 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
877 877
878 878 # create reply capability
879 879 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
880 880 allowpushback=pushback))
881 881 bundler.newpart('replycaps', data=capsblob)
882 882 replyhandlers = []
883 883 for partgenname in b2partsgenorder:
884 884 partgen = b2partsgenmapping[partgenname]
885 885 ret = partgen(pushop, bundler)
886 886 if callable(ret):
887 887 replyhandlers.append(ret)
888 888 # do not push if nothing to push
889 889 if bundler.nbparts <= 1:
890 890 return
891 891 stream = util.chunkbuffer(bundler.getchunks())
892 892 try:
893 893 try:
894 894 reply = pushop.remote.unbundle(
895 895 stream, ['force'], pushop.remote.url())
896 896 except error.BundleValueError as exc:
897 897 raise error.Abort(_('missing support for %s') % exc)
898 898 try:
899 899 trgetter = None
900 900 if pushback:
901 901 trgetter = pushop.trmanager.transaction
902 902 op = bundle2.processbundle(pushop.repo, reply, trgetter)
903 903 except error.BundleValueError as exc:
904 904 raise error.Abort(_('missing support for %s') % exc)
905 905 except bundle2.AbortFromPart as exc:
906 906 pushop.ui.status(_('remote: %s\n') % exc)
907 907 raise error.Abort(_('push failed on remote'), hint=exc.hint)
908 908 except error.PushkeyFailed as exc:
909 909 partid = int(exc.partid)
910 910 if partid not in pushop.pkfailcb:
911 911 raise
912 912 pushop.pkfailcb[partid](pushop, exc)
913 913 for rephand in replyhandlers:
914 914 rephand(op)
915 915
916 916 def _pushchangeset(pushop):
917 917 """Make the actual push of changeset bundle to remote repo"""
918 918 if 'changesets' in pushop.stepsdone:
919 919 return
920 920 pushop.stepsdone.add('changesets')
921 921 if not _pushcheckoutgoing(pushop):
922 922 return
923 923 pushop.repo.prepushoutgoinghooks(pushop)
924 924 outgoing = pushop.outgoing
925 925 unbundle = pushop.remote.capable('unbundle')
926 926 # TODO: get bundlecaps from remote
927 927 bundlecaps = None
928 928 # create a changegroup from local
929 929 if pushop.revs is None and not (outgoing.excluded
930 930 or pushop.repo.changelog.filteredrevs):
931 931 # push everything,
932 932 # use the fast path, no race possible on push
933 933 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
934 934 cg = changegroup.getsubset(pushop.repo,
935 935 outgoing,
936 936 bundler,
937 937 'push',
938 938 fastpath=True)
939 939 else:
940 940 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
941 941 bundlecaps)
942 942
943 943 # apply changegroup to remote
944 944 if unbundle:
945 945 # local repo finds heads on server, finds out what
946 946 # revs it must push. once revs transferred, if server
947 947 # finds it has different heads (someone else won
948 948 # commit/push race), server aborts.
949 949 if pushop.force:
950 950 remoteheads = ['force']
951 951 else:
952 952 remoteheads = pushop.remoteheads
953 953 # ssh: return remote's addchangegroup()
954 954 # http: return remote's addchangegroup() or 0 for error
955 955 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
956 956 pushop.repo.url())
957 957 else:
958 958 # we return an integer indicating remote head count
959 959 # change
960 960 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
961 961 pushop.repo.url())
962 962
963 963 def _pushsyncphase(pushop):
964 964 """synchronise phase information locally and remotely"""
965 965 cheads = pushop.commonheads
966 966 # even when we don't push, exchanging phase data is useful
967 967 remotephases = pushop.remote.listkeys('phases')
968 968 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
969 969 and remotephases # server supports phases
970 970 and pushop.cgresult is None # nothing was pushed
971 971 and remotephases.get('publishing', False)):
972 972 # When:
973 973 # - this is a subrepo push
974 974 # - and remote supports phases
975 975 # - and no changeset was pushed
976 976 # - and remote is publishing
977 977 # We may be in the issue 3871 case!
978 978 # We drop the phase synchronisation that would otherwise be done as a
979 979 # courtesy, to avoid publishing changesets that may still be draft
980 980 # locally but public on the remote.
981 981 remotephases = {'publishing': 'True'}
982 982 if not remotephases: # old server or public-only reply from non-publishing
983 983 _localphasemove(pushop, cheads)
984 984 # don't push any phase data as there is nothing to push
985 985 else:
986 986 ana = phases.analyzeremotephases(pushop.repo, cheads,
987 987 remotephases)
988 988 pheads, droots = ana
989 989 ### Apply remote phase on local
990 990 if remotephases.get('publishing', False):
991 991 _localphasemove(pushop, cheads)
992 992 else: # publish = False
993 993 _localphasemove(pushop, pheads)
994 994 _localphasemove(pushop, cheads, phases.draft)
995 995 ### Apply local phase on remote
996 996
997 997 if pushop.cgresult:
998 998 if 'phases' in pushop.stepsdone:
999 999 # phases already pushed through bundle2
1000 1000 return
1001 1001 outdated = pushop.outdatedphases
1002 1002 else:
1003 1003 outdated = pushop.fallbackoutdatedphases
1004 1004
1005 1005 pushop.stepsdone.add('phases')
1006 1006
1007 1007 # filter heads already turned public by the push
1008 1008 outdated = [c for c in outdated if c.node() not in pheads]
1009 1009 # fallback to independent pushkey command
1010 1010 for newremotehead in outdated:
1011 1011 r = pushop.remote.pushkey('phases',
1012 1012 newremotehead.hex(),
1013 1013 str(phases.draft),
1014 1014 str(phases.public))
1015 1015 if not r:
1016 1016 pushop.ui.warn(_('updating %s to public failed!\n')
1017 1017 % newremotehead)
1018 1018
1019 1019 def _localphasemove(pushop, nodes, phase=phases.public):
1020 1020 """move <nodes> to <phase> in the local source repo"""
1021 1021 if pushop.trmanager:
1022 1022 phases.advanceboundary(pushop.repo,
1023 1023 pushop.trmanager.transaction(),
1024 1024 phase,
1025 1025 nodes)
1026 1026 else:
1027 1027 # repo is not locked, do not change any phases!
1028 1028 # Inform the user that phases should have been moved when
1029 1029 # applicable.
1030 1030 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1031 1031 phasestr = phases.phasenames[phase]
1032 1032 if actualmoves:
1033 1033 pushop.ui.status(_('cannot lock source repo, skipping '
1034 1034 'local %s phase update\n') % phasestr)
1035 1035
1036 1036 def _pushobsolete(pushop):
1037 1037 """utility function to push obsolete markers to a remote"""
1038 1038 if 'obsmarkers' in pushop.stepsdone:
1039 1039 return
1040 1040 repo = pushop.repo
1041 1041 remote = pushop.remote
1042 1042 pushop.stepsdone.add('obsmarkers')
1043 1043 if pushop.outobsmarkers:
1044 1044 pushop.ui.debug('try to push obsolete markers to remote\n')
1045 1045 rslts = []
1046 1046 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1047 1047 for key in sorted(remotedata, reverse=True):
1048 1048 # reverse sort to ensure we end with dump0
1049 1049 data = remotedata[key]
1050 1050 rslts.append(remote.pushkey('obsolete', key, '', data))
1051 1051 if [r for r in rslts if not r]:
1052 1052 msg = _('failed to push some obsolete markers!\n')
1053 1053 repo.ui.warn(msg)
1054 1054
1055 1055 def _pushbookmark(pushop):
1056 1056 """Update bookmark position on remote"""
1057 1057 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1058 1058 return
1059 1059 pushop.stepsdone.add('bookmarks')
1060 1060 ui = pushop.ui
1061 1061 remote = pushop.remote
1062 1062
1063 1063 for b, old, new in pushop.outbookmarks:
1064 1064 action = 'update'
1065 1065 if not old:
1066 1066 action = 'export'
1067 1067 elif not new:
1068 1068 action = 'delete'
1069 1069 if remote.pushkey('bookmarks', b, old, new):
1070 1070 ui.status(bookmsgmap[action][0] % b)
1071 1071 else:
1072 1072 ui.warn(bookmsgmap[action][1] % b)
1073 1073 # discovery can have set the value from an invalid entry
1074 1074 if pushop.bkresult is not None:
1075 1075 pushop.bkresult = 1
1076 1076
1077 1077 class pulloperation(object):
1078 1078 """A object that represent a single pull operation
1079 1079
1080 1080 It purpose is to carry pull related state and very common operation.
1081 1081
1082 1082 A new should be created at the beginning of each pull and discarded
1083 1083 afterward.
1084 1084 """
1085 1085
1086 1086 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1087 1087 remotebookmarks=None, streamclonerequested=None):
1088 1088 # repo we pull into
1089 1089 self.repo = repo
1090 1090 # repo we pull from
1091 1091 self.remote = remote
1092 1092 # revisions we try to pull (None is "all")
1093 1093 self.heads = heads
1094 1094 # bookmarks pulled explicitly
1095 1095 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1096 1096 for bookmark in bookmarks]
1097 1097 # do we force pull?
1098 1098 self.force = force
1099 1099 # whether a streaming clone was requested
1100 1100 self.streamclonerequested = streamclonerequested
1101 1101 # transaction manager
1102 1102 self.trmanager = None
1103 1103 # set of common changesets between local and remote before pull
1104 1104 self.common = None
1105 1105 # set of pulled heads
1106 1106 self.rheads = None
1107 1107 # list of missing changesets to fetch remotely
1108 1108 self.fetch = None
1109 1109 # remote bookmarks data
1110 1110 self.remotebookmarks = remotebookmarks
1111 1111 # result of changegroup pulling (used as return code by pull)
1112 1112 self.cgresult = None
1113 1113 # list of steps already done
1114 1114 self.stepsdone = set()
1115 1115 # Whether we attempted a clone from pre-generated bundles.
1116 1116 self.clonebundleattempted = False
1117 1117
1118 1118 @util.propertycache
1119 1119 def pulledsubset(self):
1120 1120 """heads of the set of changeset target by the pull"""
1121 1121 # compute target subset
1122 1122 if self.heads is None:
1123 1123 # We pulled everything possible
1124 1124 # sync on everything common
1125 1125 c = set(self.common)
1126 1126 ret = list(self.common)
1127 1127 for n in self.rheads:
1128 1128 if n not in c:
1129 1129 ret.append(n)
1130 1130 return ret
1131 1131 else:
1132 1132 # We pulled a specific subset
1133 1133 # sync on this subset
1134 1134 return self.heads
1135 1135
1136 1136 @util.propertycache
1137 1137 def canusebundle2(self):
1138 1138 return not _forcebundle1(self)
1139 1139
1140 1140 @util.propertycache
1141 1141 def remotebundle2caps(self):
1142 1142 return bundle2.bundle2caps(self.remote)
1143 1143
1144 1144 def gettransaction(self):
1145 1145 # deprecated; talk to trmanager directly
1146 1146 return self.trmanager.transaction()
1147 1147
1148 1148 class transactionmanager(object):
1149 1149 """An object to manage the life cycle of a transaction
1150 1150
1151 1151 It creates the transaction on demand and calls the appropriate hooks when
1152 1152 closing the transaction."""
1153 1153 def __init__(self, repo, source, url):
1154 1154 self.repo = repo
1155 1155 self.source = source
1156 1156 self.url = url
1157 1157 self._tr = None
1158 1158
1159 1159 def transaction(self):
1160 1160 """Return an open transaction object, constructing if necessary"""
1161 1161 if not self._tr:
1162 1162 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1163 1163 self._tr = self.repo.transaction(trname)
1164 1164 self._tr.hookargs['source'] = self.source
1165 1165 self._tr.hookargs['url'] = self.url
1166 1166 return self._tr
1167 1167
1168 1168 def close(self):
1169 1169 """close transaction if created"""
1170 1170 if self._tr is not None:
1171 1171 self._tr.close()
1172 1172
1173 1173 def release(self):
1174 1174 """release transaction if created"""
1175 1175 if self._tr is not None:
1176 1176 self._tr.release()
1177 1177
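# Editorial sketch: the transactionmanager life cycle, mirroring how pull()
# below uses it: create lazily, close on success, release unconditionally.
#
#   trmanager = transactionmanager(repo, 'pull', remote.url())
#   try:
#       tr = trmanager.transaction()  # opened on first use
#       ...                           # apply incoming data under tr
#       trmanager.close()
#   finally:
#       trmanager.release()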
1178 1178 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1179 1179 streamclonerequested=None):
1180 1180 """Fetch repository data from a remote.
1181 1181
1182 1182 This is the main function used to retrieve data from a remote repository.
1183 1183
1184 1184 ``repo`` is the local repository to clone into.
1185 1185 ``remote`` is a peer instance.
1186 1186 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1187 1187 default) means to pull everything from the remote.
1188 1188 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1189 1189 default, all remote bookmarks are pulled.
1190 1190 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1191 1191 initialization.
1192 1192 ``streamclonerequested`` is a boolean indicating whether a "streaming
1193 1193 clone" is requested. A "streaming clone" is essentially a raw file copy
1194 1194 of revlogs from the server. This only works when the local repository is
1195 1195 empty. The default value of ``None`` means to respect the server
1196 1196 configuration for preferring stream clones.
1197 1197
1198 1198 Returns the ``pulloperation`` created for this pull.
1199 1199 """
1200 1200 if opargs is None:
1201 1201 opargs = {}
1202 1202 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1203 1203 streamclonerequested=streamclonerequested, **opargs)
1204 1204 if pullop.remote.local():
1205 1205 missing = set(pullop.remote.requirements) - pullop.repo.supported
1206 1206 if missing:
1207 1207 msg = _("required features are not"
1208 1208 " supported in the destination:"
1209 1209 " %s") % (', '.join(sorted(missing)))
1210 1210 raise error.Abort(msg)
1211 1211
1212 1212 wlock = lock = None
1213 1213 try:
1214 1214 wlock = pullop.repo.wlock()
1215 1215 lock = pullop.repo.lock()
1216 1216 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1217 1217 streamclone.maybeperformlegacystreamclone(pullop)
1218 1218 # This should ideally be in _pullbundle2(). However, it needs to run
1219 1219 # before discovery to avoid extra work.
1220 1220 _maybeapplyclonebundle(pullop)
1221 1221 _pulldiscovery(pullop)
1222 1222 if pullop.canusebundle2:
1223 1223 _pullbundle2(pullop)
1224 1224 _pullchangeset(pullop)
1225 1225 _pullphase(pullop)
1226 1226 _pullbookmarks(pullop)
1227 1227 _pullobsolete(pullop)
1228 1228 pullop.trmanager.close()
1229 1229 finally:
1230 1230 lockmod.release(pullop.trmanager, lock, wlock)
1231 1231
1232 1232 return pullop
1233 1233
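# Editorial sketch: a minimal programmatic pull matching the docstring above.
# 'repo' and 'remote' are assumed to exist; cgresult carries the changegroup
# application result (0 meaning no changes were found).
#
#   pullop = pull(repo, remote, heads=None)  # pull everything
#   if pullop.cgresult == 0:
#       repo.ui.status('no changes found\n')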
1234 1234 # list of steps to perform discovery before pull
1235 1235 pulldiscoveryorder = []
1236 1236
1237 1237 # Mapping between step name and function
1238 1238 #
1239 1239 # This exists to help extensions wrap steps if necessary
1240 1240 pulldiscoverymapping = {}
1241 1241
1242 1242 def pulldiscovery(stepname):
1243 1243 """decorator for function performing discovery before pull
1244 1244
1245 1245 The function is added to the step -> function mapping and appended to the
1246 1246 list of steps. Beware that decorated functions will be added in order (this
1247 1247 may matter).
1248 1248
1249 1249 You can only use this decorator for a new step; if you want to wrap a step
1250 1250 from an extension, change the pulldiscovery dictionary directly."""
1251 1251 def dec(func):
1252 1252 assert stepname not in pulldiscoverymapping
1253 1253 pulldiscoverymapping[stepname] = func
1254 1254 pulldiscoveryorder.append(stepname)
1255 1255 return func
1256 1256 return dec
1257 1257
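# Editorial sketch: wrapping an existing discovery step from an extension by
# changing the mapping directly, as the docstring above suggests. The wrapper
# name is hypothetical.
#
#   origstep = pulldiscoverymapping['changegroup']
#   def wrappedstep(pullop):
#       pullop.repo.ui.debug('entering changegroup discovery\n')
#       return origstep(pullop)
#   pulldiscoverymapping['changegroup'] = wrappedstep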
1258 1258 def _pulldiscovery(pullop):
1259 1259 """Run all discovery steps"""
1260 1260 for stepname in pulldiscoveryorder:
1261 1261 step = pulldiscoverymapping[stepname]
1262 1262 step(pullop)
1263 1263
1264 1264 @pulldiscovery('b1:bookmarks')
1265 1265 def _pullbookmarkbundle1(pullop):
1266 1266 """fetch bookmark data in bundle1 case
1267 1267
1268 1268 If not using bundle2, we have to fetch bookmarks before changeset
1269 1269 discovery to reduce the chance and impact of race conditions."""
1270 1270 if pullop.remotebookmarks is not None:
1271 1271 return
1272 1272 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1273 1273 # all known bundle2 servers now support listkeys, but let's be nice with
1274 1274 # new implementations.
1275 1275 return
1276 1276 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1277 1277
1278 1278
1279 1279 @pulldiscovery('changegroup')
1280 1280 def _pulldiscoverychangegroup(pullop):
1281 1281 """discovery phase for the pull
1282 1282
1283 1283 Currently handles changeset discovery only; it will change to handle all discovery
1284 1284 at some point."""
1285 1285 tmp = discovery.findcommonincoming(pullop.repo,
1286 1286 pullop.remote,
1287 1287 heads=pullop.heads,
1288 1288 force=pullop.force)
1289 1289 common, fetch, rheads = tmp
1290 1290 nm = pullop.repo.unfiltered().changelog.nodemap
1291 1291 if fetch and rheads:
1292 1292 # If a remote head is filtered locally, let's drop it from the unknown
1293 1293 # remote heads and put it back in common.
1294 1294 #
1295 1295 # This is a hackish solution to catch most of the "common but locally
1296 1296 # hidden" situations. We do not perform discovery on the unfiltered
1297 1297 # repository because it would end up doing a pathological amount of
1298 1298 # round trips for a huge amount of changesets we do not care about.
1299 1299 #
1300 1300 # If a set of such "common but filtered" changesets exists on the server
1301 1301 # but does not include a remote head, we will not be able to detect it,
1302 1302 scommon = set(common)
1303 1303 filteredrheads = []
1304 1304 for n in rheads:
1305 1305 if n in nm:
1306 1306 if n not in scommon:
1307 1307 common.append(n)
1308 1308 else:
1309 1309 filteredrheads.append(n)
1310 1310 if not filteredrheads:
1311 1311 fetch = []
1312 1312 rheads = filteredrheads
1313 1313 pullop.common = common
1314 1314 pullop.fetch = fetch
1315 1315 pullop.rheads = rheads
1316 1316
1317 1317 def _pullbundle2(pullop):
1318 1318 """pull data using bundle2
1319 1319
1320 1320 For now, the only supported data are changegroup."""
1321 1321 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1322 1322
1323 1323 streaming, streamreqs = streamclone.canperformstreamclone(pullop)
1324 1324
1325 1325 # pulling changegroup
1326 1326 pullop.stepsdone.add('changegroup')
1327 1327
1328 1328 kwargs['common'] = pullop.common
1329 1329 kwargs['heads'] = pullop.heads or pullop.rheads
1330 1330 kwargs['cg'] = pullop.fetch
1331 1331 if 'listkeys' in pullop.remotebundle2caps:
1332 1332 kwargs['listkeys'] = ['phases']
1333 1333 if pullop.remotebookmarks is None:
1334 1334 # make sure to always include bookmark data when migrating
1335 1335 # `hg incoming --bundle` to using this function.
1336 1336 kwargs['listkeys'].append('bookmarks')
1337 1337
1338 1338 # If this is a full pull / clone and the server supports the clone bundles
1339 1339 # feature, tell the server whether we attempted a clone bundle. The
1340 1340 # presence of this flag indicates the client supports clone bundles. This
1341 1341 # will enable the server to treat clients that support clone bundles
1342 1342 # differently from those that don't.
1343 1343 if (pullop.remote.capable('clonebundles')
1344 1344 and pullop.heads is None and list(pullop.common) == [nullid]):
1345 1345 kwargs['cbattempted'] = pullop.clonebundleattempted
1346 1346
1347 1347 if streaming:
1348 1348 pullop.repo.ui.status(_('streaming all changes\n'))
1349 1349 elif not pullop.fetch:
1350 1350 pullop.repo.ui.status(_("no changes found\n"))
1351 1351 pullop.cgresult = 0
1352 1352 else:
1353 1353 if pullop.heads is None and list(pullop.common) == [nullid]:
1354 1354 pullop.repo.ui.status(_("requesting all changes\n"))
1355 1355 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1356 1356 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1357 1357 if obsolete.commonversion(remoteversions) is not None:
1358 1358 kwargs['obsmarkers'] = True
1359 1359 pullop.stepsdone.add('obsmarkers')
1360 1360 _pullbundle2extraprepare(pullop, kwargs)
1361 1361 bundle = pullop.remote.getbundle('pull', **kwargs)
1362 1362 try:
1363 1363 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1364 1364 except error.BundleValueError as exc:
1365 1365 raise error.Abort(_('missing support for %s') % exc)
1366 1366
1367 1367 if pullop.fetch:
1368 1368 results = [cg['return'] for cg in op.records['changegroup']]
1369 1369 pullop.cgresult = changegroup.combineresults(results)
1370 1370
1371 1371 # processing phases change
1372 1372 for namespace, value in op.records['listkeys']:
1373 1373 if namespace == 'phases':
1374 1374 _pullapplyphases(pullop, value)
1375 1375
1376 1376 # processing bookmark update
1377 1377 for namespace, value in op.records['listkeys']:
1378 1378 if namespace == 'bookmarks':
1379 1379 pullop.remotebookmarks = value
1380 1380
1381 1381 # bookmark data were either already there or pulled in the bundle
1382 1382 if pullop.remotebookmarks is not None:
1383 1383 _pullbookmarks(pullop)
1384 1384
1385 1385 def _pullbundle2extraprepare(pullop, kwargs):
1386 1386 """hook function so that extensions can extend the getbundle call"""
1387 1387 pass
1388 1388
1389 1389 def _pullchangeset(pullop):
1390 1390 """pull changeset from unbundle into the local repo"""
1391 1391 # We delay opening the transaction as late as possible so we
1392 1392 # don't open a transaction for nothing and don't break future useful
1393 1393 # rollback calls
1394 1394 if 'changegroup' in pullop.stepsdone:
1395 1395 return
1396 1396 pullop.stepsdone.add('changegroup')
1397 1397 if not pullop.fetch:
1398 1398 pullop.repo.ui.status(_("no changes found\n"))
1399 1399 pullop.cgresult = 0
1400 1400 return
1401 1401 pullop.gettransaction()
1402 1402 if pullop.heads is None and list(pullop.common) == [nullid]:
1403 1403 pullop.repo.ui.status(_("requesting all changes\n"))
1404 1404 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1405 1405 # issue1320, avoid a race if remote changed after discovery
1406 1406 pullop.heads = pullop.rheads
1407 1407
1408 1408 if pullop.remote.capable('getbundle'):
1409 1409 # TODO: get bundlecaps from remote
1410 1410 cg = pullop.remote.getbundle('pull', common=pullop.common,
1411 1411 heads=pullop.heads or pullop.rheads)
1412 1412 elif pullop.heads is None:
1413 1413 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1414 1414 elif not pullop.remote.capable('changegroupsubset'):
1415 1415 raise error.Abort(_("partial pull cannot be done because "
1416 1416 "other repository doesn't support "
1417 1417 "changegroupsubset."))
1418 1418 else:
1419 1419 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1420 1420 pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
1421 1421
1422 1422 def _pullphase(pullop):
1423 1423 # Get remote phases data from remote
1424 1424 if 'phases' in pullop.stepsdone:
1425 1425 return
1426 1426 remotephases = pullop.remote.listkeys('phases')
1427 1427 _pullapplyphases(pullop, remotephases)
1428 1428
1429 1429 def _pullapplyphases(pullop, remotephases):
1430 1430 """apply phase movement from observed remote state"""
1431 1431 if 'phases' in pullop.stepsdone:
1432 1432 return
1433 1433 pullop.stepsdone.add('phases')
1434 1434 publishing = bool(remotephases.get('publishing', False))
1435 1435 if remotephases and not publishing:
1436 1436 # remote is new and non-publishing
1437 1437 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1438 1438 pullop.pulledsubset,
1439 1439 remotephases)
1440 1440 dheads = pullop.pulledsubset
1441 1441 else:
1442 1442 # Remote is old or publishing; all common changesets
1443 1443 # should be seen as public
1444 1444 pheads = pullop.pulledsubset
1445 1445 dheads = []
1446 1446 unfi = pullop.repo.unfiltered()
1447 1447 phase = unfi._phasecache.phase
1448 1448 rev = unfi.changelog.nodemap.get
1449 1449 public = phases.public
1450 1450 draft = phases.draft
1451 1451
1452 1452 # exclude changesets already public locally and update the others
1453 1453 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1454 1454 if pheads:
1455 1455 tr = pullop.gettransaction()
1456 1456 phases.advanceboundary(pullop.repo, tr, public, pheads)
1457 1457
1458 1458 # exclude changesets already draft locally and update the others
1459 1459 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1460 1460 if dheads:
1461 1461 tr = pullop.gettransaction()
1462 1462 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1463 1463
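One subtlety in the branch above: the ``publishing`` flag is detected by string truthiness, so any non-empty value in the listkeys reply counts as publishing. A standalone illustration (the reply dicts are invented):

# A publishing server advertises a 'publishing' key whose value is a
# non-empty string, which bool() treats as true.
assert bool({'publishing': 'True'}.get('publishing', False))
# A pre-phases or non-publishing reply simply lacks the key.
assert not bool({}.get('publishing', False))
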
1464 1464 def _pullbookmarks(pullop):
1465 1465 """process the remote bookmark information to update the local one"""
1466 1466 if 'bookmarks' in pullop.stepsdone:
1467 1467 return
1468 1468 pullop.stepsdone.add('bookmarks')
1469 1469 repo = pullop.repo
1470 1470 remotebookmarks = pullop.remotebookmarks
1471 1471 remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
1472 1472 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1473 1473 pullop.remote.url(),
1474 1474 pullop.gettransaction,
1475 1475 explicit=pullop.explicitbookmarks)
1476 1476
1477 1477 def _pullobsolete(pullop):
1478 1478 """utility function to pull obsolete markers from a remote
1479 1479
1480 1480 `gettransaction` is a function that returns the pull transaction, creating
1481 1481 one if necessary. We return the transaction to inform the calling code that
1482 1482 a new transaction has been created (when applicable).
1483 1483
1484 1484 Exists mostly to allow overriding for experimentation purposes"""
1485 1485 if 'obsmarkers' in pullop.stepsdone:
1486 1486 return
1487 1487 pullop.stepsdone.add('obsmarkers')
1488 1488 tr = None
1489 1489 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1490 1490 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1491 1491 remoteobs = pullop.remote.listkeys('obsolete')
1492 1492 if 'dump0' in remoteobs:
1493 1493 tr = pullop.gettransaction()
1494 1494 markers = []
1495 1495 for key in sorted(remoteobs, reverse=True):
1496 1496 if key.startswith('dump'):
1497 1497 data = base85.b85decode(remoteobs[key])
1498 1498 version, newmarks = obsolete._readmarkers(data)
1499 1499 markers += newmarks
1500 1500 if markers:
1501 1501 pullop.repo.obsstore.add(tr, markers)
1502 1502 pullop.repo.invalidatevolatilesets()
1503 1503 return tr
1504 1504
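For reference, markers travel in the listkeys reply as base85-armored blobs under ``dump0``, ``dump1``, ... keys. A hedged standalone helper mirroring the loop above (the helper name is invented; a real caller would feed each blob to ``obsolete._readmarkers``):

from mercurial import base85

def collectobsdumps(remoteobs):
    """return decoded marker streams from a listkeys('obsolete') reply"""
    blobs = []
    for key in sorted(remoteobs, reverse=True):
        if key.startswith('dump'):
            # pushkey values must be text-safe, hence the base85 armor
            blobs.append(base85.b85decode(remoteobs[key]))
    return blobs
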
1505 1505 def caps20to10(repo):
1506 1506 """return a set with appropriate options to use bundle20 during getbundle"""
1507 1507 caps = set(['HG20'])
1508 1508 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1509 1509 caps.add('bundle2=' + urlreq.quote(capsblob))
1510 1510 return caps
1511 1511
1512 1512 # List of names of steps to perform for a bundle2 getbundle; order matters.
1513 1513 getbundle2partsorder = []
1514 1514
1515 1515 # Mapping between step name and function
1516 1516 #
1517 1517 # This exists to help extensions wrap steps if necessary
1518 1518 getbundle2partsmapping = {}
1519 1519
1520 1520 def getbundle2partsgenerator(stepname, idx=None):
1521 1521 """decorator for function generating bundle2 part for getbundle
1522 1522
1523 1523 The function is added to the step -> function mapping and appended to the
1524 1524 list of steps. Beware that decorated functions will be added in order
1525 1525 (this may matter).
1526 1526
1527 1527 You can only use this decorator for new steps; if you want to wrap a step
1528 1528 from an extension, modify the getbundle2partsmapping dictionary directly."""
1529 1529 def dec(func):
1530 1530 assert stepname not in getbundle2partsmapping
1531 1531 getbundle2partsmapping[stepname] = func
1532 1532 if idx is None:
1533 1533 getbundle2partsorder.append(stepname)
1534 1534 else:
1535 1535 getbundle2partsorder.insert(idx, stepname)
1536 1536 return func
1537 1537 return dec
1538 1538
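As a hedged sketch of the registration described in the docstring (the step name, part type, and payload are invented; a real extension would also negotiate a matching client capability):

@getbundle2partsgenerator('myext-echo')
def _getbundleechopart(bundler, repo, source, bundlecaps=None,
                       b2caps=None, **kwargs):
    """hypothetical part carrying the local changeset count"""
    # a lowercase part type plus mandatory=False keeps the part advisory,
    # so receivers without the extension silently skip it
    bundler.newpart('myext:echo', data='changesets=%d' % len(repo),
                    mandatory=False)
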
1539 1539 def bundle2requested(bundlecaps):
1540 1540 if bundlecaps is not None:
1541 1541 return any(cap.startswith('HG2') for cap in bundlecaps)
1542 1542 return False
1543 1543
1544 1544 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1545 1545 **kwargs):
1546 1546 """Return chunks constituting a bundle's raw data.
1547 1547
1548 1548 Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
1549 1549 passed.
1550 1550
1551 1551 Returns an iterator over raw chunks (of varying sizes).
1552 1552 """
1553 1553 usebundle2 = bundle2requested(bundlecaps)
1554 1554 # bundle10 case
1555 1555 if not usebundle2:
1556 1556 if bundlecaps and not kwargs.get('cg', True):
1557 1557 raise ValueError(_('request for bundle10 must include changegroup'))
1558 1558
1559 1559 if kwargs:
1560 1560 raise ValueError(_('unsupported getbundle arguments: %s')
1561 1561 % ', '.join(sorted(kwargs.keys())))
1562 1562 outgoing = _computeoutgoing(repo, heads, common)
1563 1563 bundler = changegroup.getbundler('01', repo, bundlecaps)
1564 1564 return changegroup.getsubsetraw(repo, outgoing, bundler, source)
1565 1565
1566 1566 # bundle20 case
1567 1567 b2caps = {}
1568 1568 for bcaps in bundlecaps:
1569 1569 if bcaps.startswith('bundle2='):
1570 1570 blob = urlreq.unquote(bcaps[len('bundle2='):])
1571 1571 b2caps.update(bundle2.decodecaps(blob))
1572 1572 bundler = bundle2.bundle20(repo.ui, b2caps)
1573 1573
1574 1574 kwargs['heads'] = heads
1575 1575 kwargs['common'] = common
1576 1576
1577 1577 for name in getbundle2partsorder:
1578 1578 func = getbundle2partsmapping[name]
1579 1579 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1580 1580 **kwargs)
1581 1581
1582 1582 return bundler.getchunks()
1583 1583
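A hedged usage sketch (the output path is invented): a caller can request a bundle2 stream and spool the chunks to disk. The stream starts with the HG20 magic, so the file is a complete bundle2 payload.

caps = caps20to10(repo)  # advertise HG20 plus this repo's bundle2 caps
chunks = getbundlechunks(repo, 'serve', heads=None, common=None,
                         bundlecaps=caps, cg=True,
                         listkeys=['phases', 'bookmarks'])
with open('/tmp/stream.hg2', 'wb') as fh:
    for chunk in chunks:
        fh.write(chunk)
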
1584 1584 @getbundle2partsgenerator('changegroup')
1585 1585 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1586 1586 b2caps=None, heads=None, common=None, **kwargs):
1587 1587 """add a changegroup part to the requested bundle"""
1588 1588 cg = None
1589 1589 if kwargs.get('cg', True):
1590 1590 # build changegroup bundle here.
1591 1591 version = '01'
1592 1592 cgversions = b2caps.get('changegroup')
1593 1593 if cgversions: # 3.1 and 3.2 ship with an empty value
1594 1594 cgversions = [v for v in cgversions
1595 1595 if v in changegroup.supportedoutgoingversions(repo)]
1596 1596 if not cgversions:
1597 1597 raise ValueError(_('no common changegroup version'))
1598 1598 version = max(cgversions)
1599 1599 outgoing = _computeoutgoing(repo, heads, common)
1600 1600 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1601 1601 bundlecaps=bundlecaps,
1602 1602 version=version)
1603 1603
1604 1604 if cg:
1605 1605 part = bundler.newpart('changegroup', data=cg)
1606 1606 if cgversions:
1607 1607 part.addparam('version', version)
1608 1608 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1609 1609 if 'treemanifest' in repo.requirements:
1610 1610 part.addparam('treemanifest', '1')
1611 1611
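To make the negotiation above concrete (capability values invented): the client's advertised changegroup versions are intersected with what this repo can emit, and max() picks the newest common one.

# e.g. the client advertises ['01', '02'] while the repo supports
# '01', '02' and '03'; the intersection is ['01', '02'], max is '02'
cgversions = [v for v in ['01', '02'] if v in set(['01', '02', '03'])]
assert max(cgversions) == '02'
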
1612 1612 @getbundle2partsgenerator('listkeys')
1613 1613 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1614 1614 b2caps=None, **kwargs):
1615 1615 """add parts containing listkeys namespaces to the requested bundle"""
1616 1616 listkeys = kwargs.get('listkeys', ())
1617 1617 for namespace in listkeys:
1618 1618 part = bundler.newpart('listkeys')
1619 1619 part.addparam('namespace', namespace)
1620 1620 keys = repo.listkeys(namespace).items()
1621 1621 part.data = pushkey.encodekeys(keys)
1622 1622
1623 1623 @getbundle2partsgenerator('obsmarkers')
1624 1624 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1625 1625 b2caps=None, heads=None, **kwargs):
1626 1626 """add an obsolescence markers part to the requested bundle"""
1627 1627 if kwargs.get('obsmarkers', False):
1628 1628 if heads is None:
1629 1629 heads = repo.heads()
1630 1630 subset = [c.node() for c in repo.set('::%ln', heads)]
1631 1631 markers = repo.obsstore.relevantmarkers(subset)
1632 1632 markers = sorted(markers)
1633 1633 buildobsmarkerspart(bundler, markers)
1634 1634
1635 1635 @getbundle2partsgenerator('hgtagsfnodes')
1636 1636 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1637 1637 b2caps=None, heads=None, common=None,
1638 1638 **kwargs):
1639 1639 """Transfer the .hgtags filenodes mapping.
1640 1640
1641 1641 Only values for heads in this bundle will be transferred.
1642 1642
1643 1643 The part data consists of pairs of a 20-byte changeset node and the
1644 1644 raw .hgtags filenode value.
1645 1645 """
1646 1646 # Don't send unless:
1647 1647 # - changesets are being exchanged,
1648 1648 # - the client supports it.
1649 1649 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1650 1650 return
1651 1651
1652 1652 outgoing = _computeoutgoing(repo, heads, common)
1653 1653
1654 1654 if not outgoing.missingheads:
1655 1655 return
1656 1656
1657 1657 cache = tags.hgtagsfnodescache(repo.unfiltered())
1658 1658 chunks = []
1659 1659
1660 1660 # .hgtags fnodes are only relevant for head changesets. While we could
1661 1661 # transfer values for all known nodes, there will likely be little to
1662 1662 # no benefit.
1663 1663 #
1664 1664 # We don't bother using a generator to produce output data because
1665 1665 # a) we only have 40 bytes per head and even esoteric numbers of heads
1666 1666 # consume little memory (1M heads is 40MB) b) we don't want to send the
1667 1667 # part if we don't have entries and knowing if we have entries requires
1668 1668 # cache lookups.
1669 1669 for node in outgoing.missingheads:
1670 1670 # Don't compute missing, as this may slow down serving.
1671 1671 fnode = cache.getfnode(node, computemissing=False)
1672 1672 if fnode is not None:
1673 1673 chunks.extend([node, fnode])
1674 1674
1675 1675 if chunks:
1676 1676 bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1677 1677
1678 1678 def _getbookmarks(repo, **kwargs):
1679 1679 """Returns bookmark to node mapping.
1680 1680
1681 1681 This function is primarily used to generate `bookmarks` bundle2 part.
1682 1682 It is a separate function in order to make it easy to wrap it
1683 1683 in extensions. Passing `kwargs` to the function makes it easy to
1684 1684 add new parameters in extensions.
1685 1685 """
1686 1686
1687 1687 return dict(bookmod.listbinbookmarks(repo))
1688 1688
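A hedged sketch of the wrapping the docstring invites (the filtering logic and the 'private/' prefix are invented):

from mercurial import exchange, extensions

def _filteredbookmarks(orig, repo, **kwargs):
    """drop hypothetical 'private/' bookmarks from generated bundles"""
    marks = orig(repo, **kwargs)
    return dict((k, v) for k, v in marks.iteritems()
                if not k.startswith('private/'))

def extsetup(ui):
    extensions.wrapfunction(exchange, '_getbookmarks', _filteredbookmarks)
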
1689 1689 def check_heads(repo, their_heads, context):
1690 1690 """check if the heads of a repo have been modified
1691 1691
1692 1692 Used by peer for unbundling.
1693 1693 """
1694 1694 heads = repo.heads()
1695 1695 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
1696 1696 if not (their_heads == ['force'] or their_heads == heads or
1697 1697 their_heads == ['hashed', heads_hash]):
1698 1698 # someone else committed/pushed/unbundled while we
1699 1699 # were transferring data
1700 1700 raise error.PushRaced('repository changed while %s - '
1701 1701 'please try again' % context)
1702 1702
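For reference, the ``['hashed', ...]`` form accepted above is how a client summarizes the heads it observed: a SHA-1 over the concatenation of the sorted binary nodes. A standalone sketch (node values made up):

import hashlib

heads = ['\x11' * 20, '\x22' * 20]  # stand-ins for 20-byte node ids
heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
their_heads = ['hashed', heads_hash]  # what the wire protocol would send
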
1703 1703 def unbundle(repo, cg, heads, source, url):
1704 1704 """Apply a bundle to a repo.
1705 1705
1706 1706 This function makes sure the repo is locked during the application and has
1707 1707 a mechanism to check that no push race occurred between the creation of the
1708 1708 bundle and its application.
1709 1709
1710 1710 If the push was raced, a PushRaced exception is raised."""
1711 1711 r = 0
1712 1712 # need a transaction when processing a bundle2 stream
1713 1713 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1714 1714 lockandtr = [None, None, None]
1715 1715 recordout = None
1716 1716 # quick fix for output mismatch with bundle2 in 3.4
1717 1717 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1718 1718 False)
1719 1719 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1720 1720 captureoutput = True
1721 1721 try:
1722 1722 # note: outside bundle1, 'heads' is expected to be empty and this
1723 1723 # 'check_heads' call will be a no-op
1724 1724 check_heads(repo, heads, 'uploading changes')
1725 1725 # push can proceed
1726 if util.safehasattr(cg, 'params'):
1726 if not util.safehasattr(cg, 'params'):
1727 # legacy case: bundle1 (changegroup 01)
1728 lockandtr[1] = repo.lock()
1729 r = cg.apply(repo, source, url)
1730 else:
1727 1731 r = None
1728 1732 try:
1729 1733 def gettransaction():
1730 1734 if not lockandtr[2]:
1731 1735 lockandtr[0] = repo.wlock()
1732 1736 lockandtr[1] = repo.lock()
1733 1737 lockandtr[2] = repo.transaction(source)
1734 1738 lockandtr[2].hookargs['source'] = source
1735 1739 lockandtr[2].hookargs['url'] = url
1736 1740 lockandtr[2].hookargs['bundle2'] = '1'
1737 1741 return lockandtr[2]
1738 1742
1739 1743 # Do greedy locking by default until we're satisfied with lazy
1740 1744 # locking.
1741 1745 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1742 1746 gettransaction()
1743 1747
1744 1748 op = bundle2.bundleoperation(repo, gettransaction,
1745 1749 captureoutput=captureoutput)
1746 1750 try:
1747 1751 op = bundle2.processbundle(repo, cg, op=op)
1748 1752 finally:
1749 1753 r = op.reply
1750 1754 if captureoutput and r is not None:
1751 1755 repo.ui.pushbuffer(error=True, subproc=True)
1752 1756 def recordout(output):
1753 1757 r.newpart('output', data=output, mandatory=False)
1754 1758 if lockandtr[2] is not None:
1755 1759 lockandtr[2].close()
1756 1760 except BaseException as exc:
1757 1761 exc.duringunbundle2 = True
1758 1762 if captureoutput and r is not None:
1759 1763 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1760 1764 def recordout(output):
1761 1765 part = bundle2.bundlepart('output', data=output,
1762 1766 mandatory=False)
1763 1767 parts.append(part)
1764 1768 raise
1765 else:
1766 # legacy case: bundle1 (changegroup 01)
1767 lockandtr[1] = repo.lock()
1768 r = cg.apply(repo, source, url)
1769 1769 finally:
1770 1770 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1771 1771 if recordout is not None:
1772 1772 recordout(repo.ui.popbuffer())
1773 1773 return r
1774 1774
1775 1775 def _maybeapplyclonebundle(pullop):
1776 1776 """Apply a clone bundle from a remote, if possible."""
1777 1777
1778 1778 repo = pullop.repo
1779 1779 remote = pullop.remote
1780 1780
1781 1781 if not repo.ui.configbool('ui', 'clonebundles', True):
1782 1782 return
1783 1783
1784 1784 # Only run if local repo is empty.
1785 1785 if len(repo):
1786 1786 return
1787 1787
1788 1788 if pullop.heads:
1789 1789 return
1790 1790
1791 1791 if not remote.capable('clonebundles'):
1792 1792 return
1793 1793
1794 1794 res = remote._call('clonebundles')
1795 1795
1796 1796 # If we call the wire protocol command, that's good enough to record the
1797 1797 # attempt.
1798 1798 pullop.clonebundleattempted = True
1799 1799
1800 1800 entries = parseclonebundlesmanifest(repo, res)
1801 1801 if not entries:
1802 1802 repo.ui.note(_('no clone bundles available on remote; '
1803 1803 'falling back to regular clone\n'))
1804 1804 return
1805 1805
1806 1806 entries = filterclonebundleentries(repo, entries)
1807 1807 if not entries:
1808 1808 # There is a thundering herd concern here. However, if a server
1809 1809 # operator doesn't advertise bundles appropriate for its clients,
1810 1810 # they deserve what's coming. Furthermore, from a client's
1811 1811 # perspective, no automatic fallback would mean not being able to
1812 1812 # clone!
1813 1813 repo.ui.warn(_('no compatible clone bundles available on server; '
1814 1814 'falling back to regular clone\n'))
1815 1815 repo.ui.warn(_('(you may want to report this to the server '
1816 1816 'operator)\n'))
1817 1817 return
1818 1818
1819 1819 entries = sortclonebundleentries(repo.ui, entries)
1820 1820
1821 1821 url = entries[0]['URL']
1822 1822 repo.ui.status(_('applying clone bundle from %s\n') % url)
1823 1823 if trypullbundlefromurl(repo.ui, repo, url):
1824 1824 repo.ui.status(_('finished applying clone bundle\n'))
1825 1825 # Bundle failed.
1826 1826 #
1827 1827 # We abort by default to avoid the thundering herd of
1828 1828 # clients flooding a server that was expecting expensive
1829 1829 # clone load to be offloaded.
1830 1830 elif repo.ui.configbool('ui', 'clonebundlefallback', False):
1831 1831 repo.ui.warn(_('falling back to normal clone\n'))
1832 1832 else:
1833 1833 raise error.Abort(_('error applying bundle'),
1834 1834 hint=_('if this error persists, consider contacting '
1835 1835 'the server operator or disable clone '
1836 1836 'bundles via '
1837 1837 '"--config ui.clonebundles=false"'))
1838 1838
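The client-side switches consulted above, as they would appear in an hgrc (``ui.clonebundles`` defaults to true, ``ui.clonebundlefallback`` to false):

[ui]
# set to false to never attempt advertised clone bundles
clonebundles = true
# fall back to a regular clone instead of aborting when an advertised
# bundle fails to apply
clonebundlefallback = true
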
1839 1839 def parseclonebundlesmanifest(repo, s):
1840 1840 """Parses the raw text of a clone bundles manifest.
1841 1841
1842 1842 Returns a list of dicts. Each dict has a ``URL`` key corresponding
1843 1843 to the URL; the other keys are the attributes for the entry.
1844 1844 """
1845 1845 m = []
1846 1846 for line in s.splitlines():
1847 1847 fields = line.split()
1848 1848 if not fields:
1849 1849 continue
1850 1850 attrs = {'URL': fields[0]}
1851 1851 for rawattr in fields[1:]:
1852 1852 key, value = rawattr.split('=', 1)
1853 1853 key = urlreq.unquote(key)
1854 1854 value = urlreq.unquote(value)
1855 1855 attrs[key] = value
1856 1856
1857 1857 # Parse BUNDLESPEC into components. This makes client-side
1858 1858 # preferences easier to specify since you can prefer a single
1859 1859 # component of the BUNDLESPEC.
1860 1860 if key == 'BUNDLESPEC':
1861 1861 try:
1862 1862 comp, version, params = parsebundlespec(repo, value,
1863 1863 externalnames=True)
1864 1864 attrs['COMPRESSION'] = comp
1865 1865 attrs['VERSION'] = version
1866 1866 except error.InvalidBundleSpecification:
1867 1867 pass
1868 1868 except error.UnsupportedBundleSpecification:
1869 1869 pass
1870 1870
1871 1871 m.append(attrs)
1872 1872
1873 1873 return m
1874 1874
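To illustrate the format (URLs and attributes invented): each manifest line is a URL followed by space-separated, URI-encoded ``key=value`` attributes, and a well-formed BUNDLESPEC additionally yields derived COMPRESSION/VERSION keys.

manifest = ('https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true\n'
            'https://example.com/packed.hg BUNDLESPEC=none-packed1\n')
# parseclonebundlesmanifest(repo, manifest) would return:
# [{'URL': 'https://example.com/full.hg', 'BUNDLESPEC': 'gzip-v2',
#   'REQUIRESNI': 'true', 'COMPRESSION': 'gzip', 'VERSION': 'v2'},
#  {'URL': 'https://example.com/packed.hg', 'BUNDLESPEC': 'none-packed1',
#   'COMPRESSION': 'none', 'VERSION': 'packed1'}]
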
1875 1875 def filterclonebundleentries(repo, entries):
1876 1876 """Remove incompatible clone bundle manifest entries.
1877 1877
1878 1878 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
1879 1879 and returns a new list consisting of only the entries that this client
1880 1880 should be able to apply.
1881 1881
1882 1882 There is no guarantee we'll be able to apply all returned entries because
1883 1883 the metadata we use to filter on may be missing or wrong.
1884 1884 """
1885 1885 newentries = []
1886 1886 for entry in entries:
1887 1887 spec = entry.get('BUNDLESPEC')
1888 1888 if spec:
1889 1889 try:
1890 1890 parsebundlespec(repo, spec, strict=True)
1891 1891 except error.InvalidBundleSpecification as e:
1892 1892 repo.ui.debug(str(e) + '\n')
1893 1893 continue
1894 1894 except error.UnsupportedBundleSpecification as e:
1895 1895 repo.ui.debug('filtering %s because unsupported bundle '
1896 1896 'spec: %s\n' % (entry['URL'], str(e)))
1897 1897 continue
1898 1898
1899 1899 if 'REQUIRESNI' in entry and not sslutil.hassni:
1900 1900 repo.ui.debug('filtering %s because SNI not supported\n' %
1901 1901 entry['URL'])
1902 1902 continue
1903 1903
1904 1904 newentries.append(entry)
1905 1905
1906 1906 return newentries
1907 1907
1908 1908 class clonebundleentry(object):
1909 1909 """Represents an item in a clone bundles manifest.
1910 1910
1911 1911 This rich class is needed to support sorting since sorted() in Python 3
1912 1912 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
1913 1913 won't work.
1914 1914 """
1915 1915
1916 1916 def __init__(self, value, prefers):
1917 1917 self.value = value
1918 1918 self.prefers = prefers
1919 1919
1920 1920 def _cmp(self, other):
1921 1921 for prefkey, prefvalue in self.prefers:
1922 1922 avalue = self.value.get(prefkey)
1923 1923 bvalue = other.value.get(prefkey)
1924 1924
1925 1925 # Special case: b is missing the attribute and a matches exactly.
1926 1926 if avalue is not None and bvalue is None and avalue == prefvalue:
1927 1927 return -1
1928 1928
1929 1929 # Special case: a is missing the attribute and b matches exactly.
1930 1930 if bvalue is not None and avalue is None and bvalue == prefvalue:
1931 1931 return 1
1932 1932
1933 1933 # We can't compare unless attribute present on both.
1934 1934 if avalue is None or bvalue is None:
1935 1935 continue
1936 1936
1937 1937 # Same values should fall back to next attribute.
1938 1938 if avalue == bvalue:
1939 1939 continue
1940 1940
1941 1941 # Exact matches come first.
1942 1942 if avalue == prefvalue:
1943 1943 return -1
1944 1944 if bvalue == prefvalue:
1945 1945 return 1
1946 1946
1947 1947 # Fall back to next attribute.
1948 1948 continue
1949 1949
1950 1950 # If we got here we couldn't sort by attributes and prefers. Fall
1951 1951 # back to index order.
1952 1952 return 0
1953 1953
1954 1954 def __lt__(self, other):
1955 1955 return self._cmp(other) < 0
1956 1956
1957 1957 def __gt__(self, other):
1958 1958 return self._cmp(other) > 0
1959 1959
1960 1960 def __eq__(self, other):
1961 1961 return self._cmp(other) == 0
1962 1962
1963 1963 def __le__(self, other):
1964 1964 return self._cmp(other) <= 0
1965 1965
1966 1966 def __ge__(self, other):
1967 1967 return self._cmp(other) >= 0
1968 1968
1969 1969 def __ne__(self, other):
1970 1970 return self._cmp(other) != 0
1971 1971
1972 1972 def sortclonebundleentries(ui, entries):
1973 1973 prefers = ui.configlist('ui', 'clonebundleprefers', default=[])
1974 1974 if not prefers:
1975 1975 return list(entries)
1976 1976
1977 1977 prefers = [p.split('=', 1) for p in prefers]
1978 1978
1979 1979 items = sorted(clonebundleentry(v, prefers) for v in entries)
1980 1980 return [i.value for i in items]
1981 1981
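A small usage sketch of the sorting (entries and preference invented): with ``ui.clonebundleprefers=COMPRESSION=gzip``, the gzip entry wins regardless of manifest order.

entries = [{'URL': 'https://example.com/a.hg', 'COMPRESSION': 'bzip2'},
           {'URL': 'https://example.com/b.hg', 'COMPRESSION': 'gzip'}]
prefers = [p.split('=', 1) for p in ['COMPRESSION=gzip']]
items = sorted(clonebundleentry(v, prefers) for v in entries)
assert items[0].value['URL'] == 'https://example.com/b.hg'
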
1982 1982 def trypullbundlefromurl(ui, repo, url):
1983 1983 """Attempt to apply a bundle from a URL."""
1984 1984 lock = repo.lock()
1985 1985 try:
1986 1986 tr = repo.transaction('bundleurl')
1987 1987 try:
1988 1988 try:
1989 1989 fh = urlmod.open(ui, url)
1990 1990 cg = readbundle(ui, fh, 'stream')
1991 1991
1992 1992 if isinstance(cg, bundle2.unbundle20):
1993 1993 bundle2.processbundle(repo, cg, lambda: tr)
1994 1994 elif isinstance(cg, streamclone.streamcloneapplier):
1995 1995 cg.apply(repo)
1996 1996 else:
1997 1997 cg.apply(repo, 'clonebundles', url)
1998 1998 tr.close()
1999 1999 return True
2000 2000 except urlerr.httperror as e:
2001 2001 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
2002 2002 except urlerr.urlerror as e:
2003 2003 ui.warn(_('error fetching bundle: %s\n') % e.reason[1])
2004 2004
2005 2005 return False
2006 2006 finally:
2007 2007 tr.release()
2008 2008 finally:
2009 2009 lock.release()