1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullid,
17 17 )
18 18 from . import (
19 19 bookmarks as bookmod,
20 20 bundle2,
21 21 changegroup,
22 22 discovery,
23 23 error,
24 24 lock as lockmod,
25 25 obsolete,
26 26 phases,
27 27 pushkey,
28 28 scmutil,
29 29 sslutil,
30 30 streamclone,
31 31 url as urlmod,
32 32 util,
33 33 )
34 34
35 35 urlerr = util.urlerr
36 36 urlreq = util.urlreq
37 37
38 38 # Maps bundle version human names to changegroup versions.
39 39 _bundlespeccgversions = {'v1': '01',
40 40 'v2': '02',
41 41 'packed1': 's1',
42 42 'bundle2': '02', # legacy
43 43 }
44 44
45 45 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
46 46 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
47 47
48 48 def parsebundlespec(repo, spec, strict=True, externalnames=False):
49 49 """Parse a bundle string specification into parts.
50 50
51 51 Bundle specifications denote a well-defined bundle/exchange format.
52 52 The content of a given specification should not change over time in
53 53 order to ensure that bundles produced by a newer version of Mercurial are
54 54 readable from an older version.
55 55
56 56 The string currently has the form:
57 57
58 58 <compression>-<type>[;<parameter0>[;<parameter1>]]
59 59
60 60 Where <compression> is one of the supported compression formats
61 61 and <type> is (currently) a version string. A ";" can follow the type and
62 62 all text afterwards is interpreted as URI encoded, ";" delimited key=value
63 63 pairs.
64 64
65 65 If ``strict`` is True (the default) <compression> is required. Otherwise,
66 66 it is optional.
67 67
68 68 If ``externalnames`` is False (the default), the human-centric names will
69 69 be converted to their internal representation.
70 70
71 71 Returns a 3-tuple of (compression, version, parameters). Compression will
72 72 be ``None`` if not in strict mode and a compression isn't defined.
73 73
74 74 An ``InvalidBundleSpecification`` is raised when the specification is
75 75 not syntactically well formed.
76 76
77 77 An ``UnsupportedBundleSpecification`` is raised when the compression or
78 78 bundle type/version is not recognized.
79 79
80 80 Note: this function will likely eventually return a more complex data
81 81 structure, including bundle2 part information.
82 82 """
83 83 def parseparams(s):
84 84 if ';' not in s:
85 85 return s, {}
86 86
87 87 params = {}
88 88 version, paramstr = s.split(';', 1)
89 89
90 90 for p in paramstr.split(';'):
91 91 if '=' not in p:
92 92 raise error.InvalidBundleSpecification(
93 93 _('invalid bundle specification: '
94 94 'missing "=" in parameter: %s') % p)
95 95
96 96 key, value = p.split('=', 1)
97 97 key = urlreq.unquote(key)
98 98 value = urlreq.unquote(value)
99 99 params[key] = value
100 100
101 101 return version, params
102 102
103 103
104 104 if strict and '-' not in spec:
105 105 raise error.InvalidBundleSpecification(
106 106 _('invalid bundle specification; '
107 107 'must be prefixed with compression: %s') % spec)
108 108
109 109 if '-' in spec:
110 110 compression, version = spec.split('-', 1)
111 111
112 112 if compression not in util.compengines.supportedbundlenames:
113 113 raise error.UnsupportedBundleSpecification(
114 114 _('%s compression is not supported') % compression)
115 115
116 116 version, params = parseparams(version)
117 117
118 118 if version not in _bundlespeccgversions:
119 119 raise error.UnsupportedBundleSpecification(
120 120 _('%s is not a recognized bundle version') % version)
121 121 else:
122 122 # Value could be just the compression or just the version, in which
123 123 # case some defaults are assumed (but only when not in strict mode).
124 124 assert not strict
125 125
126 126 spec, params = parseparams(spec)
127 127
128 128 if spec in util.compengines.supportedbundlenames:
129 129 compression = spec
130 130 version = 'v1'
131 131 # Generaldelta repos require v2.
132 132 if 'generaldelta' in repo.requirements:
133 133 version = 'v2'
134 134 # Modern compression engines require v2.
135 135 if compression not in _bundlespecv1compengines:
136 136 version = 'v2'
137 137 elif spec in _bundlespeccgversions:
138 138 if spec == 'packed1':
139 139 compression = 'none'
140 140 else:
141 141 compression = 'bzip2'
142 142 version = spec
143 143 else:
144 144 raise error.UnsupportedBundleSpecification(
145 145 _('%s is not a recognized bundle specification') % spec)
146 146
147 147 # Bundle version 1 only supports a known set of compression engines.
148 148 if version == 'v1' and compression not in _bundlespecv1compengines:
149 149 raise error.UnsupportedBundleSpecification(
150 150 _('compression engine %s is not supported on v1 bundles') %
151 151 compression)
152 152
153 153 # The specification for packed1 can optionally declare the data formats
154 154 # required to apply it. If we see this metadata, compare against what the
155 155 # repo supports and error if the bundle isn't compatible.
156 156 if version == 'packed1' and 'requirements' in params:
157 157 requirements = set(params['requirements'].split(','))
158 158 missingreqs = requirements - repo.supportedformats
159 159 if missingreqs:
160 160 raise error.UnsupportedBundleSpecification(
161 161 _('missing support for repository features: %s') %
162 162 ', '.join(sorted(missingreqs)))
163 163
164 164 if not externalnames:
165 165 engine = util.compengines.forbundlename(compression)
166 166 compression = engine.bundletype()[1]
167 167 version = _bundlespeccgversions[version]
168 168 return compression, version, params
169 169
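# A sketch of what parsebundlespec() can return (illustrative values derived
# from the mappings above, not from the test suite):
#   parsebundlespec(repo, 'gzip-v2')
#       -> ('GZ', '02', {})        # internal names when externalnames=False
#   parsebundlespec(repo, 'none-packed1;requirements=generaldelta')
#       -> ('UN', 's1', {'requirements': 'generaldelta'})
#   parsebundlespec(repo, 'v2', strict=False)
#       -> ('BZ', '02', {})        # compression defaults to bzip2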
170 170 def readbundle(ui, fh, fname, vfs=None):
171 171 header = changegroup.readexactly(fh, 4)
172 172
173 173 alg = None
174 174 if not fname:
175 175 fname = "stream"
176 176 if not header.startswith('HG') and header.startswith('\0'):
177 177 fh = changegroup.headerlessfixup(fh, header)
178 178 header = "HG10"
179 179 alg = 'UN'
180 180 elif vfs:
181 181 fname = vfs.join(fname)
182 182
183 183 magic, version = header[0:2], header[2:4]
184 184
185 185 if magic != 'HG':
186 186 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
187 187 if version == '10':
188 188 if alg is None:
189 189 alg = changegroup.readexactly(fh, 2)
190 190 return changegroup.cg1unpacker(fh, alg)
191 191 elif version.startswith('2'):
192 192 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
193 193 elif version == 'S1':
194 194 return streamclone.streamcloneapplier(fh)
195 195 else:
196 196 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
197 197
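# The header dispatch above, summarized (a sketch; the 4-byte magic is
# 'HG' + version, optionally followed by a 2-byte compression code):
#   'HG10' + 'UN'/'GZ'/'BZ' -> changegroup.cg1unpacker
#   'HG20'                  -> bundle2.getunbundler
#   'HGS1' + 'UN'           -> streamclone.streamcloneapplier
# A headerless stream beginning with '\0' is fixed up and treated as an
# uncompressed HG10 bundle.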
198 198 def getbundlespec(ui, fh):
199 199 """Infer the bundlespec from a bundle file handle.
200 200
201 201 The input file handle is seeked and the original seek position is not
202 202 restored.
203 203 """
204 204 def speccompression(alg):
205 205 try:
206 206 return util.compengines.forbundletype(alg).bundletype()[0]
207 207 except KeyError:
208 208 return None
209 209
210 210 b = readbundle(ui, fh, None)
211 211 if isinstance(b, changegroup.cg1unpacker):
212 212 alg = b._type
213 213 if alg == '_truncatedBZ':
214 214 alg = 'BZ'
215 215 comp = speccompression(alg)
216 216 if not comp:
217 217 raise error.Abort(_('unknown compression algorithm: %s') % alg)
218 218 return '%s-v1' % comp
219 219 elif isinstance(b, bundle2.unbundle20):
220 220 if 'Compression' in b.params:
221 221 comp = speccompression(b.params['Compression'])
222 222 if not comp:
223 223 raise error.Abort(_('unknown compression algorithm: %s') % comp)
224 224 else:
225 225 comp = 'none'
226 226
227 227 version = None
228 228 for part in b.iterparts():
229 229 if part.type == 'changegroup':
230 230 version = part.params['version']
231 231 if version in ('01', '02'):
232 232 version = 'v2'
233 233 else:
234 234 raise error.Abort(_('changegroup version %s does not have '
235 235 'a known bundlespec') % version,
236 236 hint=_('try upgrading your Mercurial '
237 237 'client'))
238 238
239 239 if not version:
240 240 raise error.Abort(_('could not identify changegroup version in '
241 241 'bundle'))
242 242
243 243 return '%s-%s' % (comp, version)
244 244 elif isinstance(b, streamclone.streamcloneapplier):
245 245 requirements = streamclone.readbundle1header(fh)[2]
246 246 params = 'requirements=%s' % ','.join(sorted(requirements))
247 247 return 'none-packed1;%s' % urlreq.quote(params)
248 248 else:
249 249 raise error.Abort(_('unknown bundle type: %s') % b)
250 250
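# Illustrative outputs of getbundlespec() (assumed typical cases, matching
# the mappings above): a bzip2 cg1 bundle yields 'bzip2-v1', a gzip bundle2
# containing a changegroup yields 'gzip-v2', and a stream clone bundle
# yields e.g. 'none-packed1;requirements=revlogv1' -- all strings that
# parsebundlespec() accepts back.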
251 251 def _computeoutgoing(repo, heads, common):
252 252 """Computes which revs are outgoing given a set of common
253 253 and a set of heads.
254 254
255 255 This is a separate function so extensions can have access to
256 256 the logic.
257 257
258 258 Returns a discovery.outgoing object.
259 259 """
260 260 cl = repo.changelog
261 261 if common:
262 262 hasnode = cl.hasnode
263 263 common = [n for n in common if hasnode(n)]
264 264 else:
265 265 common = [nullid]
266 266 if not heads:
267 267 heads = cl.heads()
268 268 return discovery.outgoing(repo, common, heads)
269 269
270 270 def _forcebundle1(op):
271 271 """return true if a pull/push must use bundle1
272 272
273 273 This function is used to allow testing of the older bundle version"""
274 274 ui = op.repo.ui
275 275 forcebundle1 = False
276 276 # The goal of this config is to allow developers to choose the bundle
277 277 # version used during exchange. This is especially handy during tests.
278 278 # The value is a list of bundle versions to pick from; the highest
279 279 # version should be used.
280 280 #
281 281 # developer config: devel.legacy.exchange
282 282 exchange = ui.configlist('devel', 'legacy.exchange')
283 283 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
284 284 return forcebundle1 or not op.remote.capable('bundle2')
285 285
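# For example, a test can force the legacy exchange path even against a
# bundle2-capable peer with this developer-only configuration (a minimal
# sketch):
#   [devel]
#   legacy.exchange = bundle1
# With 'bundle1' listed and 'bundle2' absent, _forcebundle1() returns True.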
286 286 class pushoperation(object):
287 287 """A object that represent a single push operation
288 288
289 289 Its purpose is to carry push related state and very common operations.
290 290
291 291 A new pushoperation should be created at the beginning of each push and
292 292 discarded afterward.
293 293 """
294 294
295 295 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
296 296 bookmarks=()):
297 297 # repo we push from
298 298 self.repo = repo
299 299 self.ui = repo.ui
300 300 # repo we push to
301 301 self.remote = remote
302 302 # force option provided
303 303 self.force = force
304 304 # revs to be pushed (None is "all")
305 305 self.revs = revs
306 306 # bookmark explicitly pushed
307 307 self.bookmarks = bookmarks
308 308 # allow push of new branch
309 309 self.newbranch = newbranch
310 310 # did a local lock get acquired?
311 311 self.locallocked = None
312 312 # steps already performed
313 313 # (used to check which steps have already been performed through bundle2)
314 314 self.stepsdone = set()
315 315 # Integer version of the changegroup push result
316 316 # - None means nothing to push
317 317 # - 0 means HTTP error
318 318 # - 1 means we pushed and remote head count is unchanged *or*
319 319 # we have outgoing changesets but refused to push
320 320 # - other values as described by addchangegroup()
321 321 self.cgresult = None
322 322 # Boolean value for the bookmark push
323 323 self.bkresult = None
324 324 # discover.outgoing object (contains common and outgoing data)
325 325 self.outgoing = None
326 326 # all remote topological heads before the push
327 327 self.remoteheads = None
328 328 # Details of the remote branch pre and post push
329 329 #
330 330 # mapping: {'branch': ([remoteheads],
331 331 # [newheads],
332 332 # [unsyncedheads],
333 333 # [discardedheads])}
334 334 # - branch: the branch name
335 335 # - remoteheads: the list of remote heads known locally
336 336 # None if the branch is new
337 337 # - newheads: the new remote heads (known locally) with outgoing pushed
338 338 # - unsyncedheads: the list of remote heads unknown locally.
339 339 # - discardedheads: the list of remote heads made obsolete by the push
340 340 self.pushbranchmap = None
341 341 # testable as a boolean indicating if any nodes are missing locally.
342 342 self.incoming = None
343 343 # phase changes that must be pushed alongside the changesets
344 344 self.outdatedphases = None
345 345 # phase changes that must be pushed if the changeset push fails
346 346 self.fallbackoutdatedphases = None
347 347 # outgoing obsmarkers
348 348 self.outobsmarkers = set()
349 349 # outgoing bookmarks
350 350 self.outbookmarks = []
351 351 # transaction manager
352 352 self.trmanager = None
353 353 # map { pushkey partid -> callback handling failure}
354 354 # used to handle exception from mandatory pushkey part failure
355 355 self.pkfailcb = {}
356 356
357 357 @util.propertycache
358 358 def futureheads(self):
359 359 """future remote heads if the changeset push succeeds"""
360 360 return self.outgoing.missingheads
361 361
362 362 @util.propertycache
363 363 def fallbackheads(self):
364 364 """future remote heads if the changeset push fails"""
365 365 if self.revs is None:
366 366 # no target to push, all common heads are relevant
367 367 return self.outgoing.commonheads
368 368 unfi = self.repo.unfiltered()
369 369 # I want cheads = heads(::missingheads and ::commonheads)
370 370 # (missingheads is revs with secret changeset filtered out)
371 371 #
372 372 # This can be expressed as:
373 373 # cheads = ( (missingheads and ::commonheads)
374 374 # + (commonheads and ::missingheads))
375 375 # )
376 376 #
377 377 # while trying to push we already computed the following:
378 378 # common = (::commonheads)
379 379 # missing = ((commonheads::missingheads) - commonheads)
380 380 #
381 381 # We can pick:
382 382 # * missingheads part of common (::commonheads)
383 383 common = self.outgoing.common
384 384 nm = self.repo.changelog.nodemap
385 385 cheads = [node for node in self.revs if nm[node] in common]
386 386 # and
387 387 # * commonheads parents on missing
388 388 revset = unfi.set('%ln and parents(roots(%ln))',
389 389 self.outgoing.commonheads,
390 390 self.outgoing.missing)
391 391 cheads.extend(c.node() for c in revset)
392 392 return cheads
393 393
394 394 @property
395 395 def commonheads(self):
396 396 """set of all common heads after changeset bundle push"""
397 397 if self.cgresult:
398 398 return self.futureheads
399 399 else:
400 400 return self.fallbackheads
401 401
402 402 # mapping of messages used when pushing bookmarks
403 403 bookmsgmap = {'update': (_("updating bookmark %s\n"),
404 404 _('updating bookmark %s failed!\n')),
405 405 'export': (_("exporting bookmark %s\n"),
406 406 _('exporting bookmark %s failed!\n')),
407 407 'delete': (_("deleting remote bookmark %s\n"),
408 408 _('deleting remote bookmark %s failed!\n')),
409 409 }
410 410
411 411
412 412 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
413 413 opargs=None):
414 414 '''Push outgoing changesets (limited by revs) from a local
415 415 repository to remote. Return an integer:
416 416 - None means nothing to push
417 417 - 0 means HTTP error
418 418 - 1 means we pushed and remote head count is unchanged *or*
419 419 we have outgoing changesets but refused to push
420 420 - other values as described by addchangegroup()
421 421 '''
422 422 if opargs is None:
423 423 opargs = {}
424 424 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
425 425 **opargs)
426 426 if pushop.remote.local():
427 427 missing = (set(pushop.repo.requirements)
428 428 - pushop.remote.local().supported)
429 429 if missing:
430 430 msg = _("required features are not"
431 431 " supported in the destination:"
432 432 " %s") % (', '.join(sorted(missing)))
433 433 raise error.Abort(msg)
434 434
435 435 # there are two ways to push to remote repo:
436 436 #
437 437 # addchangegroup assumes local user can lock remote
438 438 # repo (local filesystem, old ssh servers).
439 439 #
440 440 # unbundle assumes local user cannot lock remote repo (new ssh
441 441 # servers, http servers).
442 442
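# Concretely (see _pushchangeset below): a peer advertising 'unbundle'
# receives remote.unbundle(cg, remoteheads, url) without a remote lock,
# while an old-style peer is locked here and receives
# remote.addchangegroup(cg, 'push', url).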
443 443 if not pushop.remote.canpush():
444 444 raise error.Abort(_("destination does not support push"))
445 445 # get local lock as we might write phase data
446 446 localwlock = locallock = None
447 447 try:
448 448 # bundle2 push may receive a reply bundle touching bookmarks or other
449 449 # things requiring the wlock. Take it now to ensure proper ordering.
450 450 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
451 451 if (not _forcebundle1(pushop)) and maypushback:
452 452 localwlock = pushop.repo.wlock()
453 453 locallock = pushop.repo.lock()
454 454 pushop.locallocked = True
455 455 except IOError as err:
456 456 pushop.locallocked = False
457 457 if err.errno != errno.EACCES:
458 458 raise
459 459 # source repo cannot be locked.
460 460 # We do not abort the push, but just disable the local phase
461 461 # synchronisation.
462 462 msg = 'cannot lock source repository: %s\n' % err
463 463 pushop.ui.debug(msg)
464 464 try:
465 465 if pushop.locallocked:
466 466 pushop.trmanager = transactionmanager(pushop.repo,
467 467 'push-response',
468 468 pushop.remote.url())
469 469 pushop.repo.checkpush(pushop)
470 470 lock = None
471 471 unbundle = pushop.remote.capable('unbundle')
472 472 if not unbundle:
473 473 lock = pushop.remote.lock()
474 474 try:
475 475 _pushdiscovery(pushop)
476 476 if not _forcebundle1(pushop):
477 477 _pushbundle2(pushop)
478 478 _pushchangeset(pushop)
479 479 _pushsyncphase(pushop)
480 480 _pushobsolete(pushop)
481 481 _pushbookmark(pushop)
482 482 finally:
483 483 if lock is not None:
484 484 lock.release()
485 485 if pushop.trmanager:
486 486 pushop.trmanager.close()
487 487 finally:
488 488 if pushop.trmanager:
489 489 pushop.trmanager.release()
490 490 if locallock is not None:
491 491 locallock.release()
492 492 if localwlock is not None:
493 493 localwlock.release()
494 494
495 495 return pushop
496 496
497 497 # list of steps to perform discovery before push
498 498 pushdiscoveryorder = []
499 499
500 500 # Mapping between step name and function
501 501 #
502 502 # This exists to help extensions wrap steps if necessary
503 503 pushdiscoverymapping = {}
504 504
505 505 def pushdiscovery(stepname):
506 506 """decorator for function performing discovery before push
507 507
508 508 The function is added to the step -> function mapping and appended to the
509 509 list of steps. Beware that decorated functions will be added in order (this
510 510 may matter).
511 511
512 512 You can only use this decorator for a new step; if you want to wrap a step
513 513 from an extension, change the pushdiscoverymapping dictionary directly."""
514 514 def dec(func):
515 515 assert stepname not in pushdiscoverymapping
516 516 pushdiscoverymapping[stepname] = func
517 517 pushdiscoveryorder.append(stepname)
518 518 return func
519 519 return dec
520 520
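# A hypothetical extension step (illustrative only). Because registration
# order is preserved, it runs after the built-in discovery steps:
#   @pushdiscovery('mydata')
#   def _pushdiscoverymydata(pushop):
#       pushop.ui.debug('discovering mydata\n')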
521 521 def _pushdiscovery(pushop):
522 522 """Run all discovery steps"""
523 523 for stepname in pushdiscoveryorder:
524 524 step = pushdiscoverymapping[stepname]
525 525 step(pushop)
526 526
527 527 @pushdiscovery('changeset')
528 528 def _pushdiscoverychangeset(pushop):
529 529 """discover the changeset that need to be pushed"""
530 530 fci = discovery.findcommonincoming
531 531 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
532 532 common, inc, remoteheads = commoninc
533 533 fco = discovery.findcommonoutgoing
534 534 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
535 535 commoninc=commoninc, force=pushop.force)
536 536 pushop.outgoing = outgoing
537 537 pushop.remoteheads = remoteheads
538 538 pushop.incoming = inc
539 539
540 540 @pushdiscovery('phase')
541 541 def _pushdiscoveryphase(pushop):
542 542 """discover the phase that needs to be pushed
543 543
544 544 (computed for both success and failure case for changesets push)"""
545 545 outgoing = pushop.outgoing
546 546 unfi = pushop.repo.unfiltered()
547 547 remotephases = pushop.remote.listkeys('phases')
548 548 publishing = remotephases.get('publishing', False)
549 549 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
550 550 and remotephases # server supports phases
551 551 and not pushop.outgoing.missing # no changesets to be pushed
552 552 and publishing):
553 553 # When:
554 554 # - this is a subrepo push
555 555 # - and the remote supports phases
556 556 # - and no changesets are to be pushed
557 557 # - and the remote is publishing
558 558 # we may be in the issue 3871 case!
559 559 # We drop the phase synchronisation that would otherwise be done as
560 560 # a courtesy, which would publish on the remote changesets that are
561 561 # possibly still draft locally.
562 562 remotephases = {'publishing': 'True'}
563 563 ana = phases.analyzeremotephases(pushop.repo,
564 564 pushop.fallbackheads,
565 565 remotephases)
566 566 pheads, droots = ana
567 567 extracond = ''
568 568 if not publishing:
569 569 extracond = ' and public()'
570 570 revset = 'heads((%%ln::%%ln) %s)' % extracond
571 571 # Get the list of all revs that are draft on the remote but public here.
572 572 # XXX Beware that the revset breaks if droots is not strictly a set of
573 573 # XXX roots; we may want to ensure it is, but that is costly.
574 574 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
575 575 if not outgoing.missing:
576 576 future = fallback
577 577 else:
578 578 # add the changesets we are going to push as draft
579 579 #
580 580 # This should not be necessary for a publishing server, but because of
581 581 # an issue fixed in xxxxx we have to do it anyway.
582 582 fdroots = list(unfi.set('roots(%ln + %ln::)',
583 583 outgoing.missing, droots))
584 584 fdroots = [f.node() for f in fdroots]
585 585 future = list(unfi.set(revset, fdroots, pushop.futureheads))
586 586 pushop.outdatedphases = future
587 587 pushop.fallbackoutdatedphases = fallback
588 588
589 589 @pushdiscovery('obsmarker')
590 590 def _pushdiscoveryobsmarkers(pushop):
591 591 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
592 592 and pushop.repo.obsstore
593 593 and 'obsolete' in pushop.remote.listkeys('namespaces')):
594 594 repo = pushop.repo
595 595 # very naive computation, which can be quite expensive on a big repo.
596 596 # However, evolution is currently slow on them anyway.
597 597 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
598 598 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
599 599
600 600 @pushdiscovery('bookmarks')
601 601 def _pushdiscoverybookmarks(pushop):
602 602 ui = pushop.ui
603 603 repo = pushop.repo.unfiltered()
604 604 remote = pushop.remote
605 605 ui.debug("checking for updated bookmarks\n")
606 606 ancestors = ()
607 607 if pushop.revs:
608 608 revnums = map(repo.changelog.rev, pushop.revs)
609 609 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
610 610 remotebookmark = remote.listkeys('bookmarks')
611 611
612 612 explicit = set([repo._bookmarks.expandname(bookmark)
613 613 for bookmark in pushop.bookmarks])
614 614
615 615 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
616 616 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
617 617
618 618 def safehex(x):
619 619 if x is None:
620 620 return x
621 621 return hex(x)
622 622
623 623 def hexifycompbookmarks(bookmarks):
624 624 for b, scid, dcid in bookmarks:
625 625 yield b, safehex(scid), safehex(dcid)
626 626
627 627 comp = [hexifycompbookmarks(marks) for marks in comp]
628 628 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
629 629
630 630 for b, scid, dcid in advsrc:
631 631 if b in explicit:
632 632 explicit.remove(b)
633 633 if not ancestors or repo[scid].rev() in ancestors:
634 634 pushop.outbookmarks.append((b, dcid, scid))
635 635 # search for added bookmarks
636 636 for b, scid, dcid in addsrc:
637 637 if b in explicit:
638 638 explicit.remove(b)
639 639 pushop.outbookmarks.append((b, '', scid))
640 640 # search for overwritten bookmarks
641 641 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
642 642 if b in explicit:
643 643 explicit.remove(b)
644 644 pushop.outbookmarks.append((b, dcid, scid))
645 645 # search for bookmarks to delete
646 646 for b, scid, dcid in adddst:
647 647 if b in explicit:
648 648 explicit.remove(b)
649 649 # treat as "deleted locally"
650 650 pushop.outbookmarks.append((b, dcid, ''))
651 651 # identical bookmarks shouldn't get reported
652 652 for b, scid, dcid in same:
653 653 if b in explicit:
654 654 explicit.remove(b)
655 655
656 656 if explicit:
657 657 explicit = sorted(explicit)
658 658 # we should probably list all of them
659 659 ui.warn(_('bookmark %s does not exist on the local '
660 660 'or remote repository!\n') % explicit[0])
661 661 pushop.bkresult = 2
662 662
663 663 pushop.outbookmarks.sort()
664 664
665 665 def _pushcheckoutgoing(pushop):
666 666 outgoing = pushop.outgoing
667 667 unfi = pushop.repo.unfiltered()
668 668 if not outgoing.missing:
669 669 # nothing to push
670 670 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
671 671 return False
672 672 # something to push
673 673 if not pushop.force:
674 674 # if repo.obsstore is empty --> no obsolete markers,
675 675 # so we can skip the iteration
676 676 if unfi.obsstore:
677 677 # these messages are defined here to stay within the 80-char limit
678 678 mso = _("push includes obsolete changeset: %s!")
679 679 mst = {"unstable": _("push includes unstable changeset: %s!"),
680 680 "bumped": _("push includes bumped changeset: %s!"),
681 681 "divergent": _("push includes divergent changeset: %s!")}
682 682 # If we are about to push, and there is at least one
683 683 # obsolete or unstable changeset in missing, then at
684 684 # least one of the missing heads will be obsolete or
685 685 # unstable. So checking heads only is ok.
686 686 for node in outgoing.missingheads:
687 687 ctx = unfi[node]
688 688 if ctx.obsolete():
689 689 raise error.Abort(mso % ctx)
690 690 elif ctx.troubled():
691 691 raise error.Abort(mst[ctx.troubles()[0]] % ctx)
692 692
693 693 discovery.checkheads(pushop)
694 694 return True
695 695
696 696 # List of names of steps to perform for an outgoing bundle2, order matters.
697 697 b2partsgenorder = []
698 698
699 699 # Mapping between step name and function
700 700 #
701 701 # This exists to help extensions wrap steps if necessary
702 702 b2partsgenmapping = {}
703 703
704 704 def b2partsgenerator(stepname, idx=None):
705 705 """decorator for function generating bundle2 part
706 706
707 707 The function is added to the step -> function mapping and appended to the
708 708 list of steps. Beware that decorated functions will be added in order
709 709 (this may matter).
710 710
711 711 You can only use this decorator for new steps; if you want to wrap a step
712 712 from an extension, change the b2partsgenmapping dictionary directly."""
713 713 def dec(func):
714 714 assert stepname not in b2partsgenmapping
715 715 b2partsgenmapping[stepname] = func
716 716 if idx is None:
717 717 b2partsgenorder.append(stepname)
718 718 else:
719 719 b2partsgenorder.insert(idx, stepname)
720 720 return func
721 721 return dec
722 722
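# A hypothetical part generator (illustrative only). Passing idx=0 would
# make it run before the built-in 'changeset' step:
#   @b2partsgenerator('mypart', idx=0)
#   def _pushb2mypart(pushop, bundler):
#       if 'mypart' in pushop.stepsdone:
#           return
#       pushop.stepsdone.add('mypart')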
723 723 def _pushb2ctxcheckheads(pushop, bundler):
724 724 """Generate race condition checking parts
725 725
726 726 Exists as an independent function to aid extensions
727 727 """
728 728 # * with 'force', do not check for push races,
729 729 # * if we don't push anything, there is nothing to check.
730 730 if not pushop.force and pushop.outgoing.missingheads:
731 731 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
732 732 if not allowunrelated:
733 733 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
734 734 else:
735 735 affected = set()
736 736 for branch, heads in pushop.pushbranchmap.iteritems():
737 737 remoteheads, newheads, unsyncedheads, discardedheads = heads
738 738 if remoteheads is not None:
739 739 remote = set(remoteheads)
740 740 affected |= set(discardedheads) & remote
741 741 affected |= remote - set(newheads)
742 742 if affected:
743 743 data = iter(sorted(affected))
744 744 bundler.newpart('check:updated-heads', data=data)
745 745
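# Net effect of the helper above: a peer without the 'related' checkheads
# capability receives a 'check:heads' part listing every remote head we
# know about, while a newer peer receives a narrower 'check:updated-heads'
# part covering only the heads this push actually affects.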
746 746 @b2partsgenerator('changeset')
747 747 def _pushb2ctx(pushop, bundler):
748 748 """handle changegroup push through bundle2
749 749
750 750 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
751 751 """
752 752 if 'changesets' in pushop.stepsdone:
753 753 return
754 754 pushop.stepsdone.add('changesets')
755 755 # Send known heads to the server for race detection.
756 756 if not _pushcheckoutgoing(pushop):
757 757 return
758 758 pushop.repo.prepushoutgoinghooks(pushop)
759 759
760 760 _pushb2ctxcheckheads(pushop, bundler)
761 761
762 762 b2caps = bundle2.bundle2caps(pushop.remote)
763 763 version = '01'
764 764 cgversions = b2caps.get('changegroup')
765 765 if cgversions: # 3.1 and 3.2 ship with an empty value
766 766 cgversions = [v for v in cgversions
767 767 if v in changegroup.supportedoutgoingversions(
768 768 pushop.repo)]
769 769 if not cgversions:
770 770 raise ValueError(_('no common changegroup version'))
771 771 version = max(cgversions)
772 772 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
773 773 pushop.outgoing,
774 774 version=version)
775 775 cgpart = bundler.newpart('changegroup', data=cg)
776 776 if cgversions:
777 777 cgpart.addparam('version', version)
778 778 if 'treemanifest' in pushop.repo.requirements:
779 779 cgpart.addparam('treemanifest', '1')
780 780 def handlereply(op):
781 781 """extract addchangegroup returns from server reply"""
782 782 cgreplies = op.records.getreplies(cgpart.id)
783 783 assert len(cgreplies['changegroup']) == 1
784 784 pushop.cgresult = cgreplies['changegroup'][0]['return']
785 785 return handlereply
786 786
787 787 @b2partsgenerator('phase')
788 788 def _pushb2phases(pushop, bundler):
789 789 """handle phase push through bundle2"""
790 790 if 'phases' in pushop.stepsdone:
791 791 return
792 792 b2caps = bundle2.bundle2caps(pushop.remote)
793 793 if 'pushkey' not in b2caps:
794 794 return
795 795 pushop.stepsdone.add('phases')
796 796 part2node = []
797 797
798 798 def handlefailure(pushop, exc):
799 799 targetid = int(exc.partid)
800 800 for partid, node in part2node:
801 801 if partid == targetid:
802 802 raise error.Abort(_('updating %s to public failed') % node)
803 803
804 804 enc = pushkey.encode
805 805 for newremotehead in pushop.outdatedphases:
806 806 part = bundler.newpart('pushkey')
807 807 part.addparam('namespace', enc('phases'))
808 808 part.addparam('key', enc(newremotehead.hex()))
809 809 part.addparam('old', enc(str(phases.draft)))
810 810 part.addparam('new', enc(str(phases.public)))
811 811 part2node.append((part.id, newremotehead))
812 812 pushop.pkfailcb[part.id] = handlefailure
813 813
814 814 def handlereply(op):
815 815 for partid, node in part2node:
816 816 partrep = op.records.getreplies(partid)
817 817 results = partrep['pushkey']
818 818 assert len(results) <= 1
819 819 msg = None
820 820 if not results:
821 821 msg = _('server ignored update of %s to public!\n') % node
822 822 elif not int(results[0]['return']):
823 823 msg = _('updating %s to public failed!\n') % node
824 824 if msg is not None:
825 825 pushop.ui.warn(msg)
826 826 return handlereply
827 827
828 828 @b2partsgenerator('obsmarkers')
829 829 def _pushb2obsmarkers(pushop, bundler):
830 830 if 'obsmarkers' in pushop.stepsdone:
831 831 return
832 832 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
833 833 if obsolete.commonversion(remoteversions) is None:
834 834 return
835 835 pushop.stepsdone.add('obsmarkers')
836 836 if pushop.outobsmarkers:
837 837 markers = sorted(pushop.outobsmarkers)
838 838 bundle2.buildobsmarkerspart(bundler, markers)
839 839
840 840 @b2partsgenerator('bookmarks')
841 841 def _pushb2bookmarks(pushop, bundler):
842 842 """handle bookmark push through bundle2"""
843 843 if 'bookmarks' in pushop.stepsdone:
844 844 return
845 845 b2caps = bundle2.bundle2caps(pushop.remote)
846 846 if 'pushkey' not in b2caps:
847 847 return
848 848 pushop.stepsdone.add('bookmarks')
849 849 part2book = []
850 850 enc = pushkey.encode
851 851
852 852 def handlefailure(pushop, exc):
853 853 targetid = int(exc.partid)
854 854 for partid, book, action in part2book:
855 855 if partid == targetid:
856 856 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
857 857 # we should not be called for a part we did not generate
858 858 assert False
859 859
860 860 for book, old, new in pushop.outbookmarks:
861 861 part = bundler.newpart('pushkey')
862 862 part.addparam('namespace', enc('bookmarks'))
863 863 part.addparam('key', enc(book))
864 864 part.addparam('old', enc(old))
865 865 part.addparam('new', enc(new))
866 866 action = 'update'
867 867 if not old:
868 868 action = 'export'
869 869 elif not new:
870 870 action = 'delete'
871 871 part2book.append((part.id, book, action))
872 872 pushop.pkfailcb[part.id] = handlefailure
873 873
874 874 def handlereply(op):
875 875 ui = pushop.ui
876 876 for partid, book, action in part2book:
877 877 partrep = op.records.getreplies(partid)
878 878 results = partrep['pushkey']
879 879 assert len(results) <= 1
880 880 if not results:
881 881 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
882 882 else:
883 883 ret = int(results[0]['return'])
884 884 if ret:
885 885 ui.status(bookmsgmap[action][0] % book)
886 886 else:
887 887 ui.warn(bookmsgmap[action][1] % book)
888 888 if pushop.bkresult is not None:
889 889 pushop.bkresult = 1
890 890 return handlereply
891 891
892 892
893 893 def _pushbundle2(pushop):
894 894 """push data to the remote using bundle2
895 895
896 896 The only currently supported type of data is changegroup but this will
897 897 evolve in the future."""
898 898 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
899 899 pushback = (pushop.trmanager
900 900 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
901 901
902 902 # create reply capability
903 903 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
904 904 allowpushback=pushback))
905 905 bundler.newpart('replycaps', data=capsblob)
906 906 replyhandlers = []
907 907 for partgenname in b2partsgenorder:
908 908 partgen = b2partsgenmapping[partgenname]
909 909 ret = partgen(pushop, bundler)
910 910 if callable(ret):
911 911 replyhandlers.append(ret)
912 912 # do not push if nothing to push
913 913 if bundler.nbparts <= 1:
914 914 return
915 915 stream = util.chunkbuffer(bundler.getchunks())
916 916 try:
917 917 try:
918 918 reply = pushop.remote.unbundle(
919 919 stream, ['force'], pushop.remote.url())
920 920 except error.BundleValueError as exc:
921 921 raise error.Abort(_('missing support for %s') % exc)
922 922 try:
923 923 trgetter = None
924 924 if pushback:
925 925 trgetter = pushop.trmanager.transaction
926 926 op = bundle2.processbundle(pushop.repo, reply, trgetter)
927 927 except error.BundleValueError as exc:
928 928 raise error.Abort(_('missing support for %s') % exc)
929 929 except bundle2.AbortFromPart as exc:
930 930 pushop.ui.status(_('remote: %s\n') % exc)
931 931 if exc.hint is not None:
932 932 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
933 933 raise error.Abort(_('push failed on remote'))
934 934 except error.PushkeyFailed as exc:
935 935 partid = int(exc.partid)
936 936 if partid not in pushop.pkfailcb:
937 937 raise
938 938 pushop.pkfailcb[partid](pushop, exc)
939 939 for rephand in replyhandlers:
940 940 rephand(op)
941 941
942 942 def _pushchangeset(pushop):
943 943 """Make the actual push of changeset bundle to remote repo"""
944 944 if 'changesets' in pushop.stepsdone:
945 945 return
946 946 pushop.stepsdone.add('changesets')
947 947 if not _pushcheckoutgoing(pushop):
948 948 return
949 949 pushop.repo.prepushoutgoinghooks(pushop)
950 950 outgoing = pushop.outgoing
951 951 unbundle = pushop.remote.capable('unbundle')
952 952 # TODO: get bundlecaps from remote
953 953 bundlecaps = None
954 954 # create a changegroup from local
955 955 if pushop.revs is None and not (outgoing.excluded
956 956 or pushop.repo.changelog.filteredrevs):
957 957 # push everything,
958 958 # use the fast path, no race possible on push
959 959 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
960 960 cg = changegroup.getsubset(pushop.repo,
961 961 outgoing,
962 962 bundler,
963 963 'push',
964 964 fastpath=True)
965 965 else:
966 966 cg = changegroup.getchangegroup(pushop.repo, 'push', outgoing,
967 967 bundlecaps=bundlecaps)
968 968
969 969 # apply changegroup to remote
970 970 if unbundle:
971 971 # local repo finds heads on server, finds out what
972 972 # revs it must push. once revs transferred, if server
973 973 # finds it has different heads (someone else won
974 974 # commit/push race), server aborts.
975 975 if pushop.force:
976 976 remoteheads = ['force']
977 977 else:
978 978 remoteheads = pushop.remoteheads
979 979 # ssh: return remote's addchangegroup()
980 980 # http: return remote's addchangegroup() or 0 for error
981 981 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
982 982 pushop.repo.url())
983 983 else:
984 984 # we return an integer indicating remote head count
985 985 # change
986 986 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
987 987 pushop.repo.url())
988 988
989 989 def _pushsyncphase(pushop):
990 990 """synchronise phase information locally and remotely"""
991 991 cheads = pushop.commonheads
992 992 # even when we don't push, exchanging phase data is useful
993 993 remotephases = pushop.remote.listkeys('phases')
994 994 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
995 995 and remotephases # server supports phases
996 996 and pushop.cgresult is None # nothing was pushed
997 997 and remotephases.get('publishing', False)):
998 998 # When:
999 999 # - this is a subrepo push
1000 1000 # - and the remote supports phases
1001 1001 # - and no changesets were pushed
1002 1002 # - and the remote is publishing
1003 1003 # we may be in the issue 3871 case!
1004 1004 # We drop the phase synchronisation that would otherwise be done as
1005 1005 # a courtesy, which would publish on the remote changesets that are
1006 1006 # possibly still draft locally.
1007 1007 remotephases = {'publishing': 'True'}
1008 1008 if not remotephases: # old server or public only reply from non-publishing
1009 1009 _localphasemove(pushop, cheads)
1010 1010 # don't push any phase data as there is nothing to push
1011 1011 else:
1012 1012 ana = phases.analyzeremotephases(pushop.repo, cheads,
1013 1013 remotephases)
1014 1014 pheads, droots = ana
1015 1015 ### Apply remote phase on local
1016 1016 if remotephases.get('publishing', False):
1017 1017 _localphasemove(pushop, cheads)
1018 1018 else: # publish = False
1019 1019 _localphasemove(pushop, pheads)
1020 1020 _localphasemove(pushop, cheads, phases.draft)
1021 1021 ### Apply local phase on remote
1022 1022
1023 1023 if pushop.cgresult:
1024 1024 if 'phases' in pushop.stepsdone:
1025 1025 # phases already pushed through bundle2
1026 1026 return
1027 1027 outdated = pushop.outdatedphases
1028 1028 else:
1029 1029 outdated = pushop.fallbackoutdatedphases
1030 1030
1031 1031 pushop.stepsdone.add('phases')
1032 1032
1033 1033 # filter heads already turned public by the push
1034 1034 outdated = [c for c in outdated if c.node() not in pheads]
1035 1035 # fallback to independent pushkey command
1036 1036 for newremotehead in outdated:
1037 1037 r = pushop.remote.pushkey('phases',
1038 1038 newremotehead.hex(),
1039 1039 str(phases.draft),
1040 1040 str(phases.public))
1041 1041 if not r:
1042 1042 pushop.ui.warn(_('updating %s to public failed!\n')
1043 1043 % newremotehead)
1044 1044
1045 1045 def _localphasemove(pushop, nodes, phase=phases.public):
1046 1046 """move <nodes> to <phase> in the local source repo"""
1047 1047 if pushop.trmanager:
1048 1048 phases.advanceboundary(pushop.repo,
1049 1049 pushop.trmanager.transaction(),
1050 1050 phase,
1051 1051 nodes)
1052 1052 else:
1053 1053 # repo is not locked, do not change any phases!
1054 1054 # Inform the user that phases should have been moved when
1055 1055 # applicable.
1056 1056 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1057 1057 phasestr = phases.phasenames[phase]
1058 1058 if actualmoves:
1059 1059 pushop.ui.status(_('cannot lock source repo, skipping '
1060 1060 'local %s phase update\n') % phasestr)
1061 1061
1062 1062 def _pushobsolete(pushop):
1063 1063 """utility function to push obsolete markers to a remote"""
1064 1064 if 'obsmarkers' in pushop.stepsdone:
1065 1065 return
1066 1066 repo = pushop.repo
1067 1067 remote = pushop.remote
1068 1068 pushop.stepsdone.add('obsmarkers')
1069 1069 if pushop.outobsmarkers:
1070 1070 pushop.ui.debug('try to push obsolete markers to remote\n')
1071 1071 rslts = []
1072 1072 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1073 1073 for key in sorted(remotedata, reverse=True):
1074 1074 # reverse sort to ensure we end with dump0
1075 1075 data = remotedata[key]
1076 1076 rslts.append(remote.pushkey('obsolete', key, '', data))
1077 1077 if [r for r in rslts if not r]:
1078 1078 msg = _('failed to push some obsolete markers!\n')
1079 1079 repo.ui.warn(msg)
1080 1080
1081 1081 def _pushbookmark(pushop):
1082 1082 """Update bookmark position on remote"""
1083 1083 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1084 1084 return
1085 1085 pushop.stepsdone.add('bookmarks')
1086 1086 ui = pushop.ui
1087 1087 remote = pushop.remote
1088 1088
1089 1089 for b, old, new in pushop.outbookmarks:
1090 1090 action = 'update'
1091 1091 if not old:
1092 1092 action = 'export'
1093 1093 elif not new:
1094 1094 action = 'delete'
1095 1095 if remote.pushkey('bookmarks', b, old, new):
1096 1096 ui.status(bookmsgmap[action][0] % b)
1097 1097 else:
1098 1098 ui.warn(bookmsgmap[action][1] % b)
1099 1099 # discovery may have set the value from an invalid entry
1100 1100 if pushop.bkresult is not None:
1101 1101 pushop.bkresult = 1
1102 1102
1103 1103 class pulloperation(object):
1104 1104 """A object that represent a single pull operation
1105 1105
1106 1106 It purpose is to carry pull related state and very common operation.
1107 1107
1108 1108 A new should be created at the beginning of each pull and discarded
1109 1109 afterward.
1110 1110 """
1111 1111
1112 1112 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1113 1113 remotebookmarks=None, streamclonerequested=None):
1114 1114 # repo we pull into
1115 1115 self.repo = repo
1116 1116 # repo we pull from
1117 1117 self.remote = remote
1118 1118 # revisions we try to pull (None is "all")
1119 1119 self.heads = heads
1120 1120 # bookmarks pulled explicitly
1121 1121 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1122 1122 for bookmark in bookmarks]
1123 1123 # do we force pull?
1124 1124 self.force = force
1125 1125 # whether a streaming clone was requested
1126 1126 self.streamclonerequested = streamclonerequested
1127 1127 # transaction manager
1128 1128 self.trmanager = None
1129 1129 # set of common changesets between local and remote before pull
1130 1130 self.common = None
1131 1131 # set of pulled heads
1132 1132 self.rheads = None
1133 1133 # list of missing changesets to fetch remotely
1134 1134 self.fetch = None
1135 1135 # remote bookmarks data
1136 1136 self.remotebookmarks = remotebookmarks
1137 1137 # result of changegroup pulling (used as return code by pull)
1138 1138 self.cgresult = None
1139 1139 # list of steps already done
1140 1140 self.stepsdone = set()
1141 1141 # Whether we attempted a clone from pre-generated bundles.
1142 1142 self.clonebundleattempted = False
1143 1143
1144 1144 @util.propertycache
1145 1145 def pulledsubset(self):
1146 1146 """heads of the set of changeset target by the pull"""
1147 1147 # compute target subset
1148 1148 if self.heads is None:
1149 1149 # We pulled everything possible
1150 1150 # sync on everything common
1151 1151 c = set(self.common)
1152 1152 ret = list(self.common)
1153 1153 for n in self.rheads:
1154 1154 if n not in c:
1155 1155 ret.append(n)
1156 1156 return ret
1157 1157 else:
1158 1158 # We pulled a specific subset
1159 1159 # sync on this subset
1160 1160 return self.heads
1161 1161
1162 1162 @util.propertycache
1163 1163 def canusebundle2(self):
1164 1164 return not _forcebundle1(self)
1165 1165
1166 1166 @util.propertycache
1167 1167 def remotebundle2caps(self):
1168 1168 return bundle2.bundle2caps(self.remote)
1169 1169
1170 1170 def gettransaction(self):
1171 1171 # deprecated; talk to trmanager directly
1172 1172 return self.trmanager.transaction()
1173 1173
1174 1174 class transactionmanager(object):
1175 1175 """An object to manage the life cycle of a transaction
1176 1176
1177 1177 It creates the transaction on demand and calls the appropriate hooks when
1178 1178 closing the transaction."""
1179 1179 def __init__(self, repo, source, url):
1180 1180 self.repo = repo
1181 1181 self.source = source
1182 1182 self.url = url
1183 1183 self._tr = None
1184 1184
1185 1185 def transaction(self):
1186 1186 """Return an open transaction object, constructing if necessary"""
1187 1187 if not self._tr:
1188 1188 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1189 1189 self._tr = self.repo.transaction(trname)
1190 1190 self._tr.hookargs['source'] = self.source
1191 1191 self._tr.hookargs['url'] = self.url
1192 1192 return self._tr
1193 1193
1194 1194 def close(self):
1195 1195 """close transaction if created"""
1196 1196 if self._tr is not None:
1197 1197 self._tr.close()
1198 1198
1199 1199 def release(self):
1200 1200 """release transaction if created"""
1201 1201 if self._tr is not None:
1202 1202 self._tr.release()
1203 1203
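# Typical life cycle, as used by push() and pull() in this module:
#   trmanager = transactionmanager(repo, 'pull', remote.url())
#   try:
#       ...  # steps call trmanager.transaction() only when needed
#       trmanager.close()    # commits, if a transaction was opened
#   finally:
#       trmanager.release()  # aborts, if close() was never reached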
1204 1204 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1205 1205 streamclonerequested=None):
1206 1206 """Fetch repository data from a remote.
1207 1207
1208 1208 This is the main function used to retrieve data from a remote repository.
1209 1209
1210 1210 ``repo`` is the local repository to clone into.
1211 1211 ``remote`` is a peer instance.
1212 1212 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1213 1213 default) means to pull everything from the remote.
1214 1214 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1215 1215 default, all remote bookmarks are pulled.
1216 1216 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1217 1217 initialization.
1218 1218 ``streamclonerequested`` is a boolean indicating whether a "streaming
1219 1219 clone" is requested. A "streaming clone" is essentially a raw file copy
1220 1220 of revlogs from the server. This only works when the local repository is
1221 1221 empty. The default value of ``None`` means to respect the server
1222 1222 configuration for preferring stream clones.
1223 1223
1224 1224 Returns the ``pulloperation`` created for this pull.
1225 1225 """
1226 1226 if opargs is None:
1227 1227 opargs = {}
1228 1228 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1229 1229 streamclonerequested=streamclonerequested, **opargs)
1230 1230 if pullop.remote.local():
1231 1231 missing = set(pullop.remote.requirements) - pullop.repo.supported
1232 1232 if missing:
1233 1233 msg = _("required features are not"
1234 1234 " supported in the destination:"
1235 1235 " %s") % (', '.join(sorted(missing)))
1236 1236 raise error.Abort(msg)
1237 1237
1238 1238 wlock = lock = None
1239 1239 try:
1240 1240 wlock = pullop.repo.wlock()
1241 1241 lock = pullop.repo.lock()
1242 1242 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1243 1243 streamclone.maybeperformlegacystreamclone(pullop)
1244 1244 # This should ideally be in _pullbundle2(). However, it needs to run
1245 1245 # before discovery to avoid extra work.
1246 1246 _maybeapplyclonebundle(pullop)
1247 1247 _pulldiscovery(pullop)
1248 1248 if pullop.canusebundle2:
1249 1249 _pullbundle2(pullop)
1250 1250 _pullchangeset(pullop)
1251 1251 _pullphase(pullop)
1252 1252 _pullbookmarks(pullop)
1253 1253 _pullobsolete(pullop)
1254 1254 pullop.trmanager.close()
1255 1255 finally:
1256 1256 lockmod.release(pullop.trmanager, lock, wlock)
1257 1257
1258 1258 return pullop
1259 1259
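# A hypothetical caller (illustrative; h1, h2 are assumed binary nodes):
#   pullop = pull(repo, remote, heads=[h1, h2], bookmarks=['@'])
#   if pullop.cgresult == 0:
#       repo.ui.status('no changesets fetched\n')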
1260 1260 # list of steps to perform discovery before pull
1261 1261 pulldiscoveryorder = []
1262 1262
1263 1263 # Mapping between step name and function
1264 1264 #
1265 1265 # This exists to help extensions wrap steps if necessary
1266 1266 pulldiscoverymapping = {}
1267 1267
1268 1268 def pulldiscovery(stepname):
1269 1269 """decorator for function performing discovery before pull
1270 1270
1271 1271 The function is added to the step -> function mapping and appended to the
1272 1272 list of steps. Beware that decorated functions will be added in order (this
1273 1273 may matter).
1274 1274
1275 1275 You can only use this decorator for a new step; if you want to wrap a step
1276 1276 from an extension, change the pulldiscoverymapping dictionary directly."""
1277 1277 def dec(func):
1278 1278 assert stepname not in pulldiscoverymapping
1279 1279 pulldiscoverymapping[stepname] = func
1280 1280 pulldiscoveryorder.append(stepname)
1281 1281 return func
1282 1282 return dec
1283 1283
1284 1284 def _pulldiscovery(pullop):
1285 1285 """Run all discovery steps"""
1286 1286 for stepname in pulldiscoveryorder:
1287 1287 step = pulldiscoverymapping[stepname]
1288 1288 step(pullop)
1289 1289
1290 1290 @pulldiscovery('b1:bookmarks')
1291 1291 def _pullbookmarkbundle1(pullop):
1292 1292 """fetch bookmark data in bundle1 case
1293 1293
1294 1294 If not using bundle2, we have to fetch bookmarks before changeset
1295 1295 discovery to reduce the chance and impact of race conditions."""
1296 1296 if pullop.remotebookmarks is not None:
1297 1297 return
1298 1298 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1299 1299 # all known bundle2 servers now support listkeys, but let's be nice
1300 1300 # with new implementations.
1301 1301 return
1302 1302 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1303 1303
1304 1304
1305 1305 @pulldiscovery('changegroup')
1306 1306 def _pulldiscoverychangegroup(pullop):
1307 1307 """discovery phase for the pull
1308 1308
1309 1309 Currently handles changeset discovery only; will change to handle all
1310 1310 discovery at some point."""
1311 1311 tmp = discovery.findcommonincoming(pullop.repo,
1312 1312 pullop.remote,
1313 1313 heads=pullop.heads,
1314 1314 force=pullop.force)
1315 1315 common, fetch, rheads = tmp
1316 1316 nm = pullop.repo.unfiltered().changelog.nodemap
1317 1317 if fetch and rheads:
1318 1318 # If a remote head is filtered locally, let's drop it from the unknown
1319 1319 # remote heads and put it back in common.
1320 1320 #
1321 1321 # This is a hackish solution to catch most "common but locally
1322 1322 # hidden" situations. We do not perform discovery on the unfiltered
1323 1323 # repository because it ends up doing a pathological number of round
1324 1324 # trips for a huge amount of changesets we do not care about.
1325 1325 #
1326 1326 # If a set of such "common but filtered" changesets exists on the server
1327 1327 # but does not include a remote head, we will not be able to detect it.
1328 1328 scommon = set(common)
1329 1329 filteredrheads = []
1330 1330 for n in rheads:
1331 1331 if n in nm:
1332 1332 if n not in scommon:
1333 1333 common.append(n)
1334 1334 else:
1335 1335 filteredrheads.append(n)
1336 1336 if not filteredrheads:
1337 1337 fetch = []
1338 1338 rheads = filteredrheads
1339 1339 pullop.common = common
1340 1340 pullop.fetch = fetch
1341 1341 pullop.rheads = rheads
1342 1342
1343 1343 def _pullbundle2(pullop):
1344 1344 """pull data using bundle2
1345 1345
1346 1346 For now, the only supported data is the changegroup."""
1347 1347 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1348 1348
1349 1349 # At the moment we don't do stream clones over bundle2. If that is
1350 1350 # implemented then here's where the check for that will go.
1351 1351 streaming = False
1352 1352
1353 1353 # pulling changegroup
1354 1354 pullop.stepsdone.add('changegroup')
1355 1355
1356 1356 kwargs['common'] = pullop.common
1357 1357 kwargs['heads'] = pullop.heads or pullop.rheads
1358 1358 kwargs['cg'] = pullop.fetch
1359 1359 if 'listkeys' in pullop.remotebundle2caps:
1360 1360 kwargs['listkeys'] = ['phases']
1361 1361 if pullop.remotebookmarks is None:
1362 1362 # make sure to always include bookmark data when migrating
1363 1363 # `hg incoming --bundle` to using this function.
1364 1364 kwargs['listkeys'].append('bookmarks')
1365 1365
1366 1366 # If this is a full pull / clone and the server supports the clone bundles
1367 1367 # feature, tell the server whether we attempted a clone bundle. The
1368 1368 # presence of this flag indicates the client supports clone bundles. This
1369 1369 # will enable the server to treat clients that support clone bundles
1370 1370 # differently from those that don't.
1371 1371 if (pullop.remote.capable('clonebundles')
1372 1372 and pullop.heads is None and list(pullop.common) == [nullid]):
1373 1373 kwargs['cbattempted'] = pullop.clonebundleattempted
1374 1374
1375 1375 if streaming:
1376 1376 pullop.repo.ui.status(_('streaming all changes\n'))
1377 1377 elif not pullop.fetch:
1378 1378 pullop.repo.ui.status(_("no changes found\n"))
1379 1379 pullop.cgresult = 0
1380 1380 else:
1381 1381 if pullop.heads is None and list(pullop.common) == [nullid]:
1382 1382 pullop.repo.ui.status(_("requesting all changes\n"))
1383 1383 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1384 1384 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1385 1385 if obsolete.commonversion(remoteversions) is not None:
1386 1386 kwargs['obsmarkers'] = True
1387 1387 pullop.stepsdone.add('obsmarkers')
1388 1388 _pullbundle2extraprepare(pullop, kwargs)
1389 1389 bundle = pullop.remote.getbundle('pull', **kwargs)
1390 1390 try:
1391 1391 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1392 1392 except bundle2.AbortFromPart as exc:
1393 1393 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1394 1394 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1395 1395 except error.BundleValueError as exc:
1396 1396 raise error.Abort(_('missing support for %s') % exc)
1397 1397
1398 1398 if pullop.fetch:
1399 1399 results = [cg['return'] for cg in op.records['changegroup']]
1400 1400 pullop.cgresult = changegroup.combineresults(results)
1401 1401
1402 1402 # processing phases change
1403 1403 for namespace, value in op.records['listkeys']:
1404 1404 if namespace == 'phases':
1405 1405 _pullapplyphases(pullop, value)
1406 1406
1407 1407 # processing bookmark update
1408 1408 for namespace, value in op.records['listkeys']:
1409 1409 if namespace == 'bookmarks':
1410 1410 pullop.remotebookmarks = value
1411 1411
1412 1412 # bookmark data were either already there or pulled in the bundle
1413 1413 if pullop.remotebookmarks is not None:
1414 1414 _pullbookmarks(pullop)
1415 1415
1416 1416 def _pullbundle2extraprepare(pullop, kwargs):
1417 1417 """hook function so that extensions can extend the getbundle call"""
1418 1418 pass
1419 1419
1420 1420 def _pullchangeset(pullop):
1421 1421 """pull changeset from unbundle into the local repo"""
1422 1422 # We delay opening the transaction as long as possible so we
1423 1423 # don't open a transaction for nothing, and don't break a future
1424 1424 # useful rollback call.
1425 1425 if 'changegroup' in pullop.stepsdone:
1426 1426 return
1427 1427 pullop.stepsdone.add('changegroup')
1428 1428 if not pullop.fetch:
1429 1429 pullop.repo.ui.status(_("no changes found\n"))
1430 1430 pullop.cgresult = 0
1431 1431 return
1432 1432 pullop.gettransaction()
1433 1433 if pullop.heads is None and list(pullop.common) == [nullid]:
1434 1434 pullop.repo.ui.status(_("requesting all changes\n"))
1435 1435 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1436 1436 # issue1320, avoid a race if remote changed after discovery
1437 1437 pullop.heads = pullop.rheads
1438 1438
1439 1439 if pullop.remote.capable('getbundle'):
1440 1440 # TODO: get bundlecaps from remote
1441 1441 cg = pullop.remote.getbundle('pull', common=pullop.common,
1442 1442 heads=pullop.heads or pullop.rheads)
1443 1443 elif pullop.heads is None:
1444 1444 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1445 1445 elif not pullop.remote.capable('changegroupsubset'):
1446 1446 raise error.Abort(_("partial pull cannot be done because "
1447 1447 "the other repository doesn't support "
1448 1448 "changegroupsubset."))
1449 1449 else:
1450 1450 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1451 1451 pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
1452 1452
1453 1453 def _pullphase(pullop):
1454 1454 # Get remote phases data from remote
1455 1455 if 'phases' in pullop.stepsdone:
1456 1456 return
1457 1457 remotephases = pullop.remote.listkeys('phases')
1458 1458 _pullapplyphases(pullop, remotephases)
1459 1459
1460 1460 def _pullapplyphases(pullop, remotephases):
1461 1461 """apply phase movement from observed remote state"""
1462 1462 if 'phases' in pullop.stepsdone:
1463 1463 return
1464 1464 pullop.stepsdone.add('phases')
1465 1465 publishing = bool(remotephases.get('publishing', False))
1466 1466 if remotephases and not publishing:
1467 1467 # remote is new and non-publishing
1468 1468 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1469 1469 pullop.pulledsubset,
1470 1470 remotephases)
1471 1471 dheads = pullop.pulledsubset
1472 1472 else:
1473 1473 # Remote is old or publishing: all common changesets
1474 1474 # should be seen as public
1475 1475 pheads = pullop.pulledsubset
1476 1476 dheads = []
1477 1477 unfi = pullop.repo.unfiltered()
1478 1478 phase = unfi._phasecache.phase
1479 1479 rev = unfi.changelog.nodemap.get
1480 1480 public = phases.public
1481 1481 draft = phases.draft
1482 1482
1483 1483 # exclude changesets already public locally and update the others
1484 1484 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1485 1485 if pheads:
1486 1486 tr = pullop.gettransaction()
1487 1487 phases.advanceboundary(pullop.repo, tr, public, pheads)
1488 1488
1489 1489 # exclude changesets already draft locally and update the others
1490 1490 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1491 1491 if dheads:
1492 1492 tr = pullop.gettransaction()
1493 1493 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1494 1494
1495 1495 def _pullbookmarks(pullop):
1496 1496 """process the remote bookmark information to update the local one"""
1497 1497 if 'bookmarks' in pullop.stepsdone:
1498 1498 return
1499 1499 pullop.stepsdone.add('bookmarks')
1500 1500 repo = pullop.repo
1501 1501 remotebookmarks = pullop.remotebookmarks
1502 1502 remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
1503 1503 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1504 1504 pullop.remote.url(),
1505 1505 pullop.gettransaction,
1506 1506 explicit=pullop.explicitbookmarks)
1507 1507
1508 1508 def _pullobsolete(pullop):
1509 1509 """utility function to pull obsolete markers from a remote
1510 1510
1511 1511 The `gettransaction` argument is a function that returns the pull
1512 1512 transaction, creating one if necessary. We return the transaction to inform
1513 1513 the calling code that a new transaction has been created (when applicable).
1514 1514
1515 1515 Exists mostly to allow overriding for experimentation purposes"""
1516 1516 if 'obsmarkers' in pullop.stepsdone:
1517 1517 return
1518 1518 pullop.stepsdone.add('obsmarkers')
1519 1519 tr = None
1520 1520 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1521 1521 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1522 1522 remoteobs = pullop.remote.listkeys('obsolete')
1523 1523 if 'dump0' in remoteobs:
1524 1524 tr = pullop.gettransaction()
1525 1525 markers = []
1526 1526 for key in sorted(remoteobs, reverse=True):
1527 1527 if key.startswith('dump'):
1528 1528 data = util.b85decode(remoteobs[key])
1529 1529 version, newmarks = obsolete._readmarkers(data)
1530 1530 markers += newmarks
1531 1531 if markers:
1532 1532 pullop.repo.obsstore.add(tr, markers)
1533 1533 pullop.repo.invalidatevolatilesets()
1534 1534 return tr
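
# For reference, the 'obsolete' listkeys namespace consulted above maps
# 'dumpN' keys to base85-encoded marker blobs, roughly (sketch):
#
#     {'dump0': '<base85 marker data>', 'dump1': '<base85 marker data>'}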
1535 1535
1536 1536 def caps20to10(repo):
1537 1537 """return a set with appropriate options to use bundle20 during getbundle"""
1538 1538 caps = {'HG20'}
1539 1539 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1540 1540 caps.add('bundle2=' + urlreq.quote(capsblob))
1541 1541 return caps
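
# The returned set has two entries, e.g. (sketch; the quoted blob varies
# with the repository's bundle2 capabilities):
#
#     {'HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02'}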
1542 1542
1543 1543 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1544 1544 getbundle2partsorder = []
1545 1545
1546 1546 # Mapping between step name and function
1547 1547 #
1548 1548 # This exists to help extensions wrap steps if necessary
1549 1549 getbundle2partsmapping = {}
1550 1550
1551 1551 def getbundle2partsgenerator(stepname, idx=None):
1552 1552 """decorator for functions generating bundle2 parts for getbundle
1553 1553
1554 1554 The function is added to the step -> function mapping and appended to the
1555 1555 list of steps. Beware that decorated functions will be added in order
1556 1556 (this may matter).
1557 1557
1558 1558 You can only use this decorator for new steps; if you want to wrap a step
1559 1559 from an extension, modify the getbundle2partsmapping dictionary directly."""
1560 1560 def dec(func):
1561 1561 assert stepname not in getbundle2partsmapping
1562 1562 getbundle2partsmapping[stepname] = func
1563 1563 if idx is None:
1564 1564 getbundle2partsorder.append(stepname)
1565 1565 else:
1566 1566 getbundle2partsorder.insert(idx, stepname)
1567 1567 return func
1568 1568 return dec
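
# A minimal registration sketch (the 'myext-extras' part name, its payload,
# and the 'myextextras' argument are hypothetical):
#
#     @getbundle2partsgenerator('myext-extras')
#     def _getbundlemyextpart(bundler, repo, source, bundlecaps=None,
#                             b2caps=None, **kwargs):
#         if kwargs.get('myextextras', False):
#             bundler.newpart('myext-extras', data='payload')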
1569 1569
1570 1570 def bundle2requested(bundlecaps):
1571 1571 if bundlecaps is not None:
1572 1572 return any(cap.startswith('HG2') for cap in bundlecaps)
1573 1573 return False
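
# For example (sketch):
#
#     bundle2requested(['HG20'])   -> True
#     bundle2requested(['HG10BZ']) -> False
#     bundle2requested(None)       -> False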
1574 1574
1575 1575 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1576 1576 **kwargs):
1577 1577 """Return chunks constituting a bundle's raw data.
1578 1578
1579 1579 Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
1580 1580 passed.
1581 1581
1582 1582 Returns an iterator over raw chunks (of varying sizes).
1583 1583 """
1584 1584 usebundle2 = bundle2requested(bundlecaps)
1585 1585 # bundle10 case
1586 1586 if not usebundle2:
1587 1587 if bundlecaps and not kwargs.get('cg', True):
1588 1588 raise ValueError(_('request for bundle10 must include changegroup'))
1589 1589
1590 1590 if kwargs:
1591 1591 raise ValueError(_('unsupported getbundle arguments: %s')
1592 1592 % ', '.join(sorted(kwargs.keys())))
1593 1593 outgoing = _computeoutgoing(repo, heads, common)
1594 1594 bundler = changegroup.getbundler('01', repo, bundlecaps)
1595 1595 return changegroup.getsubsetraw(repo, outgoing, bundler, source)
1596 1596
1597 1597 # bundle20 case
1598 1598 b2caps = {}
1599 1599 for bcaps in bundlecaps:
1600 1600 if bcaps.startswith('bundle2='):
1601 1601 blob = urlreq.unquote(bcaps[len('bundle2='):])
1602 1602 b2caps.update(bundle2.decodecaps(blob))
1603 1603 bundler = bundle2.bundle20(repo.ui, b2caps)
1604 1604
1605 1605 kwargs['heads'] = heads
1606 1606 kwargs['common'] = common
1607 1607
1608 1608 for name in getbundle2partsorder:
1609 1609 func = getbundle2partsmapping[name]
1610 1610 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1611 1611 **kwargs)
1612 1612
1613 1613 return bundler.getchunks()
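
# Illustrative use (sketch; assumes 'repo' is a local repository object):
# stream the raw chunks to a file without holding the whole bundle in memory.
#
#     with open('out.hg', 'wb') as fh:
#         for chunk in getbundlechunks(repo, 'bundle', bundlecaps={'HG20'}):
#             fh.write(chunk)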
1614 1614
1615 1615 @getbundle2partsgenerator('changegroup')
1616 1616 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1617 1617 b2caps=None, heads=None, common=None, **kwargs):
1618 1618 """add a changegroup part to the requested bundle"""
1619 1619 cg = None
1620 1620 if kwargs.get('cg', True):
1621 1621 # build changegroup bundle here.
1622 1622 version = '01'
1623 1623 cgversions = b2caps.get('changegroup')
1624 1624 if cgversions: # 3.1 and 3.2 ship with an empty value
1625 1625 cgversions = [v for v in cgversions
1626 1626 if v in changegroup.supportedoutgoingversions(repo)]
1627 1627 if not cgversions:
1628 1628 raise ValueError(_('no common changegroup version'))
1629 1629 version = max(cgversions)
1630 1630 outgoing = _computeoutgoing(repo, heads, common)
1631 1631 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1632 1632 bundlecaps=bundlecaps,
1633 1633 version=version)
1634 1634
1635 1635 if cg:
1636 1636 part = bundler.newpart('changegroup', data=cg)
1637 1637 if cgversions:
1638 1638 part.addparam('version', version)
1639 1639 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1640 1640 if 'treemanifest' in repo.requirements:
1641 1641 part.addparam('treemanifest', '1')
1642 1642
1643 1643 @getbundle2partsgenerator('listkeys')
1644 1644 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1645 1645 b2caps=None, **kwargs):
1646 1646 """add parts containing listkeys namespaces to the requested bundle"""
1647 1647 listkeys = kwargs.get('listkeys', ())
1648 1648 for namespace in listkeys:
1649 1649 part = bundler.newpart('listkeys')
1650 1650 part.addparam('namespace', namespace)
1651 1651 keys = repo.listkeys(namespace).items()
1652 1652 part.data = pushkey.encodekeys(keys)
1653 1653
1654 1654 @getbundle2partsgenerator('obsmarkers')
1655 1655 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1656 1656 b2caps=None, heads=None, **kwargs):
1657 1657 """add an obsolescence markers part to the requested bundle"""
1658 1658 if kwargs.get('obsmarkers', False):
1659 1659 if heads is None:
1660 1660 heads = repo.heads()
1661 1661 subset = [c.node() for c in repo.set('::%ln', heads)]
1662 1662 markers = repo.obsstore.relevantmarkers(subset)
1663 1663 markers = sorted(markers)
1664 1664 bundle2.buildobsmarkerspart(bundler, markers)
1665 1665
1666 1666 @getbundle2partsgenerator('hgtagsfnodes')
1667 1667 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1668 1668 b2caps=None, heads=None, common=None,
1669 1669 **kwargs):
1670 1670 """Transfer the .hgtags filenodes mapping.
1671 1671
1672 1672 Only values for heads in this bundle will be transferred.
1673 1673
1674 1674 The part data consists of pairs of 20 byte changeset node and .hgtags
1675 1675 filenodes raw values.
1676 1676 """
1677 1677 # Don't send unless:
1678 1678 # - changesets are being exchanged,
1679 1679 # - the client supports it.
1680 1680 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1681 1681 return
1682 1682
1683 1683 outgoing = _computeoutgoing(repo, heads, common)
1684 1684 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1685 1685
1686 1686 def _getbookmarks(repo, **kwargs):
1687 1687 """Returns bookmark to node mapping.
1688 1688
1689 1689 This function is primarily used to generate `bookmarks` bundle2 part.
1690 1690 It is a separate function in order to make it easy to wrap it
1691 1691 in extensions. Passing `kwargs` to the function makes it easy to
1692 1692 add new parameters in extensions.
1693 1693 """
1694 1694
1695 1695 return dict(bookmod.listbinbookmarks(repo))
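
# The mapping uses binary nodes, e.g. (sketch):
#
#     {'@': <20-byte binary node>, 'stable': <20-byte binary node>}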
1696 1696
1697 1697 def check_heads(repo, their_heads, context):
1698 1698 """check if the heads of a repo have been modified
1699 1699
1700 1700 Used by peer for unbundling.
1701 1701 """
1702 1702 heads = repo.heads()
1703 1703 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
1704 1704 if not (their_heads == ['force'] or their_heads == heads or
1705 1705 their_heads == ['hashed', heads_hash]):
1706 1706 # someone else committed/pushed/unbundled while we
1707 1707 # were transferring data
1708 1708 raise error.PushRaced('repository changed while %s - '
1709 1709 'please try again' % context)
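
# A cooperating client computes the same fingerprint over the heads it
# observed during discovery (sketch; 'remote', 'cg' and 'url' are assumed):
#
#     import hashlib
#     heads_hash = hashlib.sha1(''.join(sorted(observed_heads))).digest()
#     remote.unbundle(cg, ['hashed', heads_hash], url)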
1710 1710
1711 1711 def unbundle(repo, cg, heads, source, url):
1712 1712 """Apply a bundle to a repo.
1713 1713
1714 1714 This function makes sure the repo is locked during the application and has
1715 1715 a mechanism to check that no push race occurred between the creation of the
1716 1716 bundle and its application.
1717 1717
1718 1718 If the push was raced, a PushRaced exception is raised."""
1719 1719 r = 0
1720 1720 # need a transaction when processing a bundle2 stream
1721 1721 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1722 1722 lockandtr = [None, None, None]
1723 1723 recordout = None
1724 1724 # quick fix for output mismatch with bundle2 in 3.4
1725 1725 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1726 1726 False)
1727 1727 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1728 1728 captureoutput = True
1729 1729 try:
1730 1730 # note: outside bundle1, 'heads' is expected to be empty and this
1731 1731 # 'check_heads' call will be a no-op
1732 1732 check_heads(repo, heads, 'uploading changes')
1733 1733 # push can proceed
1734 if not util.safehasattr(cg, 'params'):
1734 if not isinstance(cg, bundle2.unbundle20):
1735 1735 # legacy case: bundle1 (changegroup 01)
1736 1736 with repo.lock():
1737 1737 r = cg.apply(repo, source, url)
1738 1738 else:
1739 1739 r = None
1740 1740 try:
1741 1741 def gettransaction():
1742 1742 if not lockandtr[2]:
1743 1743 lockandtr[0] = repo.wlock()
1744 1744 lockandtr[1] = repo.lock()
1745 1745 lockandtr[2] = repo.transaction(source)
1746 1746 lockandtr[2].hookargs['source'] = source
1747 1747 lockandtr[2].hookargs['url'] = url
1748 1748 lockandtr[2].hookargs['bundle2'] = '1'
1749 1749 return lockandtr[2]
1750 1750
1751 1751 # Do greedy locking by default until we're satisfied with lazy
1752 1752 # locking.
1753 1753 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1754 1754 gettransaction()
1755 1755
1756 1756 op = bundle2.bundleoperation(repo, gettransaction,
1757 1757 captureoutput=captureoutput)
1758 1758 try:
1759 1759 op = bundle2.processbundle(repo, cg, op=op)
1760 1760 finally:
1761 1761 r = op.reply
1762 1762 if captureoutput and r is not None:
1763 1763 repo.ui.pushbuffer(error=True, subproc=True)
1764 1764 def recordout(output):
1765 1765 r.newpart('output', data=output, mandatory=False)
1766 1766 if lockandtr[2] is not None:
1767 1767 lockandtr[2].close()
1768 1768 except BaseException as exc:
1769 1769 exc.duringunbundle2 = True
1770 1770 if captureoutput and r is not None:
1771 1771 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1772 1772 def recordout(output):
1773 1773 part = bundle2.bundlepart('output', data=output,
1774 1774 mandatory=False)
1775 1775 parts.append(part)
1776 1776 raise
1777 1777 finally:
1778 1778 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1779 1779 if recordout is not None:
1780 1780 recordout(repo.ui.popbuffer())
1781 1781 return r
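
# Illustrative server-side use (sketch; the url value is hypothetical):
# apply a bundle read from a stream, skipping the race check with the
# 'force' sentinel.
#
#     cg = readbundle(repo.ui, fh, 'stream')
#     r = unbundle(repo, cg, ['force'], 'push', 'remote:ssh:1.2.3.4')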
1782 1782
1783 1783 def _maybeapplyclonebundle(pullop):
1784 1784 """Apply a clone bundle from a remote, if possible."""
1785 1785
1786 1786 repo = pullop.repo
1787 1787 remote = pullop.remote
1788 1788
1789 1789 if not repo.ui.configbool('ui', 'clonebundles', True):
1790 1790 return
1791 1791
1792 1792 # Only run if local repo is empty.
1793 1793 if len(repo):
1794 1794 return
1795 1795
1796 1796 if pullop.heads:
1797 1797 return
1798 1798
1799 1799 if not remote.capable('clonebundles'):
1800 1800 return
1801 1801
1802 1802 res = remote._call('clonebundles')
1803 1803
1804 1804 # If we call the wire protocol command, that's good enough to record the
1805 1805 # attempt.
1806 1806 pullop.clonebundleattempted = True
1807 1807
1808 1808 entries = parseclonebundlesmanifest(repo, res)
1809 1809 if not entries:
1810 1810 repo.ui.note(_('no clone bundles available on remote; '
1811 1811 'falling back to regular clone\n'))
1812 1812 return
1813 1813
1814 1814 entries = filterclonebundleentries(repo, entries)
1815 1815 if not entries:
1816 1816 # There is a thundering herd concern here. However, if a server
1817 1817 # operator doesn't advertise bundles appropriate for its clients,
1818 1818 # they deserve what's coming. Furthermore, from a client's
1819 1819 # perspective, no automatic fallback would mean not being able to
1820 1820 # clone!
1821 1821 repo.ui.warn(_('no compatible clone bundles available on server; '
1822 1822 'falling back to regular clone\n'))
1823 1823 repo.ui.warn(_('(you may want to report this to the server '
1824 1824 'operator)\n'))
1825 1825 return
1826 1826
1827 1827 entries = sortclonebundleentries(repo.ui, entries)
1828 1828
1829 1829 url = entries[0]['URL']
1830 1830 repo.ui.status(_('applying clone bundle from %s\n') % url)
1831 1831 if trypullbundlefromurl(repo.ui, repo, url):
1832 1832 repo.ui.status(_('finished applying clone bundle\n'))
1833 1833 # Bundle failed.
1834 1834 #
1835 1835 # We abort by default to avoid the thundering herd of
1836 1836 # clients flooding a server that was expecting expensive
1837 1837 # clone load to be offloaded.
1838 1838 elif repo.ui.configbool('ui', 'clonebundlefallback', False):
1839 1839 repo.ui.warn(_('falling back to normal clone\n'))
1840 1840 else:
1841 1841 raise error.Abort(_('error applying bundle'),
1842 1842 hint=_('if this error persists, consider contacting '
1843 1843 'the server operator or disable clone '
1844 1844 'bundles via '
1845 1845 '"--config ui.clonebundles=false"'))
1846 1846
1847 1847 def parseclonebundlesmanifest(repo, s):
1848 1848 """Parses the raw text of a clone bundles manifest.
1849 1849
1850 1850 Returns a list of dicts. The dicts have a ``URL`` key corresponding
1851 1851 to the URL and other keys are the attributes for the entry.
1852 1852 """
1853 1853 m = []
1854 1854 for line in s.splitlines():
1855 1855 fields = line.split()
1856 1856 if not fields:
1857 1857 continue
1858 1858 attrs = {'URL': fields[0]}
1859 1859 for rawattr in fields[1:]:
1860 1860 key, value = rawattr.split('=', 1)
1861 1861 key = urlreq.unquote(key)
1862 1862 value = urlreq.unquote(value)
1863 1863 attrs[key] = value
1864 1864
1865 1865 # Parse BUNDLESPEC into components. This makes client-side
1866 1866 # preferences easier to specify since you can prefer a single
1867 1867 # component of the BUNDLESPEC.
1868 1868 if key == 'BUNDLESPEC':
1869 1869 try:
1870 1870 comp, version, params = parsebundlespec(repo, value,
1871 1871 externalnames=True)
1872 1872 attrs['COMPRESSION'] = comp
1873 1873 attrs['VERSION'] = version
1874 1874 except error.InvalidBundleSpecification:
1875 1875 pass
1876 1876 except error.UnsupportedBundleSpecification:
1877 1877 pass
1878 1878
1879 1879 m.append(attrs)
1880 1880
1881 1881 return m
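
# An example manifest (illustrative): one entry per line, the URL first,
# followed by URL-encoded key=value attributes.
#
#     https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# parses to:
#
#     [{'URL': 'https://example.com/full.hg', 'BUNDLESPEC': 'gzip-v2',
#       'COMPRESSION': 'gzip', 'VERSION': 'v2', 'REQUIRESNI': 'true'}]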
1882 1882
1883 1883 def filterclonebundleentries(repo, entries):
1884 1884 """Remove incompatible clone bundle manifest entries.
1885 1885
1886 1886 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
1887 1887 and returns a new list consisting of only the entries that this client
1888 1888 should be able to apply.
1889 1889
1890 1890 There is no guarantee we'll be able to apply all returned entries because
1891 1891 the metadata we use to filter on may be missing or wrong.
1892 1892 """
1893 1893 newentries = []
1894 1894 for entry in entries:
1895 1895 spec = entry.get('BUNDLESPEC')
1896 1896 if spec:
1897 1897 try:
1898 1898 parsebundlespec(repo, spec, strict=True)
1899 1899 except error.InvalidBundleSpecification as e:
1900 1900 repo.ui.debug(str(e) + '\n')
1901 1901 continue
1902 1902 except error.UnsupportedBundleSpecification as e:
1903 1903 repo.ui.debug('filtering %s because unsupported bundle '
1904 1904 'spec: %s\n' % (entry['URL'], str(e)))
1905 1905 continue
1906 1906
1907 1907 if 'REQUIRESNI' in entry and not sslutil.hassni:
1908 1908 repo.ui.debug('filtering %s because SNI not supported\n' %
1909 1909 entry['URL'])
1910 1910 continue
1911 1911
1912 1912 newentries.append(entry)
1913 1913
1914 1914 return newentries
1915 1915
1916 1916 class clonebundleentry(object):
1917 1917 """Represents an item in a clone bundles manifest.
1918 1918
1919 1919 This rich class is needed to support sorting since sorted() in Python 3
1920 1920 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
1921 1921 won't work.
1922 1922 """
1923 1923
1924 1924 def __init__(self, value, prefers):
1925 1925 self.value = value
1926 1926 self.prefers = prefers
1927 1927
1928 1928 def _cmp(self, other):
1929 1929 for prefkey, prefvalue in self.prefers:
1930 1930 avalue = self.value.get(prefkey)
1931 1931 bvalue = other.value.get(prefkey)
1932 1932
1933 1933 # Special case: b is missing the attribute and a matches exactly.
1934 1934 if avalue is not None and bvalue is None and avalue == prefvalue:
1935 1935 return -1
1936 1936
1937 1937 # Special case: a is missing the attribute and b matches exactly.
1938 1938 if bvalue is not None and avalue is None and bvalue == prefvalue:
1939 1939 return 1
1940 1940
1941 1941 # We can't compare unless attribute present on both.
1942 1942 if avalue is None or bvalue is None:
1943 1943 continue
1944 1944
1945 1945 # Same values should fall back to next attribute.
1946 1946 if avalue == bvalue:
1947 1947 continue
1948 1948
1949 1949 # Exact matches come first.
1950 1950 if avalue == prefvalue:
1951 1951 return -1
1952 1952 if bvalue == prefvalue:
1953 1953 return 1
1954 1954
1955 1955 # Fall back to next attribute.
1956 1956 continue
1957 1957
1958 1958 # If we got here we couldn't sort by attributes and prefers. Fall
1959 1959 # back to index order.
1960 1960 return 0
1961 1961
1962 1962 def __lt__(self, other):
1963 1963 return self._cmp(other) < 0
1964 1964
1965 1965 def __gt__(self, other):
1966 1966 return self._cmp(other) > 0
1967 1967
1968 1968 def __eq__(self, other):
1969 1969 return self._cmp(other) == 0
1970 1970
1971 1971 def __le__(self, other):
1972 1972 return self._cmp(other) <= 0
1973 1973
1974 1974 def __ge__(self, other):
1975 1975 return self._cmp(other) >= 0
1976 1976
1977 1977 def __ne__(self, other):
1978 1978 return self._cmp(other) != 0
1979 1979
1980 1980 def sortclonebundleentries(ui, entries):
1981 1981 prefers = ui.configlist('ui', 'clonebundleprefers', default=[])
1982 1982 if not prefers:
1983 1983 return list(entries)
1984 1984
1985 1985 prefers = [p.split('=', 1) for p in prefers]
1986 1986
1987 1987 items = sorted(clonebundleentry(v, prefers) for v in entries)
1988 1988 return [i.value for i in items]
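
# Preferences come from the ui.clonebundleprefers config option, an ordered
# list of key=value pairs, e.g. (sketch):
#
#     [ui]
#     clonebundleprefers = BUNDLESPEC=gzip-v2, BUNDLESPEC=bzip2-v2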
1989 1989
1990 1990 def trypullbundlefromurl(ui, repo, url):
1991 1991 """Attempt to apply a bundle from a URL."""
1992 1992 with repo.lock(), repo.transaction('bundleurl') as tr:
1993 1993 try:
1994 1994 fh = urlmod.open(ui, url)
1995 1995 cg = readbundle(ui, fh, 'stream')
1996 1996
1997 1997 if isinstance(cg, bundle2.unbundle20):
1998 1998 bundle2.processbundle(repo, cg, lambda: tr)
1999 1999 elif isinstance(cg, streamclone.streamcloneapplier):
2000 2000 cg.apply(repo)
2001 2001 else:
2002 2002 cg.apply(repo, 'clonebundles', url)
2003 2003 return True
2004 2004 except urlerr.httperror as e:
2005 2005 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
2006 2006 except urlerr.urlerror as e:
2007 2007 ui.warn(_('error fetching bundle: %s\n') % e.reason)
2008 2008
2009 2009 return False