exchange: implement function for inferring bundle specification...
Gregory Szorc
r27883:4f4b80b3 default
@@ -1,1880 +1,1933
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import urllib
12 12 import urllib2
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 hex,
17 17 nullid,
18 18 )
19 19 from . import (
20 20 base85,
21 21 bookmarks as bookmod,
22 22 bundle2,
23 23 changegroup,
24 24 discovery,
25 25 error,
26 26 lock as lockmod,
27 27 obsolete,
28 28 phases,
29 29 pushkey,
30 30 scmutil,
31 31 sslutil,
32 32 streamclone,
33 33 tags,
34 34 url as urlmod,
35 35 util,
36 36 )
37 37
38 38 # Maps bundle compression human names to internal representation.
39 39 _bundlespeccompressions = {'none': None,
40 40 'bzip2': 'BZ',
41 41 'gzip': 'GZ',
42 42 }
43 43
44 44 # Maps bundle version human names to changegroup versions.
45 45 _bundlespeccgversions = {'v1': '01',
46 46 'v2': '02',
47 47 'packed1': 's1',
48 48 'bundle2': '02', #legacy
49 49 }
50 50
51 51 def parsebundlespec(repo, spec, strict=True, externalnames=False):
52 52 """Parse a bundle string specification into parts.
53 53
54 54 Bundle specifications denote a well-defined bundle/exchange format.
55 55 The content of a given specification should not change over time in
56 56 order to ensure that bundles produced by a newer version of Mercurial are
57 57 readable by an older version.
58 58
59 59 The string currently has the form:
60 60
61 61 <compression>-<type>[;<parameter0>[;<parameter1>]]
62 62
63 63 Where <compression> is one of the supported compression formats
64 64 and <type> is (currently) a version string. A ";" can follow the type and
65 65 all text afterwards is interpreted as URI-encoded, ";"-delimited key=value
66 66 pairs.
67 67
68 68 If ``strict`` is True (the default) <compression> is required. Otherwise,
69 69 it is optional.
70 70
71 71 If ``externalnames`` is False (the default), the human-centric names will
72 72 be converted to their internal representation.
73 73
74 74 Returns a 3-tuple of (compression, version, parameters). Compression will
75 75 be ``None`` if not in strict mode and a compression isn't defined.
76 76
77 77 An ``InvalidBundleSpecification`` is raised when the specification is
78 78 not syntactically well formed.
79 79
80 80 An ``UnsupportedBundleSpecification`` is raised when the compression or
81 81 bundle type/version is not recognized.
82 82
83 83 Note: this function will likely eventually return a more complex data
84 84 structure, including bundle2 part information.
85 85 """
86 86 def parseparams(s):
87 87 if ';' not in s:
88 88 return s, {}
89 89
90 90 params = {}
91 91 version, paramstr = s.split(';', 1)
92 92
93 93 for p in paramstr.split(';'):
94 94 if '=' not in p:
95 95 raise error.InvalidBundleSpecification(
96 96 _('invalid bundle specification: '
97 97 'missing "=" in parameter: %s') % p)
98 98
99 99 key, value = p.split('=', 1)
100 100 key = urllib.unquote(key)
101 101 value = urllib.unquote(value)
102 102 params[key] = value
103 103
104 104 return version, params
105 105
106 106
107 107 if strict and '-' not in spec:
108 108 raise error.InvalidBundleSpecification(
109 109 _('invalid bundle specification; '
110 110 'must be prefixed with compression: %s') % spec)
111 111
112 112 if '-' in spec:
113 113 compression, version = spec.split('-', 1)
114 114
115 115 if compression not in _bundlespeccompressions:
116 116 raise error.UnsupportedBundleSpecification(
117 117 _('%s compression is not supported') % compression)
118 118
119 119 version, params = parseparams(version)
120 120
121 121 if version not in _bundlespeccgversions:
122 122 raise error.UnsupportedBundleSpecification(
123 123 _('%s is not a recognized bundle version') % version)
124 124 else:
125 125 # Value could be just the compression or just the version, in which
126 126 # case some defaults are assumed (but only when not in strict mode).
127 127 assert not strict
128 128
129 129 spec, params = parseparams(spec)
130 130
131 131 if spec in _bundlespeccompressions:
132 132 compression = spec
133 133 version = 'v1'
134 134 if 'generaldelta' in repo.requirements:
135 135 version = 'v2'
136 136 elif spec in _bundlespeccgversions:
137 137 if spec == 'packed1':
138 138 compression = 'none'
139 139 else:
140 140 compression = 'bzip2'
141 141 version = spec
142 142 else:
143 143 raise error.UnsupportedBundleSpecification(
144 144 _('%s is not a recognized bundle specification') % spec)
145 145
146 146 # The specification for packed1 can optionally declare the data formats
147 147 # required to apply it. If we see this metadata, compare against what the
148 148 # repo supports and error if the bundle isn't compatible.
149 149 if version == 'packed1' and 'requirements' in params:
150 150 requirements = set(params['requirements'].split(','))
151 151 missingreqs = requirements - repo.supportedformats
152 152 if missingreqs:
153 153 raise error.UnsupportedBundleSpecification(
154 154 _('missing support for repository features: %s') %
155 155 ', '.join(sorted(missingreqs)))
156 156
157 157 if not externalnames:
158 158 compression = _bundlespeccompressions[compression]
159 159 version = _bundlespeccgversions[version]
160 160 return compression, version, params
161 161
162 162 def readbundle(ui, fh, fname, vfs=None):
163 163 header = changegroup.readexactly(fh, 4)
164 164
165 165 alg = None
166 166 if not fname:
167 167 fname = "stream"
168 168 if not header.startswith('HG') and header.startswith('\0'):
169 169 fh = changegroup.headerlessfixup(fh, header)
170 170 header = "HG10"
171 171 alg = 'UN'
172 172 elif vfs:
173 173 fname = vfs.join(fname)
174 174
175 175 magic, version = header[0:2], header[2:4]
176 176
177 177 if magic != 'HG':
178 178 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
179 179 if version == '10':
180 180 if alg is None:
181 181 alg = changegroup.readexactly(fh, 2)
182 182 return changegroup.cg1unpacker(fh, alg)
183 183 elif version.startswith('2'):
184 184 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
185 185 elif version == 'S1':
186 186 return streamclone.streamcloneapplier(fh)
187 187 else:
188 188 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
189 189
190 def getbundlespec(ui, fh):
191 """Infer the bundlespec from a bundle file handle.
192
193 The input file handle is seeked and the original seek position is not
194 restored.
195 """
196 def speccompression(alg):
197 for k, v in _bundlespeccompressions.items():
198 if v == alg:
199 return k
200 return None
201
202 b = readbundle(ui, fh, None)
203 if isinstance(b, changegroup.cg1unpacker):
204 alg = b._type
205 if alg == '_truncatedBZ':
206 alg = 'BZ'
207 comp = speccompression(alg)
208 if not comp:
209 raise error.Abort(_('unknown compression algorithm: %s') % alg)
210 return '%s-v1' % comp
211 elif isinstance(b, bundle2.unbundle20):
212 if 'Compression' in b.params:
213 comp = speccompression(b.params['Compression'])
214 if not comp:
215 raise error.Abort(_('unknown compression algorithm: %s') % b.params['Compression'])
216 else:
217 comp = 'none'
218
219 version = None
220 for part in b.iterparts():
221 if part.type == 'changegroup':
222 version = part.params['version']
223 if version in ('01', '02'):
224 version = 'v2'
225 else:
226 raise error.Abort(_('changegroup version %s does not have '
227 'a known bundlespec') % version,
228 hint=_('try upgrading your Mercurial '
229 'client'))
230
231 if not version:
232 raise error.Abort(_('could not identify changegroup version in '
233 'bundle'))
234
235 return '%s-%s' % (comp, version)
236 elif isinstance(b, streamclone.streamcloneapplier):
237 requirements = streamclone.readbundle1header(fh)[2]
238 params = 'requirements=%s' % ','.join(sorted(requirements))
239 return 'none-packed1;%s' % urllib.quote(params)
240 else:
241 raise error.Abort(_('unknown bundle type: %s') % b)
242
190 243 def buildobsmarkerspart(bundler, markers):
191 244 """add an obsmarker part to the bundler with <markers>
192 245
193 246 No part is created if markers is empty.
194 247 Raises ValueError if the bundler doesn't support any known obsmarker format.
195 248 """
196 249 if markers:
197 250 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
198 251 version = obsolete.commonversion(remoteversions)
199 252 if version is None:
200 253 raise ValueError('bundler does not support common obsmarker format')
201 254 stream = obsolete.encodemarkers(markers, True, version=version)
202 255 return bundler.newpart('obsmarkers', data=stream)
203 256 return None
204 257
205 258 def _canusebundle2(op):
206 259 """return true if a pull/push can use bundle2
207 260
208 261 Feel free to nuke this function when we drop the experimental option"""
209 262 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
210 263 and op.remote.capable('bundle2'))
211 264
212 265
213 266 class pushoperation(object):
214 267 """A object that represent a single push operation
215 268
216 269 Its purpose is to carry push-related state and very common operations.
217 270
218 271 A new one should be created at the beginning of each push and discarded
219 272 afterward.
220 273 """
221 274
222 275 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
223 276 bookmarks=()):
224 277 # repo we push from
225 278 self.repo = repo
226 279 self.ui = repo.ui
227 280 # repo we push to
228 281 self.remote = remote
229 282 # force option provided
230 283 self.force = force
231 284 # revs to be pushed (None is "all")
232 285 self.revs = revs
233 286 # bookmark explicitly pushed
234 287 self.bookmarks = bookmarks
235 288 # allow push of new branch
236 289 self.newbranch = newbranch
237 290 # did a local lock get acquired?
238 291 self.locallocked = None
239 292 # steps already performed
240 293 # (used to check what steps have already been performed through bundle2)
241 294 self.stepsdone = set()
242 295 # Integer version of the changegroup push result
243 296 # - None means nothing to push
244 297 # - 0 means HTTP error
245 298 # - 1 means we pushed and remote head count is unchanged *or*
246 299 # we have outgoing changesets but refused to push
247 300 # - other values as described by addchangegroup()
248 301 self.cgresult = None
249 302 # Boolean value for the bookmark push
250 303 self.bkresult = None
251 304 # discovery.outgoing object (contains common and outgoing data)
252 305 self.outgoing = None
253 306 # all remote heads before the push
254 307 self.remoteheads = None
255 308 # testable as a boolean indicating if any nodes are missing locally.
256 309 self.incoming = None
257 310 # phase changes that must be pushed alongside the changesets
258 311 self.outdatedphases = None
259 312 # phase changes that must be pushed if the changeset push fails
260 313 self.fallbackoutdatedphases = None
261 314 # outgoing obsmarkers
262 315 self.outobsmarkers = set()
263 316 # outgoing bookmarks
264 317 self.outbookmarks = []
265 318 # transaction manager
266 319 self.trmanager = None
267 320 # map { pushkey partid -> callback handling failure}
268 321 # used to handle exception from mandatory pushkey part failure
269 322 self.pkfailcb = {}
270 323
271 324 @util.propertycache
272 325 def futureheads(self):
273 326 """future remote heads if the changeset push succeeds"""
274 327 return self.outgoing.missingheads
275 328
276 329 @util.propertycache
277 330 def fallbackheads(self):
278 331 """future remote heads if the changeset push fails"""
279 332 if self.revs is None:
280 333 # no target to push, all common heads are relevant
281 334 return self.outgoing.commonheads
282 335 unfi = self.repo.unfiltered()
283 336 # I want cheads = heads(::missingheads and ::commonheads)
284 337 # (missingheads is revs with secret changeset filtered out)
285 338 #
286 339 # This can be expressed as:
287 340 # cheads = ( (missingheads and ::commonheads)
288 341 # + (commonheads and ::missingheads))"
289 342 # )
290 343 #
291 344 # while trying to push we already computed the following:
292 345 # common = (::commonheads)
293 346 # missing = ((commonheads::missingheads) - commonheads)
294 347 #
295 348 # We can pick:
296 349 # * missingheads part of common (::commonheads)
297 350 common = self.outgoing.common
298 351 nm = self.repo.changelog.nodemap
299 352 cheads = [node for node in self.revs if nm[node] in common]
300 353 # and
301 354 # * commonheads parents on missing
302 355 revset = unfi.set('%ln and parents(roots(%ln))',
303 356 self.outgoing.commonheads,
304 357 self.outgoing.missing)
305 358 cheads.extend(c.node() for c in revset)
306 359 return cheads
307 360
308 361 @property
309 362 def commonheads(self):
310 363 """set of all common heads after changeset bundle push"""
311 364 if self.cgresult:
312 365 return self.futureheads
313 366 else:
314 367 return self.fallbackheads
315 368
316 369 # mapping of message used when pushing bookmark
317 370 bookmsgmap = {'update': (_("updating bookmark %s\n"),
318 371 _('updating bookmark %s failed!\n')),
319 372 'export': (_("exporting bookmark %s\n"),
320 373 _('exporting bookmark %s failed!\n')),
321 374 'delete': (_("deleting remote bookmark %s\n"),
322 375 _('deleting remote bookmark %s failed!\n')),
323 376 }
324 377
325 378
326 379 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
327 380 opargs=None):
328 381 '''Push outgoing changesets (limited by revs) from a local
329 382 repository to remote. Returns the pushoperation; its cgresult is one of:
330 383 - None means nothing to push
331 384 - 0 means HTTP error
332 385 - 1 means we pushed and remote head count is unchanged *or*
333 386 we have outgoing changesets but refused to push
334 387 - other values as described by addchangegroup()
335 388 '''
336 389 if opargs is None:
337 390 opargs = {}
338 391 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
339 392 **opargs)
340 393 if pushop.remote.local():
341 394 missing = (set(pushop.repo.requirements)
342 395 - pushop.remote.local().supported)
343 396 if missing:
344 397 msg = _("required features are not"
345 398 " supported in the destination:"
346 399 " %s") % (', '.join(sorted(missing)))
347 400 raise error.Abort(msg)
348 401
349 402 # there are two ways to push to remote repo:
350 403 #
351 404 # addchangegroup assumes local user can lock remote
352 405 # repo (local filesystem, old ssh servers).
353 406 #
354 407 # unbundle assumes local user cannot lock remote repo (new ssh
355 408 # servers, http servers).
356 409
357 410 if not pushop.remote.canpush():
358 411 raise error.Abort(_("destination does not support push"))
359 412 # get local lock as we might write phase data
360 413 localwlock = locallock = None
361 414 try:
362 415 # bundle2 push may receive a reply bundle touching bookmarks or other
363 416 # things requiring the wlock. Take it now to ensure proper ordering.
364 417 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
365 418 if _canusebundle2(pushop) and maypushback:
366 419 localwlock = pushop.repo.wlock()
367 420 locallock = pushop.repo.lock()
368 421 pushop.locallocked = True
369 422 except IOError as err:
370 423 pushop.locallocked = False
371 424 if err.errno != errno.EACCES:
372 425 raise
373 426 # source repo cannot be locked.
374 427 # We do not abort the push, but just disable the local phase
375 428 # synchronisation.
376 429 msg = 'cannot lock source repository: %s\n' % err
377 430 pushop.ui.debug(msg)
378 431 try:
379 432 if pushop.locallocked:
380 433 pushop.trmanager = transactionmanager(pushop.repo,
381 434 'push-response',
382 435 pushop.remote.url())
383 436 pushop.repo.checkpush(pushop)
384 437 lock = None
385 438 unbundle = pushop.remote.capable('unbundle')
386 439 if not unbundle:
387 440 lock = pushop.remote.lock()
388 441 try:
389 442 _pushdiscovery(pushop)
390 443 if _canusebundle2(pushop):
391 444 _pushbundle2(pushop)
392 445 _pushchangeset(pushop)
393 446 _pushsyncphase(pushop)
394 447 _pushobsolete(pushop)
395 448 _pushbookmark(pushop)
396 449 finally:
397 450 if lock is not None:
398 451 lock.release()
399 452 if pushop.trmanager:
400 453 pushop.trmanager.close()
401 454 finally:
402 455 if pushop.trmanager:
403 456 pushop.trmanager.release()
404 457 if locallock is not None:
405 458 locallock.release()
406 459 if localwlock is not None:
407 460 localwlock.release()
408 461
409 462 return pushop
410 463
411 464 # list of steps to perform discovery before push
412 465 pushdiscoveryorder = []
413 466
414 467 # Mapping between step name and function
415 468 #
416 469 # This exists to help extensions wrap steps if necessary
417 470 pushdiscoverymapping = {}
418 471
419 472 def pushdiscovery(stepname):
420 473 """decorator for function performing discovery before push
421 474
422 475 The function is added to the step -> function mapping and appended to the
423 476 list of steps. Beware that decorated functions will be added in order (this
424 477 may matter).
425 478
426 479 You can only use this decorator for a new step; if you want to wrap a step
427 480 from an extension, change the pushdiscoverymapping dictionary directly."""
428 481 def dec(func):
429 482 assert stepname not in pushdiscoverymapping
430 483 pushdiscoverymapping[stepname] = func
431 484 pushdiscoveryorder.append(stepname)
432 485 return func
433 486 return dec
434 487
435 488 def _pushdiscovery(pushop):
436 489 """Run all discovery steps"""
437 490 for stepname in pushdiscoveryorder:
438 491 step = pushdiscoverymapping[stepname]
439 492 step(pushop)
440 493
441 494 @pushdiscovery('changeset')
442 495 def _pushdiscoverychangeset(pushop):
443 496 """discover the changeset that need to be pushed"""
444 497 fci = discovery.findcommonincoming
445 498 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
446 499 common, inc, remoteheads = commoninc
447 500 fco = discovery.findcommonoutgoing
448 501 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
449 502 commoninc=commoninc, force=pushop.force)
450 503 pushop.outgoing = outgoing
451 504 pushop.remoteheads = remoteheads
452 505 pushop.incoming = inc
453 506
454 507 @pushdiscovery('phase')
455 508 def _pushdiscoveryphase(pushop):
456 509 """discover the phase that needs to be pushed
457 510
458 511 (computed for both success and failure case for changesets push)"""
459 512 outgoing = pushop.outgoing
460 513 unfi = pushop.repo.unfiltered()
461 514 remotephases = pushop.remote.listkeys('phases')
462 515 publishing = remotephases.get('publishing', False)
463 516 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
464 517 and remotephases # server supports phases
465 518 and not pushop.outgoing.missing # no changesets to be pushed
466 519 and publishing):
467 520 # When:
468 521 # - this is a subrepo push
469 522 # - and the remote supports phases
470 523 # - and no changesets are to be pushed
471 524 # - and the remote is publishing
472 525 # We may be in the issue 3871 case!
473 526 # We drop the phase synchronisation that would otherwise be done
474 527 # as a courtesy, since it could publish changesets that are draft
475 528 # locally but present on the remote.
476 529 remotephases = {'publishing': 'True'}
477 530 ana = phases.analyzeremotephases(pushop.repo,
478 531 pushop.fallbackheads,
479 532 remotephases)
480 533 pheads, droots = ana
481 534 extracond = ''
482 535 if not publishing:
483 536 extracond = ' and public()'
484 537 revset = 'heads((%%ln::%%ln) %s)' % extracond
485 538 # Get the list of all revs draft on remote but public here.
486 539 # XXX Beware that the revset breaks if droots is not strictly
487 540 # XXX roots; we may want to ensure it is, but that is costly
488 541 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
489 542 if not outgoing.missing:
490 543 future = fallback
491 544 else:
492 545 # adds changeset we are going to push as draft
493 546 #
494 547 # should not be necessary for a publishing server, but because of an
495 548 # issue fixed in xxxxx we have to do it anyway.
496 549 fdroots = list(unfi.set('roots(%ln + %ln::)',
497 550 outgoing.missing, droots))
498 551 fdroots = [f.node() for f in fdroots]
499 552 future = list(unfi.set(revset, fdroots, pushop.futureheads))
500 553 pushop.outdatedphases = future
501 554 pushop.fallbackoutdatedphases = fallback
502 555
503 556 @pushdiscovery('obsmarker')
504 557 def _pushdiscoveryobsmarkers(pushop):
505 558 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
506 559 and pushop.repo.obsstore
507 560 and 'obsolete' in pushop.remote.listkeys('namespaces')):
508 561 repo = pushop.repo
509 562 # very naive computation that can be quite expensive on big repos.
510 563 # However, evolution is currently slow on them anyway.
511 564 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
512 565 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
513 566
514 567 @pushdiscovery('bookmarks')
515 568 def _pushdiscoverybookmarks(pushop):
516 569 ui = pushop.ui
517 570 repo = pushop.repo.unfiltered()
518 571 remote = pushop.remote
519 572 ui.debug("checking for updated bookmarks\n")
520 573 ancestors = ()
521 574 if pushop.revs:
522 575 revnums = map(repo.changelog.rev, pushop.revs)
523 576 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
524 577 remotebookmark = remote.listkeys('bookmarks')
525 578
526 579 explicit = set(pushop.bookmarks)
527 580
528 581 comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
529 582 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
530 583 for b, scid, dcid in advsrc:
531 584 if b in explicit:
532 585 explicit.remove(b)
533 586 if not ancestors or repo[scid].rev() in ancestors:
534 587 pushop.outbookmarks.append((b, dcid, scid))
535 588 # search added bookmark
536 589 for b, scid, dcid in addsrc:
537 590 if b in explicit:
538 591 explicit.remove(b)
539 592 pushop.outbookmarks.append((b, '', scid))
540 593 # search for overwritten bookmark
541 594 for b, scid, dcid in advdst + diverge + differ:
542 595 if b in explicit:
543 596 explicit.remove(b)
544 597 pushop.outbookmarks.append((b, dcid, scid))
545 598 # search for bookmark to delete
546 599 for b, scid, dcid in adddst:
547 600 if b in explicit:
548 601 explicit.remove(b)
549 602 # treat as "deleted locally"
550 603 pushop.outbookmarks.append((b, dcid, ''))
551 604 # identical bookmarks shouldn't get reported
552 605 for b, scid, dcid in same:
553 606 if b in explicit:
554 607 explicit.remove(b)
555 608
556 609 if explicit:
557 610 explicit = sorted(explicit)
558 611 # we should probably list all of them
559 612 ui.warn(_('bookmark %s does not exist on the local '
560 613 'or remote repository!\n') % explicit[0])
561 614 pushop.bkresult = 2
562 615
563 616 pushop.outbookmarks.sort()
564 617
565 618 def _pushcheckoutgoing(pushop):
566 619 outgoing = pushop.outgoing
567 620 unfi = pushop.repo.unfiltered()
568 621 if not outgoing.missing:
569 622 # nothing to push
570 623 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
571 624 return False
572 625 # something to push
573 626 if not pushop.force:
574 627 # if repo.obsstore is empty --> no obsolete markers;
575 628 # in that case we can skip the iteration
576 629 if unfi.obsstore:
577 630 # these messages are here for the 80-char limit reason
578 631 mso = _("push includes obsolete changeset: %s!")
579 632 mst = {"unstable": _("push includes unstable changeset: %s!"),
580 633 "bumped": _("push includes bumped changeset: %s!"),
581 634 "divergent": _("push includes divergent changeset: %s!")}
582 635 # If there is at least one obsolete or unstable
583 636 # changeset in missing, at least one of the
584 637 # missing heads will be obsolete or unstable.
585 638 # So checking heads only is ok.
586 639 for node in outgoing.missingheads:
587 640 ctx = unfi[node]
588 641 if ctx.obsolete():
589 642 raise error.Abort(mso % ctx)
590 643 elif ctx.troubled():
591 644 raise error.Abort(mst[ctx.troubles()[0]] % ctx)
592 645
593 646 discovery.checkheads(pushop)
594 647 return True
595 648
596 649 # List of names of steps to perform for an outgoing bundle2, order matters.
597 650 b2partsgenorder = []
598 651
599 652 # Mapping between step name and function
600 653 #
601 654 # This exists to help extensions wrap steps if necessary
602 655 b2partsgenmapping = {}
603 656
604 657 def b2partsgenerator(stepname, idx=None):
605 658 """decorator for function generating bundle2 part
606 659
607 660 The function is added to the step -> function mapping and appended to the
608 661 list of steps. Beware that decorated functions will be added in order
609 662 (this may matter).
610 663
611 664 You can only use this decorator for new steps; if you want to wrap a step
612 665 from an extension, change the b2partsgenmapping dictionary directly."""
613 666 def dec(func):
614 667 assert stepname not in b2partsgenmapping
615 668 b2partsgenmapping[stepname] = func
616 669 if idx is None:
617 670 b2partsgenorder.append(stepname)
618 671 else:
619 672 b2partsgenorder.insert(idx, stepname)
620 673 return func
621 674 return dec
622 675
623 676 def _pushb2ctxcheckheads(pushop, bundler):
624 677 """Generate race condition checking parts
625 678
626 679 Exists as an independent function to aid extensions
627 680 """
628 681 if not pushop.force:
629 682 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
630 683
631 684 @b2partsgenerator('changeset')
632 685 def _pushb2ctx(pushop, bundler):
633 686 """handle changegroup push through bundle2
634 687
635 688 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
636 689 """
637 690 if 'changesets' in pushop.stepsdone:
638 691 return
639 692 pushop.stepsdone.add('changesets')
640 693 # Send known heads to the server for race detection.
641 694 if not _pushcheckoutgoing(pushop):
642 695 return
643 696 pushop.repo.prepushoutgoinghooks(pushop.repo,
644 697 pushop.remote,
645 698 pushop.outgoing)
646 699
647 700 _pushb2ctxcheckheads(pushop, bundler)
648 701
649 702 b2caps = bundle2.bundle2caps(pushop.remote)
650 703 version = None
651 704 cgversions = b2caps.get('changegroup')
652 705 if not cgversions: # 3.1 and 3.2 ship with an empty value
653 706 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
654 707 pushop.outgoing)
655 708 else:
656 709 cgversions = [v for v in cgversions
657 710 if v in changegroup.supportedversions(pushop.repo)]
658 711 if not cgversions:
659 712 raise ValueError(_('no common changegroup version'))
660 713 version = max(cgversions)
661 714 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
662 715 pushop.outgoing,
663 716 version=version)
664 717 cgpart = bundler.newpart('changegroup', data=cg)
665 718 if version is not None:
666 719 cgpart.addparam('version', version)
667 720 def handlereply(op):
668 721 """extract addchangegroup returns from server reply"""
669 722 cgreplies = op.records.getreplies(cgpart.id)
670 723 assert len(cgreplies['changegroup']) == 1
671 724 pushop.cgresult = cgreplies['changegroup'][0]['return']
672 725 return handlereply
673 726
674 727 @b2partsgenerator('phase')
675 728 def _pushb2phases(pushop, bundler):
676 729 """handle phase push through bundle2"""
677 730 if 'phases' in pushop.stepsdone:
678 731 return
679 732 b2caps = bundle2.bundle2caps(pushop.remote)
680 733 if 'pushkey' not in b2caps:
681 734 return
682 735 pushop.stepsdone.add('phases')
683 736 part2node = []
684 737
685 738 def handlefailure(pushop, exc):
686 739 targetid = int(exc.partid)
687 740 for partid, node in part2node:
688 741 if partid == targetid:
689 742 raise error.Abort(_('updating %s to public failed') % node)
690 743
691 744 enc = pushkey.encode
692 745 for newremotehead in pushop.outdatedphases:
693 746 part = bundler.newpart('pushkey')
694 747 part.addparam('namespace', enc('phases'))
695 748 part.addparam('key', enc(newremotehead.hex()))
696 749 part.addparam('old', enc(str(phases.draft)))
697 750 part.addparam('new', enc(str(phases.public)))
698 751 part2node.append((part.id, newremotehead))
699 752 pushop.pkfailcb[part.id] = handlefailure
700 753
701 754 def handlereply(op):
702 755 for partid, node in part2node:
703 756 partrep = op.records.getreplies(partid)
704 757 results = partrep['pushkey']
705 758 assert len(results) <= 1
706 759 msg = None
707 760 if not results:
708 761 msg = _('server ignored update of %s to public!\n') % node
709 762 elif not int(results[0]['return']):
710 763 msg = _('updating %s to public failed!\n') % node
711 764 if msg is not None:
712 765 pushop.ui.warn(msg)
713 766 return handlereply
714 767
715 768 @b2partsgenerator('obsmarkers')
716 769 def _pushb2obsmarkers(pushop, bundler):
717 770 if 'obsmarkers' in pushop.stepsdone:
718 771 return
719 772 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
720 773 if obsolete.commonversion(remoteversions) is None:
721 774 return
722 775 pushop.stepsdone.add('obsmarkers')
723 776 if pushop.outobsmarkers:
724 777 markers = sorted(pushop.outobsmarkers)
725 778 buildobsmarkerspart(bundler, markers)
726 779
727 780 @b2partsgenerator('bookmarks')
728 781 def _pushb2bookmarks(pushop, bundler):
729 782 """handle bookmark push through bundle2"""
730 783 if 'bookmarks' in pushop.stepsdone:
731 784 return
732 785 b2caps = bundle2.bundle2caps(pushop.remote)
733 786 if 'pushkey' not in b2caps:
734 787 return
735 788 pushop.stepsdone.add('bookmarks')
736 789 part2book = []
737 790 enc = pushkey.encode
738 791
739 792 def handlefailure(pushop, exc):
740 793 targetid = int(exc.partid)
741 794 for partid, book, action in part2book:
742 795 if partid == targetid:
743 796 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
744 797 # we should not be called for parts we did not generate
745 798 assert False
746 799
747 800 for book, old, new in pushop.outbookmarks:
748 801 part = bundler.newpart('pushkey')
749 802 part.addparam('namespace', enc('bookmarks'))
750 803 part.addparam('key', enc(book))
751 804 part.addparam('old', enc(old))
752 805 part.addparam('new', enc(new))
753 806 action = 'update'
754 807 if not old:
755 808 action = 'export'
756 809 elif not new:
757 810 action = 'delete'
758 811 part2book.append((part.id, book, action))
759 812 pushop.pkfailcb[part.id] = handlefailure
760 813
761 814 def handlereply(op):
762 815 ui = pushop.ui
763 816 for partid, book, action in part2book:
764 817 partrep = op.records.getreplies(partid)
765 818 results = partrep['pushkey']
766 819 assert len(results) <= 1
767 820 if not results:
768 821 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
769 822 else:
770 823 ret = int(results[0]['return'])
771 824 if ret:
772 825 ui.status(bookmsgmap[action][0] % book)
773 826 else:
774 827 ui.warn(bookmsgmap[action][1] % book)
775 828 if pushop.bkresult is not None:
776 829 pushop.bkresult = 1
777 830 return handlereply
778 831
779 832
780 833 def _pushbundle2(pushop):
781 834 """push data to the remote using bundle2
782 835
783 836 The only currently supported type of data is changegroup but this will
784 837 evolve in the future."""
785 838 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
786 839 pushback = (pushop.trmanager
787 840 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
788 841
789 842 # create reply capability
790 843 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
791 844 allowpushback=pushback))
792 845 bundler.newpart('replycaps', data=capsblob)
793 846 replyhandlers = []
794 847 for partgenname in b2partsgenorder:
795 848 partgen = b2partsgenmapping[partgenname]
796 849 ret = partgen(pushop, bundler)
797 850 if callable(ret):
798 851 replyhandlers.append(ret)
799 852 # do not push if nothing to push
800 853 if bundler.nbparts <= 1:
801 854 return
802 855 stream = util.chunkbuffer(bundler.getchunks())
803 856 try:
804 857 try:
805 858 reply = pushop.remote.unbundle(stream, ['force'], 'push')
806 859 except error.BundleValueError as exc:
807 860 raise error.Abort('missing support for %s' % exc)
808 861 try:
809 862 trgetter = None
810 863 if pushback:
811 864 trgetter = pushop.trmanager.transaction
812 865 op = bundle2.processbundle(pushop.repo, reply, trgetter)
813 866 except error.BundleValueError as exc:
814 867 raise error.Abort('missing support for %s' % exc)
815 868 except bundle2.AbortFromPart as exc:
816 869 pushop.ui.status(_('remote: %s\n') % exc)
817 870 raise error.Abort(_('push failed on remote'), hint=exc.hint)
818 871 except error.PushkeyFailed as exc:
819 872 partid = int(exc.partid)
820 873 if partid not in pushop.pkfailcb:
821 874 raise
822 875 pushop.pkfailcb[partid](pushop, exc)
823 876 for rephand in replyhandlers:
824 877 rephand(op)
825 878
826 879 def _pushchangeset(pushop):
827 880 """Make the actual push of changeset bundle to remote repo"""
828 881 if 'changesets' in pushop.stepsdone:
829 882 return
830 883 pushop.stepsdone.add('changesets')
831 884 if not _pushcheckoutgoing(pushop):
832 885 return
833 886 pushop.repo.prepushoutgoinghooks(pushop.repo,
834 887 pushop.remote,
835 888 pushop.outgoing)
836 889 outgoing = pushop.outgoing
837 890 unbundle = pushop.remote.capable('unbundle')
838 891 # TODO: get bundlecaps from remote
839 892 bundlecaps = None
840 893 # create a changegroup from local
841 894 if pushop.revs is None and not (outgoing.excluded
842 895 or pushop.repo.changelog.filteredrevs):
843 896 # push everything,
844 897 # use the fast path, no race possible on push
845 898 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
846 899 cg = changegroup.getsubset(pushop.repo,
847 900 outgoing,
848 901 bundler,
849 902 'push',
850 903 fastpath=True)
851 904 else:
852 905 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
853 906 bundlecaps)
854 907
855 908 # apply changegroup to remote
856 909 if unbundle:
857 910 # local repo finds heads on server, finds out what
858 911 # revs it must push. once revs transferred, if server
859 912 # finds it has different heads (someone else won
860 913 # commit/push race), server aborts.
861 914 if pushop.force:
862 915 remoteheads = ['force']
863 916 else:
864 917 remoteheads = pushop.remoteheads
865 918 # ssh: return remote's addchangegroup()
866 919 # http: return remote's addchangegroup() or 0 for error
867 920 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
868 921 pushop.repo.url())
869 922 else:
870 923 # we return an integer indicating remote head count
871 924 # change
872 925 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
873 926 pushop.repo.url())
874 927
875 928 def _pushsyncphase(pushop):
876 929 """synchronise phase information locally and remotely"""
877 930 cheads = pushop.commonheads
878 931 # even when we don't push, exchanging phase data is useful
879 932 remotephases = pushop.remote.listkeys('phases')
880 933 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
881 934 and remotephases # server supports phases
882 935 and pushop.cgresult is None # nothing was pushed
883 936 and remotephases.get('publishing', False)):
884 937 # When:
885 938 # - this is a subrepo push
886 939 # - and the remote supports phases
887 940 # - and no changesets were pushed
888 941 # - and the remote is publishing
889 942 # We may be in the issue 3871 case!
890 943 # We drop the phase synchronisation that would otherwise be done
891 944 # as a courtesy, since it could publish changesets that are draft
892 945 # locally but present on the remote.
893 946 remotephases = {'publishing': 'True'}
894 947 if not remotephases: # old server, or public-only reply from non-publishing
895 948 _localphasemove(pushop, cheads)
896 949 # don't push any phase data as there is nothing to push
897 950 else:
898 951 ana = phases.analyzeremotephases(pushop.repo, cheads,
899 952 remotephases)
900 953 pheads, droots = ana
901 954 ### Apply remote phase on local
902 955 if remotephases.get('publishing', False):
903 956 _localphasemove(pushop, cheads)
904 957 else: # publish = False
905 958 _localphasemove(pushop, pheads)
906 959 _localphasemove(pushop, cheads, phases.draft)
907 960 ### Apply local phase on remote
908 961
909 962 if pushop.cgresult:
910 963 if 'phases' in pushop.stepsdone:
911 964 # phases already pushed through bundle2
912 965 return
913 966 outdated = pushop.outdatedphases
914 967 else:
915 968 outdated = pushop.fallbackoutdatedphases
916 969
917 970 pushop.stepsdone.add('phases')
918 971
919 972 # filter heads already turned public by the push
920 973 outdated = [c for c in outdated if c.node() not in pheads]
921 974 # fallback to independent pushkey command
922 975 for newremotehead in outdated:
923 976 r = pushop.remote.pushkey('phases',
924 977 newremotehead.hex(),
925 978 str(phases.draft),
926 979 str(phases.public))
927 980 if not r:
928 981 pushop.ui.warn(_('updating %s to public failed!\n')
929 982 % newremotehead)
930 983
931 984 def _localphasemove(pushop, nodes, phase=phases.public):
932 985 """move <nodes> to <phase> in the local source repo"""
933 986 if pushop.trmanager:
934 987 phases.advanceboundary(pushop.repo,
935 988 pushop.trmanager.transaction(),
936 989 phase,
937 990 nodes)
938 991 else:
939 992 # repo is not locked, do not change any phases!
940 993 # Informs the user that phases should have been moved when
941 994 # applicable.
942 995 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
943 996 phasestr = phases.phasenames[phase]
944 997 if actualmoves:
945 998 pushop.ui.status(_('cannot lock source repo, skipping '
946 999 'local %s phase update\n') % phasestr)
947 1000
948 1001 def _pushobsolete(pushop):
949 1002 """utility function to push obsolete markers to a remote"""
950 1003 if 'obsmarkers' in pushop.stepsdone:
951 1004 return
952 1005 repo = pushop.repo
953 1006 remote = pushop.remote
954 1007 pushop.stepsdone.add('obsmarkers')
955 1008 if pushop.outobsmarkers:
956 1009 pushop.ui.debug('try to push obsolete markers to remote\n')
957 1010 rslts = []
958 1011 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
959 1012 for key in sorted(remotedata, reverse=True):
960 1013 # reverse sort to ensure we end with dump0
961 1014 data = remotedata[key]
962 1015 rslts.append(remote.pushkey('obsolete', key, '', data))
963 1016 if [r for r in rslts if not r]:
964 1017 msg = _('failed to push some obsolete markers!\n')
965 1018 repo.ui.warn(msg)
966 1019
967 1020 def _pushbookmark(pushop):
968 1021 """Update bookmark position on remote"""
969 1022 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
970 1023 return
971 1024 pushop.stepsdone.add('bookmarks')
972 1025 ui = pushop.ui
973 1026 remote = pushop.remote
974 1027
975 1028 for b, old, new in pushop.outbookmarks:
976 1029 action = 'update'
977 1030 if not old:
978 1031 action = 'export'
979 1032 elif not new:
980 1033 action = 'delete'
981 1034 if remote.pushkey('bookmarks', b, old, new):
982 1035 ui.status(bookmsgmap[action][0] % b)
983 1036 else:
984 1037 ui.warn(bookmsgmap[action][1] % b)
985 1038 # discovery may have set the value from an invalid entry
986 1039 if pushop.bkresult is not None:
987 1040 pushop.bkresult = 1
988 1041
989 1042 class pulloperation(object):
990 1043 """A object that represent a single pull operation
991 1044
992 1045 Its purpose is to carry pull-related state and very common operations.
993 1046
994 1047 A new one should be created at the beginning of each pull and discarded
995 1048 afterward.
996 1049 """
997 1050
998 1051 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
999 1052 remotebookmarks=None, streamclonerequested=None):
1000 1053 # repo we pull into
1001 1054 self.repo = repo
1002 1055 # repo we pull from
1003 1056 self.remote = remote
1004 1057 # revision we try to pull (None is "all")
1005 1058 self.heads = heads
1006 1059 # bookmark pulled explicitly
1007 1060 self.explicitbookmarks = bookmarks
1008 1061 # do we force pull?
1009 1062 self.force = force
1010 1063 # whether a streaming clone was requested
1011 1064 self.streamclonerequested = streamclonerequested
1012 1065 # transaction manager
1013 1066 self.trmanager = None
1014 1067 # set of common changeset between local and remote before pull
1015 1068 self.common = None
1016 1069 # set of pulled heads
1017 1070 self.rheads = None
1018 1071 # list of missing changesets to fetch remotely
1019 1072 self.fetch = None
1020 1073 # remote bookmarks data
1021 1074 self.remotebookmarks = remotebookmarks
1022 1075 # result of changegroup pulling (used as return code by pull)
1023 1076 self.cgresult = None
1024 1077 # list of steps already done
1025 1078 self.stepsdone = set()
1026 1079 # Whether we attempted a clone from pre-generated bundles.
1027 1080 self.clonebundleattempted = False
1028 1081
1029 1082 @util.propertycache
1030 1083 def pulledsubset(self):
1031 1084 """heads of the set of changeset target by the pull"""
1032 1085 # compute target subset
1033 1086 if self.heads is None:
1034 1087 # We pulled everything possible
1035 1088 # sync on everything common
1036 1089 c = set(self.common)
1037 1090 ret = list(self.common)
1038 1091 for n in self.rheads:
1039 1092 if n not in c:
1040 1093 ret.append(n)
1041 1094 return ret
1042 1095 else:
1043 1096 # We pulled a specific subset
1044 1097 # sync on this subset
1045 1098 return self.heads
1046 1099
1047 1100 @util.propertycache
1048 1101 def canusebundle2(self):
1049 1102 return _canusebundle2(self)
1050 1103
1051 1104 @util.propertycache
1052 1105 def remotebundle2caps(self):
1053 1106 return bundle2.bundle2caps(self.remote)
1054 1107
1055 1108 def gettransaction(self):
1056 1109 # deprecated; talk to trmanager directly
1057 1110 return self.trmanager.transaction()
1058 1111
1059 1112 class transactionmanager(object):
1060 1113 """An object to manage the life cycle of a transaction
1061 1114
1062 1115 It creates the transaction on demand and calls the appropriate hooks when
1063 1116 closing the transaction."""
1064 1117 def __init__(self, repo, source, url):
1065 1118 self.repo = repo
1066 1119 self.source = source
1067 1120 self.url = url
1068 1121 self._tr = None
1069 1122
1070 1123 def transaction(self):
1071 1124 """Return an open transaction object, constructing if necessary"""
1072 1125 if not self._tr:
1073 1126 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1074 1127 self._tr = self.repo.transaction(trname)
1075 1128 self._tr.hookargs['source'] = self.source
1076 1129 self._tr.hookargs['url'] = self.url
1077 1130 return self._tr
1078 1131
1079 1132 def close(self):
1080 1133 """close transaction if created"""
1081 1134 if self._tr is not None:
1082 1135 self._tr.close()
1083 1136
1084 1137 def release(self):
1085 1138 """release transaction if created"""
1086 1139 if self._tr is not None:
1087 1140 self._tr.release()
1088 1141
1089 1142 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1090 1143 streamclonerequested=None):
1091 1144 """Fetch repository data from a remote.
1092 1145
1093 1146 This is the main function used to retrieve data from a remote repository.
1094 1147
1095 1148 ``repo`` is the local repository to clone into.
1096 1149 ``remote`` is a peer instance.
1097 1150 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1098 1151 default) means to pull everything from the remote.
1099 1152 ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
1100 1153 default, all remote bookmarks are pulled.
1101 1154 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1102 1155 initialization.
1103 1156 ``streamclonerequested`` is a boolean indicating whether a "streaming
1104 1157 clone" is requested. A "streaming clone" is essentially a raw file copy
1105 1158 of revlogs from the server. This only works when the local repository is
1106 1159 empty. The default value of ``None`` means to respect the server
1107 1160 configuration for preferring stream clones.
1108 1161
1109 1162 Returns the ``pulloperation`` created for this pull.
1110 1163 """
1111 1164 if opargs is None:
1112 1165 opargs = {}
1113 1166 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1114 1167 streamclonerequested=streamclonerequested, **opargs)
1115 1168 if pullop.remote.local():
1116 1169 missing = set(pullop.remote.requirements) - pullop.repo.supported
1117 1170 if missing:
1118 1171 msg = _("required features are not"
1119 1172 " supported in the destination:"
1120 1173 " %s") % (', '.join(sorted(missing)))
1121 1174 raise error.Abort(msg)
1122 1175
1123 1176 lock = pullop.repo.lock()
1124 1177 try:
1125 1178 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1126 1179 streamclone.maybeperformlegacystreamclone(pullop)
1127 1180 # This should ideally be in _pullbundle2(). However, it needs to run
1128 1181 # before discovery to avoid extra work.
1129 1182 _maybeapplyclonebundle(pullop)
1130 1183 _pulldiscovery(pullop)
1131 1184 if pullop.canusebundle2:
1132 1185 _pullbundle2(pullop)
1133 1186 _pullchangeset(pullop)
1134 1187 _pullphase(pullop)
1135 1188 _pullbookmarks(pullop)
1136 1189 _pullobsolete(pullop)
1137 1190 pullop.trmanager.close()
1138 1191 finally:
1139 1192 pullop.trmanager.release()
1140 1193 lock.release()
1141 1194
1142 1195 return pullop
1143 1196
1144 1197 # list of steps to perform discovery before pull
1145 1198 pulldiscoveryorder = []
1146 1199
1147 1200 # Mapping between step name and function
1148 1201 #
1149 1202 # This exists to help extensions wrap steps if necessary
1150 1203 pulldiscoverymapping = {}
1151 1204
1152 1205 def pulldiscovery(stepname):
1153 1206 """decorator for function performing discovery before pull
1154 1207
1155 1208 The function is added to the step -> function mapping and appended to the
1156 1209 list of steps. Beware that decorated functions will be added in order (this
1157 1210 may matter).
1158 1211
1159 1212 You can only use this decorator for a new step; if you want to wrap a step
1160 1213 from an extension, change the pulldiscoverymapping dictionary directly."""
1161 1214 def dec(func):
1162 1215 assert stepname not in pulldiscoverymapping
1163 1216 pulldiscoverymapping[stepname] = func
1164 1217 pulldiscoveryorder.append(stepname)
1165 1218 return func
1166 1219 return dec
1167 1220
1168 1221 def _pulldiscovery(pullop):
1169 1222 """Run all discovery steps"""
1170 1223 for stepname in pulldiscoveryorder:
1171 1224 step = pulldiscoverymapping[stepname]
1172 1225 step(pullop)
1173 1226
1174 1227 @pulldiscovery('b1:bookmarks')
1175 1228 def _pullbookmarkbundle1(pullop):
1176 1229 """fetch bookmark data in bundle1 case
1177 1230
1178 1231 If not using bundle2, we have to fetch bookmarks before changeset
1179 1232 discovery to reduce the chance and impact of race conditions."""
1180 1233 if pullop.remotebookmarks is not None:
1181 1234 return
1182 1235 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1183 1236 # all known bundle2 servers now support listkeys, but let's be nice with
1184 1237 # new implementations.
1185 1238 return
1186 1239 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1187 1240
1188 1241
1189 1242 @pulldiscovery('changegroup')
1190 1243 def _pulldiscoverychangegroup(pullop):
1191 1244 """discovery phase for the pull
1192 1245
1193 1246 Currently handles changeset discovery only; will eventually handle all
1194 1247 discovery at some point."""
1195 1248 tmp = discovery.findcommonincoming(pullop.repo,
1196 1249 pullop.remote,
1197 1250 heads=pullop.heads,
1198 1251 force=pullop.force)
1199 1252 common, fetch, rheads = tmp
1200 1253 nm = pullop.repo.unfiltered().changelog.nodemap
1201 1254 if fetch and rheads:
1202 1255 # If a remote head is filtered locally, let's drop it from the unknown
1203 1256 # remote heads and put it back in common.
1204 1257 #
1205 1258 # This is a hackish solution to catch most of the "common but locally
1206 1259 # hidden" situations. We do not perform discovery on the unfiltered
1207 1260 # repository because it ends up doing a pathological number of round
1208 1261 # trips for a huge amount of changesets we do not care about.
1209 1262 #
1210 1263 # If a set of such "common but filtered" changesets exists on the server
1211 1264 # but does not include a remote head, we'll not be able to detect it,
1212 1265 scommon = set(common)
1213 1266 filteredrheads = []
1214 1267 for n in rheads:
1215 1268 if n in nm:
1216 1269 if n not in scommon:
1217 1270 common.append(n)
1218 1271 else:
1219 1272 filteredrheads.append(n)
1220 1273 if not filteredrheads:
1221 1274 fetch = []
1222 1275 rheads = filteredrheads
1223 1276 pullop.common = common
1224 1277 pullop.fetch = fetch
1225 1278 pullop.rheads = rheads
1226 1279
1227 1280 def _pullbundle2(pullop):
1228 1281 """pull data using bundle2
1229 1282
1230 1283 For now, the only supported data is the changegroup."""
1231 1284 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1232 1285
1233 1286 streaming, streamreqs = streamclone.canperformstreamclone(pullop)
1234 1287
1235 1288 # pulling changegroup
1236 1289 pullop.stepsdone.add('changegroup')
1237 1290
1238 1291 kwargs['common'] = pullop.common
1239 1292 kwargs['heads'] = pullop.heads or pullop.rheads
1240 1293 kwargs['cg'] = pullop.fetch
1241 1294 if 'listkeys' in pullop.remotebundle2caps:
1242 1295 kwargs['listkeys'] = ['phase']
1243 1296 if pullop.remotebookmarks is None:
1244 1297 # make sure to always include bookmark data when migrating
1245 1298 # `hg incoming --bundle` to using this function.
1246 1299 kwargs['listkeys'].append('bookmarks')
1247 1300
1248 1301 # If this is a full pull / clone and the server supports the clone bundles
1249 1302 # feature, tell the server whether we attempted a clone bundle. The
1250 1303 # presence of this flag indicates the client supports clone bundles. This
1251 1304 # will enable the server to treat clients that support clone bundles
1252 1305 # differently from those that don't.
1253 1306 if (pullop.remote.capable('clonebundles')
1254 1307 and pullop.heads is None and list(pullop.common) == [nullid]):
1255 1308 kwargs['cbattempted'] = pullop.clonebundleattempted
1256 1309
1257 1310 if streaming:
1258 1311 pullop.repo.ui.status(_('streaming all changes\n'))
1259 1312 elif not pullop.fetch:
1260 1313 pullop.repo.ui.status(_("no changes found\n"))
1261 1314 pullop.cgresult = 0
1262 1315 else:
1263 1316 if pullop.heads is None and list(pullop.common) == [nullid]:
1264 1317 pullop.repo.ui.status(_("requesting all changes\n"))
1265 1318 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1266 1319 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1267 1320 if obsolete.commonversion(remoteversions) is not None:
1268 1321 kwargs['obsmarkers'] = True
1269 1322 pullop.stepsdone.add('obsmarkers')
1270 1323 _pullbundle2extraprepare(pullop, kwargs)
1271 1324 bundle = pullop.remote.getbundle('pull', **kwargs)
1272 1325 try:
1273 1326 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1274 1327 except error.BundleValueError as exc:
1275 1328 raise error.Abort('missing support for %s' % exc)
1276 1329
1277 1330 if pullop.fetch:
1278 1331 results = [cg['return'] for cg in op.records['changegroup']]
1279 1332 pullop.cgresult = changegroup.combineresults(results)
1280 1333
1281 1334 # processing phases change
1282 1335 for namespace, value in op.records['listkeys']:
1283 1336 if namespace == 'phases':
1284 1337 _pullapplyphases(pullop, value)
1285 1338
1286 1339 # processing bookmark update
1287 1340 for namespace, value in op.records['listkeys']:
1288 1341 if namespace == 'bookmarks':
1289 1342 pullop.remotebookmarks = value
1290 1343
1291 1344 # bookmark data were either already there or pulled in the bundle
1292 1345 if pullop.remotebookmarks is not None:
1293 1346 _pullbookmarks(pullop)
1294 1347
1295 1348 def _pullbundle2extraprepare(pullop, kwargs):
1296 1349 """hook function so that extensions can extend the getbundle call"""
1297 1350 pass
1298 1351
1299 1352 def _pullchangeset(pullop):
1300 1353 """pull changeset from unbundle into the local repo"""
1301 1354 # We delay opening the transaction as long as possible so we
1302 1355 # don't open a transaction for nothing and don't break future useful
1303 1356 # rollback calls
1304 1357 if 'changegroup' in pullop.stepsdone:
1305 1358 return
1306 1359 pullop.stepsdone.add('changegroup')
1307 1360 if not pullop.fetch:
1308 1361 pullop.repo.ui.status(_("no changes found\n"))
1309 1362 pullop.cgresult = 0
1310 1363 return
1311 1364 pullop.gettransaction()
1312 1365 if pullop.heads is None and list(pullop.common) == [nullid]:
1313 1366 pullop.repo.ui.status(_("requesting all changes\n"))
1314 1367 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1315 1368 # issue1320, avoid a race if remote changed after discovery
1316 1369 pullop.heads = pullop.rheads
1317 1370
1318 1371 if pullop.remote.capable('getbundle'):
1319 1372 # TODO: get bundlecaps from remote
1320 1373 cg = pullop.remote.getbundle('pull', common=pullop.common,
1321 1374 heads=pullop.heads or pullop.rheads)
1322 1375 elif pullop.heads is None:
1323 1376 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1324 1377 elif not pullop.remote.capable('changegroupsubset'):
1325 1378 raise error.Abort(_("partial pull cannot be done because "
1326 1379 "other repository doesn't support "
1327 1380 "changegroupsubset."))
1328 1381 else:
1329 1382 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1330 1383 pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
1331 1384
1332 1385 def _pullphase(pullop):
1333 1386 # Get remote phases data from remote
1334 1387 if 'phases' in pullop.stepsdone:
1335 1388 return
1336 1389 remotephases = pullop.remote.listkeys('phases')
1337 1390 _pullapplyphases(pullop, remotephases)
1338 1391
1339 1392 def _pullapplyphases(pullop, remotephases):
1340 1393 """apply phase movement from observed remote state"""
1341 1394 if 'phases' in pullop.stepsdone:
1342 1395 return
1343 1396 pullop.stepsdone.add('phases')
1344 1397 publishing = bool(remotephases.get('publishing', False))
1345 1398 if remotephases and not publishing:
1346 1399 # remote is new and non-publishing
1347 1400 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1348 1401 pullop.pulledsubset,
1349 1402 remotephases)
1350 1403 dheads = pullop.pulledsubset
1351 1404 else:
1352 1405 # Remote is old or publishing; all common changesets
1353 1406 # should be seen as public
1354 1407 pheads = pullop.pulledsubset
1355 1408 dheads = []
1356 1409 unfi = pullop.repo.unfiltered()
1357 1410 phase = unfi._phasecache.phase
1358 1411 rev = unfi.changelog.nodemap.get
1359 1412 public = phases.public
1360 1413 draft = phases.draft
1361 1414
1362 1415 # exclude changesets already public locally and update the others
1363 1416 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1364 1417 if pheads:
1365 1418 tr = pullop.gettransaction()
1366 1419 phases.advanceboundary(pullop.repo, tr, public, pheads)
1367 1420
1368 1421 # exclude changesets already draft locally and update the others
1369 1422 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1370 1423 if dheads:
1371 1424 tr = pullop.gettransaction()
1372 1425 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1373 1426
1374 1427 def _pullbookmarks(pullop):
1375 1428 """process the remote bookmark information to update the local one"""
1376 1429 if 'bookmarks' in pullop.stepsdone:
1377 1430 return
1378 1431 pullop.stepsdone.add('bookmarks')
1379 1432 repo = pullop.repo
1380 1433 remotebookmarks = pullop.remotebookmarks
1381 1434 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1382 1435 pullop.remote.url(),
1383 1436 pullop.gettransaction,
1384 1437 explicit=pullop.explicitbookmarks)
1385 1438
1386 1439 def _pullobsolete(pullop):
1387 1440 """utility function to pull obsolete markers from a remote
1388 1441
1389 1442 The `gettransaction` function returns the pull transaction, creating
1390 1443 one if necessary. We return the transaction to inform the calling code that
1391 1444 a new transaction has been created (when applicable).
1392 1445
1393 1446 Exists mostly to allow overriding for experimentation purposes."""
1394 1447 if 'obsmarkers' in pullop.stepsdone:
1395 1448 return
1396 1449 pullop.stepsdone.add('obsmarkers')
1397 1450 tr = None
1398 1451 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1399 1452 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1400 1453 remoteobs = pullop.remote.listkeys('obsolete')
1401 1454 if 'dump0' in remoteobs:
1402 1455 tr = pullop.gettransaction()
1403 1456 markers = []
1404 1457 for key in sorted(remoteobs, reverse=True):
1405 1458 if key.startswith('dump'):
1406 1459 data = base85.b85decode(remoteobs[key])
1407 1460 version, newmarks = obsolete._readmarkers(data)
1408 1461 markers += newmarks
1409 1462 if markers:
1410 1463 pullop.repo.obsstore.add(tr, markers)
1411 1464 pullop.repo.invalidatevolatilesets()
1412 1465 return tr
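
# The 'obsolete' listkeys namespace transports markers as base85-encoded
# binary blobs under keys 'dump0', 'dump1', ... A minimal sketch of the
# decoding performed above, assuming a single blob:
#
#   data = base85.b85decode(remoteobs['dump0'])
#   version, markers = obsolete._readmarkers(data)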
1413 1466
1414 1467 def caps20to10(repo):
1415 1468 """return a set with appropriate options to use bundle20 during getbundle"""
1416 1469 caps = set(['HG20'])
1417 1470 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1418 1471 caps.add('bundle2=' + urllib.quote(capsblob))
1419 1472 return caps
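
# The returned set looks roughly like the following (a sketch; the exact
# URI-quoted blob depends on the repository's bundle2 capabilities):
#
#   set(['HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02'])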
1420 1473
1421 1474 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1422 1475 getbundle2partsorder = []
1423 1476
1424 1477 # Mapping between step name and function
1425 1478 #
1426 1479 # This exists to help extensions wrap steps if necessary
1427 1480 getbundle2partsmapping = {}
1428 1481
1429 1482 def getbundle2partsgenerator(stepname, idx=None):
1430 1483 """decorator for function generating bundle2 part for getbundle
1431 1484
1432 1485 The function is added to the step -> function mapping and appended to the
1433 1486 list of steps. Beware that decorated functions will be added in order
1434 1487 (this may matter).
1435 1488
1436 1489 You can only use this decorator for new steps; if you want to wrap a step
1437 1490 from an extension, modify the getbundle2partsmapping dictionary directly."""
1438 1491 def dec(func):
1439 1492 assert stepname not in getbundle2partsmapping
1440 1493 getbundle2partsmapping[stepname] = func
1441 1494 if idx is None:
1442 1495 getbundle2partsorder.append(stepname)
1443 1496 else:
1444 1497 getbundle2partsorder.insert(idx, stepname)
1445 1498 return func
1446 1499 return dec
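
# A minimal sketch of registering a new step with this decorator (the
# 'mypart' name and payload are hypothetical):
#
#   @getbundle2partsgenerator('mypart')
#   def _getbundlemypart(bundler, repo, source, bundlecaps=None,
#                        b2caps=None, **kwargs):
#       bundler.newpart('mypart', data='payload', mandatory=False)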
1447 1500
1448 1501 def bundle2requested(bundlecaps):
1449 1502 if bundlecaps is not None:
1450 1503 return any(cap.startswith('HG2') for cap in bundlecaps)
1451 1504 return False
1452 1505
1453 1506 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1454 1507 **kwargs):
1455 1508 """return a full bundle (with potentially multiple kind of parts)
1456 1509
1457 1510 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1458 1511 passed. For now, the bundle can contain only changegroup, but this will
1459 1512 changes when more part type will be available for bundle2.
1460 1513
1461 1514 This is different from changegroup.getchangegroup that only returns an HG10
1462 1515 changegroup bundle. They may eventually get reunited in the future when we
1463 1516 have a clearer idea of the API we what to query different data.
1464 1517
1465 1518 The implementation is at a very early stage and will get massive rework
1466 1519 when the API of bundle is refined.
1467 1520 """
1468 1521 usebundle2 = bundle2requested(bundlecaps)
1469 1522 # bundle10 case
1470 1523 if not usebundle2:
1471 1524 if bundlecaps and not kwargs.get('cg', True):
1472 1525 raise ValueError(_('request for bundle10 must include changegroup'))
1473 1526
1474 1527 if kwargs:
1475 1528 raise ValueError(_('unsupported getbundle arguments: %s')
1476 1529 % ', '.join(sorted(kwargs.keys())))
1477 1530 return changegroup.getchangegroup(repo, source, heads=heads,
1478 1531 common=common, bundlecaps=bundlecaps)
1479 1532
1480 1533 # bundle20 case
1481 1534 b2caps = {}
1482 1535 for bcaps in bundlecaps:
1483 1536 if bcaps.startswith('bundle2='):
1484 1537 blob = urllib.unquote(bcaps[len('bundle2='):])
1485 1538 b2caps.update(bundle2.decodecaps(blob))
1486 1539 bundler = bundle2.bundle20(repo.ui, b2caps)
1487 1540
1488 1541 kwargs['heads'] = heads
1489 1542 kwargs['common'] = common
1490 1543
1491 1544 for name in getbundle2partsorder:
1492 1545 func = getbundle2partsmapping[name]
1493 1546 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1494 1547 **kwargs)
1495 1548
1496 1549 return util.chunkbuffer(bundler.getchunks())
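
# A caller requesting a bundle2 with an extra listkeys part might look
# roughly like this (a sketch; extra kwargs are forwarded to the part
# generators registered below):
#
#   caps = caps20to10(repo)
#   chunks = getbundle(repo, 'pull', heads=repo.heads(),
#                      bundlecaps=caps, listkeys=['bookmarks'])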
1497 1550
1498 1551 @getbundle2partsgenerator('changegroup')
1499 1552 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1500 1553 b2caps=None, heads=None, common=None, **kwargs):
1501 1554 """add a changegroup part to the requested bundle"""
1502 1555 cg = None
1503 1556 if kwargs.get('cg', True):
1504 1557 # build changegroup bundle here.
1505 1558 version = None
1506 1559 cgversions = b2caps.get('changegroup')
1507 1560 getcgkwargs = {}
1508 1561 if cgversions: # 3.1 and 3.2 ship with an empty value
1509 1562 cgversions = [v for v in cgversions
1510 1563 if v in changegroup.supportedversions(repo)]
1511 1564 if not cgversions:
1512 1565 raise ValueError(_('no common changegroup version'))
1513 1566 version = getcgkwargs['version'] = max(cgversions)
1514 1567 outgoing = changegroup.computeoutgoing(repo, heads, common)
1515 1568 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1516 1569 bundlecaps=bundlecaps,
1517 1570 **getcgkwargs)
1518 1571
1519 1572 if cg:
1520 1573 part = bundler.newpart('changegroup', data=cg)
1521 1574 if version is not None:
1522 1575 part.addparam('version', version)
1523 1576 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1524 1577 if 'treemanifest' in repo.requirements:
1525 1578 part.addparam('treemanifest', '1')
1526 1579
1527 1580 @getbundle2partsgenerator('listkeys')
1528 1581 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1529 1582 b2caps=None, **kwargs):
1530 1583 """add parts containing listkeys namespaces to the requested bundle"""
1531 1584 listkeys = kwargs.get('listkeys', ())
1532 1585 for namespace in listkeys:
1533 1586 part = bundler.newpart('listkeys')
1534 1587 part.addparam('namespace', namespace)
1535 1588 keys = repo.listkeys(namespace).items()
1536 1589 part.data = pushkey.encodekeys(keys)
1537 1590
1538 1591 @getbundle2partsgenerator('obsmarkers')
1539 1592 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1540 1593 b2caps=None, heads=None, **kwargs):
1541 1594 """add an obsolescence markers part to the requested bundle"""
1542 1595 if kwargs.get('obsmarkers', False):
1543 1596 if heads is None:
1544 1597 heads = repo.heads()
1545 1598 subset = [c.node() for c in repo.set('::%ln', heads)]
1546 1599 markers = repo.obsstore.relevantmarkers(subset)
1547 1600 markers = sorted(markers)
1548 1601 buildobsmarkerspart(bundler, markers)
1549 1602
1550 1603 @getbundle2partsgenerator('hgtagsfnodes')
1551 1604 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1552 1605 b2caps=None, heads=None, common=None,
1553 1606 **kwargs):
1554 1607 """Transfer the .hgtags filenodes mapping.
1555 1608
1556 1609 Only values for heads in this bundle will be transferred.
1557 1610
1558 1611 The part data consists of pairs of 20-byte changeset nodes and their
1559 1612 raw .hgtags filenode values.
1560 1613 """
1561 1614 # Don't send unless:
1562 1615 # - changesets are being exchanged,
1563 1616 # - the client supports it.
1564 1617 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1565 1618 return
1566 1619
1567 1620 outgoing = changegroup.computeoutgoing(repo, heads, common)
1568 1621
1569 1622 if not outgoing.missingheads:
1570 1623 return
1571 1624
1572 1625 cache = tags.hgtagsfnodescache(repo.unfiltered())
1573 1626 chunks = []
1574 1627
1575 1628 # .hgtags fnodes are only relevant for head changesets. While we could
1576 1629 # transfer values for all known nodes, there will likely be little to
1577 1630 # no benefit.
1578 1631 #
1579 1632 # We don't bother using a generator to produce output data because
1580 1633 # a) we only have 40 bytes per head and even esoteric numbers of heads
1581 1634 # consume little memory (1M heads is 40MB) b) we don't want to send the
1582 1635 # part if we don't have entries and knowing if we have entries requires
1583 1636 # cache lookups.
1584 1637 for node in outgoing.missingheads:
1585 1638 # Don't compute missing, as this may slow down serving.
1586 1639 fnode = cache.getfnode(node, computemissing=False)
1587 1640 if fnode is not None:
1588 1641 chunks.extend([node, fnode])
1589 1642
1590 1643 if chunks:
1591 1644 bundler.newpart('hgtagsfnodes', data=''.join(chunks))
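
# The part body is thus a flat string of 40-byte records. A receiver can
# recover the (changeset node, .hgtags filenode) pairs with a sketch like:
#
#   for offset in xrange(0, len(data), 40):
#       cnode = data[offset:offset + 20]
#       fnode = data[offset + 20:offset + 40]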
1592 1645
1593 1646 def check_heads(repo, their_heads, context):
1594 1647 """check if the heads of a repo have been modified
1595 1648
1596 1649 Used by peer for unbundling.
1597 1650 """
1598 1651 heads = repo.heads()
1599 1652 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1600 1653 if not (their_heads == ['force'] or their_heads == heads or
1601 1654 their_heads == ['hashed', heads_hash]):
1602 1655 # someone else committed/pushed/unbundled while we
1603 1656 # were transferring data
1604 1657 raise error.PushRaced('repository changed while %s - '
1605 1658 'please try again' % context)
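
# A pushing client could supply the hashed form accepted above by
# computing the same digest over the heads it observed before transfer,
# roughly:
#
#   remoteheads = remote.heads()
#   heads_hash = util.sha1(''.join(sorted(remoteheads))).digest()
#   remote.unbundle(cg, ['hashed', heads_hash], url)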
1606 1659
1607 1660 def unbundle(repo, cg, heads, source, url):
1608 1661 """Apply a bundle to a repo.
1609 1662
1610 1663 This function makes sure the repo is locked during the application and has
1611 1664 a mechanism to check that no push race occurred between the creation of the
1612 1665 bundle and its application.
1613 1666
1614 1667 If the push was raced, a PushRaced exception is raised."""
1615 1668 r = 0
1616 1669 # need a transaction when processing a bundle2 stream
1617 1670 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1618 1671 lockandtr = [None, None, None]
1619 1672 recordout = None
1620 1673 # quick fix for output mismatch with bundle2 in 3.4
1621 1674 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1622 1675 False)
1623 1676 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1624 1677 captureoutput = True
1625 1678 try:
1626 1679 check_heads(repo, heads, 'uploading changes')
1627 1680 # push can proceed
1628 1681 if util.safehasattr(cg, 'params'):
1629 1682 r = None
1630 1683 try:
1631 1684 def gettransaction():
1632 1685 if not lockandtr[2]:
1633 1686 lockandtr[0] = repo.wlock()
1634 1687 lockandtr[1] = repo.lock()
1635 1688 lockandtr[2] = repo.transaction(source)
1636 1689 lockandtr[2].hookargs['source'] = source
1637 1690 lockandtr[2].hookargs['url'] = url
1638 1691 lockandtr[2].hookargs['bundle2'] = '1'
1639 1692 return lockandtr[2]
1640 1693
1641 1694 # Do greedy locking by default until we're satisfied with lazy
1642 1695 # locking.
1643 1696 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1644 1697 gettransaction()
1645 1698
1646 1699 op = bundle2.bundleoperation(repo, gettransaction,
1647 1700 captureoutput=captureoutput)
1648 1701 try:
1649 1702 op = bundle2.processbundle(repo, cg, op=op)
1650 1703 finally:
1651 1704 r = op.reply
1652 1705 if captureoutput and r is not None:
1653 1706 repo.ui.pushbuffer(error=True, subproc=True)
1654 1707 def recordout(output):
1655 1708 r.newpart('output', data=output, mandatory=False)
1656 1709 if lockandtr[2] is not None:
1657 1710 lockandtr[2].close()
1658 1711 except BaseException as exc:
1659 1712 exc.duringunbundle2 = True
1660 1713 if captureoutput and r is not None:
1661 1714 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1662 1715 def recordout(output):
1663 1716 part = bundle2.bundlepart('output', data=output,
1664 1717 mandatory=False)
1665 1718 parts.append(part)
1666 1719 raise
1667 1720 else:
1668 1721 lockandtr[1] = repo.lock()
1669 1722 r = cg.apply(repo, source, url)
1670 1723 finally:
1671 1724 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1672 1725 if recordout is not None:
1673 1726 recordout(repo.ui.popbuffer())
1674 1727 return r
1675 1728
1676 1729 def _maybeapplyclonebundle(pullop):
1677 1730 """Apply a clone bundle from a remote, if possible."""
1678 1731
1679 1732 repo = pullop.repo
1680 1733 remote = pullop.remote
1681 1734
1682 1735 if not repo.ui.configbool('ui', 'clonebundles', True):
1683 1736 return
1684 1737
1685 1738 # Only run if local repo is empty.
1686 1739 if len(repo):
1687 1740 return
1688 1741
1689 1742 if pullop.heads:
1690 1743 return
1691 1744
1692 1745 if not remote.capable('clonebundles'):
1693 1746 return
1694 1747
1695 1748 res = remote._call('clonebundles')
1696 1749
1697 1750 # If we call the wire protocol command, that's good enough to record the
1698 1751 # attempt.
1699 1752 pullop.clonebundleattempted = True
1700 1753
1701 1754 entries = parseclonebundlesmanifest(repo, res)
1702 1755 if not entries:
1703 1756 repo.ui.note(_('no clone bundles available on remote; '
1704 1757 'falling back to regular clone\n'))
1705 1758 return
1706 1759
1707 1760 entries = filterclonebundleentries(repo, entries)
1708 1761 if not entries:
1709 1762 # There is a thundering herd concern here. However, if a server
1710 1763 # operator doesn't advertise bundles appropriate for its clients,
1711 1764 # they deserve what's coming. Furthermore, from a client's
1712 1765 # perspective, no automatic fallback would mean not being able to
1713 1766 # clone!
1714 1767 repo.ui.warn(_('no compatible clone bundles available on server; '
1715 1768 'falling back to regular clone\n'))
1716 1769 repo.ui.warn(_('(you may want to report this to the server '
1717 1770 'operator)\n'))
1718 1771 return
1719 1772
1720 1773 entries = sortclonebundleentries(repo.ui, entries)
1721 1774
1722 1775 url = entries[0]['URL']
1723 1776 repo.ui.status(_('applying clone bundle from %s\n') % url)
1724 1777 if trypullbundlefromurl(repo.ui, repo, url):
1725 1778 repo.ui.status(_('finished applying clone bundle\n'))
1726 1779 # Bundle failed.
1727 1780 #
1728 1781 # We abort by default to avoid the thundering herd of
1729 1782 # clients flooding a server that was expecting expensive
1730 1783 # clone load to be offloaded.
1731 1784 elif repo.ui.configbool('ui', 'clonebundlefallback', False):
1732 1785 repo.ui.warn(_('falling back to normal clone\n'))
1733 1786 else:
1734 1787 raise error.Abort(_('error applying bundle'),
1735 1788 hint=_('if this error persists, consider contacting '
1736 1789 'the server operator or disable clone '
1737 1790 'bundles via '
1738 1791 '"--config ui.clonebundles=false"'))
1739 1792
1740 1793 def parseclonebundlesmanifest(repo, s):
1741 1794 """Parses the raw text of a clone bundles manifest.
1742 1795
1743 1796 Returns a list of dicts. Each dict has a ``URL`` key corresponding
1744 1797 to the URL; the other keys are the attributes for the entry.
1745 1798 """
1746 1799 m = []
1747 1800 for line in s.splitlines():
1748 1801 fields = line.split()
1749 1802 if not fields:
1750 1803 continue
1751 1804 attrs = {'URL': fields[0]}
1752 1805 for rawattr in fields[1:]:
1753 1806 key, value = rawattr.split('=', 1)
1754 1807 key = urllib.unquote(key)
1755 1808 value = urllib.unquote(value)
1756 1809 attrs[key] = value
1757 1810
1758 1811 # Parse BUNDLESPEC into components. This makes client-side
1759 1812 # preferences easier to specify since you can prefer a single
1760 1813 # component of the BUNDLESPEC.
1761 1814 if key == 'BUNDLESPEC':
1762 1815 try:
1763 1816 comp, version, params = parsebundlespec(repo, value,
1764 1817 externalnames=True)
1765 1818 attrs['COMPRESSION'] = comp
1766 1819 attrs['VERSION'] = version
1767 1820 except error.InvalidBundleSpecification:
1768 1821 pass
1769 1822 except error.UnsupportedBundleSpecification:
1770 1823 pass
1771 1824
1772 1825 m.append(attrs)
1773 1826
1774 1827 return m
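
# For example, a manifest line such as (URL hypothetical):
#
#   https://example.com/full.hg BUNDLESPEC=gzip-v1 REQUIRESNI=true
#
# parses to (externalnames=True keeps the human-centric names):
#
#   {'URL': 'https://example.com/full.hg',
#    'BUNDLESPEC': 'gzip-v1',
#    'COMPRESSION': 'gzip',
#    'VERSION': 'v1',
#    'REQUIRESNI': 'true'}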
1775 1828
1776 1829 def filterclonebundleentries(repo, entries):
1777 1830 """Remove incompatible clone bundle manifest entries.
1778 1831
1779 1832 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
1780 1833 and returns a new list consisting of only the entries that this client
1781 1834 should be able to apply.
1782 1835
1783 1836 There is no guarantee we'll be able to apply all returned entries because
1784 1837 the metadata we use to filter on may be missing or wrong.
1785 1838 """
1786 1839 newentries = []
1787 1840 for entry in entries:
1788 1841 spec = entry.get('BUNDLESPEC')
1789 1842 if spec:
1790 1843 try:
1791 1844 parsebundlespec(repo, spec, strict=True)
1792 1845 except error.InvalidBundleSpecification as e:
1793 1846 repo.ui.debug(str(e) + '\n')
1794 1847 continue
1795 1848 except error.UnsupportedBundleSpecification as e:
1796 1849 repo.ui.debug('filtering %s because unsupported bundle '
1797 1850 'spec: %s\n' % (entry['URL'], str(e)))
1798 1851 continue
1799 1852
1800 1853 if 'REQUIRESNI' in entry and not sslutil.hassni:
1801 1854 repo.ui.debug('filtering %s because SNI not supported\n' %
1802 1855 entry['URL'])
1803 1856 continue
1804 1857
1805 1858 newentries.append(entry)
1806 1859
1807 1860 return newentries
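
# A typical pipeline chains the parser and this filter; a sketch, given
# manifest text fetched from the remote:
#
#   entries = parseclonebundlesmanifest(repo, manifesttext)
#   entries = filterclonebundleentries(repo, entries)
#
# e.g. the REQUIRESNI=true entry above is dropped on clients without SNI.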
1808 1861
1809 1862 def sortclonebundleentries(ui, entries):
1810 1863 prefers = ui.configlist('ui', 'clonebundleprefers', default=[])
1811 1864 if not prefers:
1812 1865 return list(entries)
1813 1866
1814 1867 prefers = [p.split('=', 1) for p in prefers]
1815 1868
1816 1869 # Our sort function.
1817 1870 def compareentry(a, b):
1818 1871 for prefkey, prefvalue in prefers:
1819 1872 avalue = a.get(prefkey)
1820 1873 bvalue = b.get(prefkey)
1821 1874
1822 1875 # Special case: b is missing the attribute and a matches exactly.
1823 1876 if avalue is not None and bvalue is None and avalue == prefvalue:
1824 1877 return -1
1825 1878
1826 1879 # Special case: a is missing the attribute and b matches exactly.
1827 1880 if bvalue is not None and avalue is None and bvalue == prefvalue:
1828 1881 return 1
1829 1882
1830 1883 # We can't compare unless the attribute is present on both.
1831 1884 if avalue is None or bvalue is None:
1832 1885 continue
1833 1886
1834 1887 # Same values should fall back to next attribute.
1835 1888 if avalue == bvalue:
1836 1889 continue
1837 1890
1838 1891 # Exact matches come first.
1839 1892 if avalue == prefvalue:
1840 1893 return -1
1841 1894 if bvalue == prefvalue:
1842 1895 return 1
1843 1896
1844 1897 # Fall back to next attribute.
1845 1898 continue
1846 1899
1847 1900 # If we got here we couldn't sort by attributes and prefers. Fall
1848 1901 # back to index order.
1849 1902 return 0
1850 1903
1851 1904 return sorted(entries, cmp=compareentry)
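
# With a configuration like the following, gzip entries sort first and
# ties fall through to the next preference:
#
#   [ui]
#   clonebundleprefers = COMPRESSION=gzip VERSION=v2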
1852 1905
1853 1906 def trypullbundlefromurl(ui, repo, url):
1854 1907 """Attempt to apply a bundle from a URL."""
1855 1908 lock = repo.lock()
1856 1909 try:
1857 1910 tr = repo.transaction('bundleurl')
1858 1911 try:
1859 1912 try:
1860 1913 fh = urlmod.open(ui, url)
1861 1914 cg = readbundle(ui, fh, 'stream')
1862 1915
1863 1916 if isinstance(cg, bundle2.unbundle20):
1864 1917 bundle2.processbundle(repo, cg, lambda: tr)
1865 1918 elif isinstance(cg, streamclone.streamcloneapplier):
1866 1919 cg.apply(repo)
1867 1920 else:
1868 1921 cg.apply(repo, 'clonebundles', url)
1869 1922 tr.close()
1870 1923 return True
1871 1924 except urllib2.HTTPError as e:
1872 1925 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
1873 1926 except urllib2.URLError as e:
1874 1927 ui.warn(_('error fetching bundle: %s\n') % e.reason[1])
1875 1928
1876 1929 return False
1877 1930 finally:
1878 1931 tr.release()
1879 1932 finally:
1880 1933 lock.release()