##// END OF EJS Templates
exchange: add `_getbookmarks()` function...
Stanislau Hlebik -
r30483:8491845a default
parent child Browse files
Show More
@@ -1,1951 +1,1962 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullid,
17 17 )
18 18 from . import (
19 19 base85,
20 20 bookmarks as bookmod,
21 21 bundle2,
22 22 changegroup,
23 23 discovery,
24 24 error,
25 25 lock as lockmod,
26 26 obsolete,
27 27 phases,
28 28 pushkey,
29 29 scmutil,
30 30 sslutil,
31 31 streamclone,
32 32 tags,
33 33 url as urlmod,
34 34 util,
35 35 )
36 36
# Aliases for the urllib error/request compatibility layer provided by util.
urlerr = util.urlerr
urlreq = util.urlreq

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                         }
46 46
def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        # Split "<version>;key=value;..." into the bare version and a dict
        # of URI-decoded parameters.
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
                _('invalid bundle specification; '
                  'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        # Fully-qualified form: "<compression>-<version>[;params]".
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                    _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # generaldelta repos require the v2 changegroup format
            if 'generaldelta' in repo.requirements:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle specification') % spec)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                    _('missing support for repository features: %s') %
                      ', '.join(sorted(missingreqs)))

    if not externalnames:
        # Convert human-centric names to their internal equivalents.
        engine = util.compengines.forbundlename(compression)
        compression = engine.bundletype()[1]
        version = _bundlespeccgversions[version]
    return compression, version, params
158 158
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler object for the bundle read from ``fh``.

    The first four bytes are sniffed to select between changegroup v1,
    bundle2 and stream-clone formats; a raw headerless stream is wrapped
    so it parses as an uncompressed HG10 bundle.  Aborts when the data is
    not a recognizable Mercurial bundle.
    """
    header = changegroup.readexactly(fh, 4)

    compression = None
    if not fname:
        fname = "stream"
        # A headerless stream starting with a NUL byte: fix it up by
        # prepending a synthetic uncompressed HG10 header.
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            compression = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = header[0:2]
    version = header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)

    if version == '10':
        if compression is None:
            # HG10 carries a two-byte compression indicator next.
            compression = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, compression)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    if version == 'S1':
        return streamclone.streamcloneapplier(fh)
    raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
186 186
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        # Map an internal compression type (e.g. 'BZ') to its human-facing
        # bundlespec name (e.g. 'bzip2'); returns None when unknown.
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            # internal marker for a BZ stream whose header was consumed
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        # Derive the version from the changegroup part, if any.
        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        # packed1 specs carry the repo requirements as a parameter.
        requirements = streamclone.readbundle1header(fh)[2]
        params = 'requirements=%s' % ','.join(sorted(requirements))
        return 'none-packed1;%s' % urlreq.quote(params)
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)
239 239
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker format.
    """
    if not markers:
        return None
    # Pick the newest obsmarker format both ends understand.
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler does not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=stream)
254 254
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if not common:
        # no common nodes known: everything from the null revision is new
        common = [nullid]
    else:
        # drop nodes the local changelog does not know about
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)
273 273
274 274 def _forcebundle1(op):
275 275 """return true if a pull/push must use bundle1
276 276
277 277 This function is used to allow testing of the older bundle version"""
278 278 ui = op.repo.ui
279 279 forcebundle1 = False
280 280 # The goal is this config is to allow developer to choose the bundle
281 281 # version used during exchanged. This is especially handy during test.
282 282 # Value is a list of bundle version to be picked from, highest version
283 283 # should be used.
284 284 #
285 285 # developer config: devel.legacy.exchange
286 286 exchange = ui.configlist('devel', 'legacy.exchange')
287 287 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
288 288 return forcebundle1 or not op.remote.capable('bundle2')
289 289
class pushoperation(object):
    """A object that represent a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks (list of (name, old-node-hex, new-node-hex))
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        # cached after first access (util.propertycache)
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        # cgresult truthy means the changegroup was (at least partly) accepted
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of message used when pushing bookmark
    # action -> (success message, failure message), both taking the name
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
401 401
402 402
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **opargs)
    if pushop.remote.local():
        # pushing over the filesystem: the destination must understand all
        # of our repository requirements
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # a transaction manager lets a bundle2 reply write back to us
            pushop.trmanager = transactionmanager(pushop.repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push: we must lock the remote repository ourselves
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if not _forcebundle1(pushop):
                _pushbundle2(pushop)
            # the remaining steps are no-ops when bundle2 already did them
            # (tracked through pushop.stepsdone)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
            if pushop.trmanager:
                pushop.trmanager.close()
    finally:
        # release in reverse order of acquisition
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
487 487
# list of steps to perform discovery before push
# (populated by the @pushdiscovery decorator, in registration order)
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
495 495
def pushdiscovery(stepname):
    """decorator for function performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated function will be added in order (this
    may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pushdiscovery dictionary directly."""
    def register(func):
        # each step name may be registered only once
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
511 511
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    # execute the registered steps in their registration order
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
517 517
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    # figure out what the remote already has...
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    # ...then what we have that the remote is missing
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
530 530
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    ### Apply remote phase on local
    extracond = ''
    if not publishing:
        # a non-publishing server only moves changesets that are public
        # on both sides; restrict the revset accordingly
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    # phases to push when the changeset push succeeds / fails
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
579 579
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect the obsolescence markers relevant to the outgoing heads"""
    repo = pushop.repo
    # the checks mirror the original short-circuit order: exchange enabled,
    # local markers present, remote advertises obsolete support
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
590 590
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """discover which bookmarks must be updated, exported or deleted

    Fills ``pushop.outbookmarks`` with (name, old-hex, new-hex) tuples and
    sets ``pushop.bkresult`` to 2 when an explicitly requested bookmark is
    unknown on both sides.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict bookmark moves to the pushed subset
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # resolve names (e.g. '.') the user asked for explicitly
    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    # bookmarks that advanced locally: push if within the pushed subset
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            # treat as "added locally": empty old value
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        # anything still in `explicit` was not found anywhere
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
642 642
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before pushing.

    Returns False when there is nothing to push (and reports it), True when
    the push may proceed.  Unless the push is forced, aborts if any missing
    head is obsolete or troubled, and runs the remote-head safety checks.
    """
    unfi = pushop.repo.unfiltered()
    outgoing = pushop.outgoing
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # messages bound to short names to stay within the 80
            # character line limit
            obsmsg = _("push includes obsolete changeset: %s!")
            troublemsg = {
                "unstable": _("push includes unstable changeset: %s!"),
                "bumped": _("push includes bumped changeset: %s!"),
                "divergent": _("push includes divergent changeset: %s!"),
            }
            # If any obsolete or unstable changeset is in missing, at
            # least one missing head will itself be obsolete or
            # unstable, so checking only the heads is sufficient.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(obsmsg % ctx)
                if ctx.troubled():
                    raise error.Abort(troublemsg[ctx.troubles()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
673 673
# List of names of steps to perform for an outgoing bundle2, order matters.
# (populated by the @b2partsgenerator decorator)
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
681 681
def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the b2partsgenmapping dictionary directly."""
    def register(func):
        # each step name may be registered only once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            # allow callers to splice a step at a specific position
            b2partsgenorder.insert(idx, stepname)
        return func
    return register
700 700
701 701 def _pushb2ctxcheckheads(pushop, bundler):
702 702 """Generate race condition checking parts
703 703
704 704 Exists as an independent function to aid extensions
705 705 """
706 706 if not pushop.force:
707 707 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
708 708
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    # negotiate the highest changegroup version supported by both sides
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                            pushop.outgoing,
                                            version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
749 749
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Adds one 'pushkey' part per outdated remote head that must be turned
    public and returns a reply handler that warns when the server ignored
    or refused an update.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # use "not in" (PEP 8), matching the check in _pushb2bookmarks
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        # called when a mandatory pushkey part fails server-side; map the
        # failing part id back to its node for a useful error message
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        # check the server reply for every pushkey part we generated
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
790 790
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add the outgoing obsolescence markers to the bundle2, when possible"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    # skip entirely when no common obsmarker format exists
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
802 802
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    # (part id, bookmark name, action) for each generated pushkey part
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        # called when a mandatory pushkey part fails server-side
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # derive the action from old/new: no old -> export, no new -> delete
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        # report the per-bookmark outcome using bookmsgmap messages
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
854 854
855 855
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # can the server push data back to us inside its reply bundle?
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        # part generators may return a callable that will process the
        # server's reply once the bundle round-trip completed
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (only the replycaps part is present)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(
                stream, ['force'], pushop.remote.url())
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
    except bundle2.AbortFromPart as exc:
        pushop.ui.status(_('remote: %s\n') % exc)
        raise error.Abort(_('push failed on remote'), hint=exc.hint)
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        # NOTE(review): registered callbacks are expected to raise; if one
        # returned normally, 'op' would be unbound in the loop below
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
902 902
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Legacy (non-bundle2) changegroup push: builds a cg1 changegroup and
    ships it via the remote's ``unbundle`` or ``addchangegroup`` command,
    storing the outcome in ``pushop.cgresult``.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
949 949
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    First applies the remote's phase information locally, then (if
    changesets were pushed) turns the now-public heads public on the
    remote through individual pushkey calls.
    """
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            # a publishing server considers everything it holds public
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
1005 1005
def _localphasemove(pushop, nodes, phase=phases.public):
    """advance <nodes> to <phase> in the repository we push from

    When no transaction manager is available the repo is not locked, so
    no phase is actually moved; the user is informed instead whenever a
    move would have been needed.
    """
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
        return
    # repo is not locked, do not change any phases!
    # only tell the user that phases should have been moved, when relevant
    repo = pushop.repo
    wouldmove = [n for n in nodes if phase < repo[n].phase()]
    if wouldmove:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n')
                         % phases.phasenames[phase])
1022 1022
def _pushobsolete(pushop):
    """send obsolescence markers to the remote through pushkey

    Markers are escaped into one or more ``dump<N>`` pushkey entries and
    pushed from the highest numbered dump down, so that ``dump0`` is
    transferred last.
    """
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    remote = pushop.remote
    remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
    # reverse sort to ensure we end with dump0
    rslts = [remote.pushkey('obsolete', key, '', remotedata[key])
             for key in sorted(remotedata, reverse=True)]
    if not all(rslts):
        pushop.repo.ui.warn(_('failed to push some obsolete markers!\n'))
1041 1041
def _pushbookmark(pushop):
    """Update bookmark position on remote through the legacy pushkey call"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for book, old, new in pushop.outbookmarks:
        # empty "old" means creation, empty "new" means deletion
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        if remote.pushkey('bookmarks', book, old, new):
            ui.status(bookmsgmap[action][0] % book)
        else:
            ui.warn(bookmsgmap[action][1] % book)
            # discovery may already have flagged an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
1063 1063
class pulloperation(object):
    """A object that represent a single pull operation

    Its purpose is to carry pull related state and very common operations.

    A new object should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly (names expanded through the local
        # bookmark store)
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        # True unless configuration or capabilities force bundle1
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        # bundle2 capabilities advertised by the remote peer
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1134 1134
class transactionmanager(object):
    """Lazily create a transaction and drive its life cycle.

    The transaction is only opened on first use; it carries the exchange
    source and url in its hook arguments so the appropriate hooks fire
    when it is closed."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            tr.hookargs['source'] = self.source
            tr.hookargs['url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close (commit) the transaction, if one was created"""
        tr = self._tr
        if tr is not None:
            tr.close()

    def release(self):
        """abort the transaction, if one was created and not yet closed"""
        tr = self._tr
        if tr is not None:
            tr.release()
1164 1164
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)
    if pullop.remote.local():
        # pulling from a local repo: ensure we can actually read it
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    wlock = lock = None
    try:
        wlock = pullop.repo.wlock()
        lock = pullop.repo.lock()
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        streamclone.maybeperformlegacystreamclone(pullop)
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        # each step below is a no-op if bundle2 already handled it
        # (tracked through pullop.stepsdone)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        lockmod.release(pullop.trmanager, lock, wlock)

    return pullop
1220 1220
# ordered list of step names to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
1228 1228
def pulldiscovery(stepname):
    """decorator registering a discovery step to run before pull

    The decorated function is recorded in the step -> function mapping and
    its name appended to the ordered step list, so declaration order
    matters.

    Only use this decorator for brand new steps; to wrap a step from an
    extension, mutate the pulldiscoverymapping dictionary directly."""
    def register(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoveryorder.append(stepname)
        pulldiscoverymapping[stepname] = func
        return func
    return register
1244 1244
def _pulldiscovery(pullop):
    """run every registered discovery step, in registration order"""
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
1250 1250
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data early when the bundle1 protocol is in use

    Without bundle2, bookmarks must be fetched before changeset discovery
    to reduce the chance and impact of race conditions. With a bundle2
    server advertising 'listkeys' the bookmark data travels inside the
    bundle reply instead, so nothing is done here."""
    bundle2listkeys = (pullop.canusebundle2
                       and 'listkeys' in pullop.remotebundle2caps)
    if pullop.remotebookmarks is None and not bundle2listkeys:
        pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1264 1264
1265 1265
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will grow to handle all
    discovery at some point. Fills in ``pullop.common``, ``pullop.fetch``
    and ``pullop.rheads``."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, let's drop it from the
        # unknown remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but
        # locally hidden" situations. We do not perform discovery on the
        # unfiltered repository because it ends up doing a pathological
        # amount of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but is not reachable from a remote head, we won't be able
        # to detect it.
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                # remote head is known locally (though possibly filtered)
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            # every remote head is already known: nothing to fetch
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1303 1303
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    streaming, streamreqs = streamclone.canperformstreamclone(pullop)

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in pullop.remotebundle2caps:
        kwargs['listkeys'] = ['phases']
        if pullop.remotebookmarks is None:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs['listkeys'].append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1371 1371
1372 1372 def _pullbundle2extraprepare(pullop, kwargs):
1373 1373 """hook function so that extensions can extend the getbundle call"""
1374 1374 pass
1375 1375
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Legacy (non-bundle2) changegroup pull, with fallbacks for older
    servers lacking 'getbundle' or 'changegroupsubset'."""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        # oldest protocol fallback: pull everything missing
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
1408 1408
def _pullphase(pullop):
    """fetch phase data from the remote and apply it locally"""
    if 'phases' not in pullop.stepsdone:
        _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
1415 1415
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the raw 'phases' pushkey namespace listing from
    the remote (may be empty for old servers)."""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1450 1450
def _pullbookmarks(pullop):
    """update the local bookmarks from the remote bookmark information"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    bookmod.updatefromremote(pullop.repo.ui, pullop.repo,
                             pullop.remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1462 1462
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    ``pullop.gettransaction`` is a function that returns the pull
    transaction, creating one if necessary. We return the transaction to
    inform the calling code that a new transaction has been created (when
    applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            # reverse sort: decode from the highest numbered dump down
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr
1490 1490
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return set(['HG20', 'bundle2=' + urlreq.quote(capsblob)])
1497 1497
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
1505 1505
def getbundle2partsgenerator(stepname, idx=None):
    """decorator registering a bundle2 part generator for getbundle

    The decorated function is recorded in the step -> function mapping and
    its name inserted into the ordered step list, appended by default or at
    position ``idx`` when given. Registration order matters.

    Only use this decorator for brand new steps; to wrap a step from an
    extension, mutate the getbundle2partsmapping dictionary directly."""
    def register(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return register
1524 1524
def bundle2requested(bundlecaps):
    """tell whether the client capabilities request a bundle2 stream

    A ``None`` capability set (no caps advertised) means bundle1."""
    if bundlecaps is None:
        return False
    for cap in bundlecaps:
        if cap.startswith('HG2'):
            return True
    return False
1529 1529
def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns an iterator over raw chunks (of varying sizes).
    """
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            # bundle10 only carries a changegroup; anything else is an error
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        bundler = changegroup.getbundler('01', repo, bundlecaps)
        return changegroup.getsubsetraw(repo, outgoing, bundler, source)

    # bundle20 case
    # bundlecaps is necessarily non-None here: bundle2requested found an
    # 'HG2*' entry in it
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # each registered step adds zero or more parts to the bundle
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return bundler.getchunks()
1569 1569
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            # negotiate the highest changegroup version common to both sides
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                                bundlecaps=bundlecaps,
                                                version=version)

    # cg may be None when there is nothing outgoing
    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if cgversions:
            part.addparam('version', version)
        part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')
1597 1597
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add one 'listkeys' part per requested pushkey namespace"""
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1608 1608
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle

    Only the markers relevant to the ancestors of ``heads`` (every head of
    the repo when unspecified) are included."""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    subset = [ctx.node() for ctx in repo.set('::%ln', heads)]
    relevant = repo.obsstore.relevantmarkers(subset)
    buildobsmarkerspart(bundler, sorted(relevant))
1620 1620
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changeset are being exchanged,
    # - the client supports it.
    if not kwargs.get('cg', True) or 'hgtagsfnodes' not in b2caps:
        return

    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missingheads:
        return

    cache = tags.hgtagsfnodescache(repo.unfiltered())

    # .hgtags fnodes are only relevant for head changesets. While we could
    # transfer values for all known nodes, there will likely be little to
    # no benefit.
    #
    # We don't bother using a generator to produce output data because
    # a) we only have 40 bytes per head and even esoteric numbers of heads
    # consume little memory (1M heads is 40MB) b) we don't want to send the
    # part if we don't have entries and knowing if we have entries requires
    # cache lookups.
    chunks = []
    for node in outgoing.missingheads:
        # Don't compute missing, as this may slow down serving.
        fnode = cache.getfnode(node, computemissing=False)
        if fnode is None:
            continue
        chunks.append(node)
        chunks.append(fnode)

    if chunks:
        bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1663 1663
def _getbookmarks(repo, **kwargs):
    """Returns bookmark to node mapping.

    This function is primarily used to generate `bookmarks` bundle2 part.
    It is a separate function in order to make it easy to wrap it
    in extensions. Passing `kwargs` to the function makes it easy to
    add new parameters in extensions.
    """
    # Materialize the (name, binary node) pairs into a plain dict.
    return dict((name, node) for name, node in bookmod.listbinbookmarks(repo))
1674
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    current = repo.heads()
    current_hash = hashlib.sha1(''.join(sorted(current))).digest()
    # The remote may send the literal heads, a hash of them, or 'force' to
    # bypass the check entirely.
    if (their_heads == ['force'] or their_heads == current or
            their_heads == ['hashed', current_hash]):
        return
    # someone else committed/pushed/unbundled while we
    # were transferring data
    raise error.PushRaced('repository changed while %s - '
                          'please try again' % context)
1677 1688
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    # recordout is (re)bound below to a closure that forwards captured output
    # into the reply bundle; the finally clause calls it if it was set.
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # Abort early if the repo heads changed since the bundle was built.
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # bundle2 path: 'params' is only present on bundle2 unbundlers.
            r = None
            try:
                def gettransaction():
                    # Lazily take wlock/lock and open the transaction the
                    # first time a part actually needs one.
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    # Grab the reply bundle even on failure so captured
                    # output can still be forwarded to the client.
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # Tag the exception so upper layers know bundle2 processing
                # had started, and salvage already-generated reply parts.
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
        else:
            # Legacy changegroup path: a plain lock is enough.
            lockandtr[1] = repo.lock()
            r = cg.apply(repo, source, url)
    finally:
        # Release in reverse acquisition order; release() tolerates Nones.
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
1746 1757
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles', True):
        return

    # Only run if local repo is empty and the pull is not restricted to
    # specific heads (i.e. this really is a fresh clone).
    if len(repo) or pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(repo, entries)
    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    elif repo.ui.configbool('ui', 'clonebundlefallback', False):
        # Bundle failed but the operator opted in to a regular clone.
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        # Bundle failed. We abort by default to avoid the thundering herd
        # of clients flooding a server that was expecting expensive clone
        # load to be offloaded.
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))
1810 1821
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    entries = []
    for line in s.splitlines():
        fields = line.split()
        # Skip blank lines.
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except (error.InvalidBundleSpecification,
                        error.UnsupportedBundleSpecification):
                    # Malformed/unknown specs simply don't get the derived
                    # COMPRESSION/VERSION attributes.
                    pass

        entries.append(attrs)

    return entries
1846 1857
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    def _compatible(entry):
        # Reject entries whose bundle spec we can't parse or don't support.
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                return False
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                return False

        # Reject entries requiring SNI when our TLS stack lacks it.
        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            return False

        return True

    return [entry for entry in entries if _compatible(entry)]
1879 1890
def sortclonebundleentries(ui, entries):
    """Sort clone bundle manifest entries by the user's preferences.

    ``ui.clonebundleprefers`` is a list of ``KEY=VALUE`` strings; earlier
    preferences take precedence. An entry whose attribute exactly matches a
    preferred value sorts before one that doesn't (or that lacks the
    attribute). Entries that can't be distinguished keep their manifest
    (index) order. Returns a new list; ``entries`` is not mutated.
    """
    prefers = ui.configlist('ui', 'clonebundleprefers', default=[])
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    # Our sort function.
    def compareentry(a, b):
        for prefkey, prefvalue in prefers:
            avalue = a.get(prefkey)
            bvalue = b.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    # sorted()'s ``cmp`` keyword only exists on Python 2; cmp_to_key gives
    # the exact same ordering semantics and works on Python 3 as well.
    from functools import cmp_to_key
    return sorted(entries, key=cmp_to_key(compareentry))
1923 1934
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Returns True if the bundle was fetched and applied (transaction
    committed), False on HTTP/URL errors (transaction rolled back).
    """
    lock = repo.lock()
    try:
        tr = repo.transaction('bundleurl')
        try:
            try:
                fh = urlmod.open(ui, url)
                cg = readbundle(ui, fh, 'stream')

                # Dispatch on the bundle flavor readbundle() detected.
                if isinstance(cg, bundle2.unbundle20):
                    bundle2.processbundle(repo, cg, lambda: tr)
                elif isinstance(cg, streamclone.streamcloneapplier):
                    # Stream clones bypass the transaction machinery.
                    cg.apply(repo)
                else:
                    cg.apply(repo, 'clonebundles', url)
                tr.close()
                return True
            except urlerr.httperror as e:
                ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
            except urlerr.urlerror as e:
                # NOTE(review): assumes e.reason is an indexable
                # (errno, message) pair; URLError.reason can also be a
                # string or exception object — verify on all error paths.
                ui.warn(_('error fetching bundle: %s\n') % e.reason[1])

            return False
        finally:
            # release() rolls back unless tr.close() already committed.
            tr.release()
    finally:
        lock.release()
General Comments 0
You need to be logged in to leave comments. Login now