exchange: remove need for "locked" variable...
Martin von Zweigbergk
r33789:5904511f default
@@ -1,2011 +1,2009 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullid,
17 17 )
18 18 from . import (
19 19 bookmarks as bookmod,
20 20 bundle2,
21 21 changegroup,
22 22 discovery,
23 23 error,
24 24 lock as lockmod,
25 25 obsolete,
26 26 phases,
27 27 pushkey,
28 28 pycompat,
29 29 scmutil,
30 30 sslutil,
31 31 streamclone,
32 32 url as urlmod,
33 33 util,
34 34 )
35 35
36 36 urlerr = util.urlerr
37 37 urlreq = util.urlreq
38 38
39 39 # Maps bundle version human names to changegroup versions.
40 40 _bundlespeccgversions = {'v1': '01',
41 41 'v2': '02',
42 42 'packed1': 's1',
43 43 'bundle2': '02', #legacy
44 44 }
45 45
46 46 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
47 47 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
48 48
49 49 def parsebundlespec(repo, spec, strict=True, externalnames=False):
50 50 """Parse a bundle string specification into parts.
51 51
52 52 Bundle specifications denote a well-defined bundle/exchange format.
53 53 The content of a given specification should not change over time in
54 54 order to ensure that bundles produced by a newer version of Mercurial are
55 55 readable from an older version.
56 56
57 57 The string currently has the form:
58 58
59 59 <compression>-<type>[;<parameter0>[;<parameter1>]]
60 60
61 61 Where <compression> is one of the supported compression formats
62 62 and <type> is (currently) a version string. A ";" can follow the type and
63 63 all text afterwards is interpreted as URI encoded, ";" delimited key=value
64 64 pairs.
65 65
66 66 If ``strict`` is True (the default) <compression> is required. Otherwise,
67 67 it is optional.
68 68
69 69 If ``externalnames`` is False (the default), the human-centric names will
70 70 be converted to their internal representation.
71 71
72 72 Returns a 3-tuple of (compression, version, parameters). Compression will
73 73 be ``None`` if not in strict mode and a compression isn't defined.
74 74
75 75 An ``InvalidBundleSpecification`` is raised when the specification is
76 76 not syntactically well formed.
77 77
78 78 An ``UnsupportedBundleSpecification`` is raised when the compression or
79 79 bundle type/version is not recognized.
80 80
81 81 Note: this function will likely eventually return a more complex data
82 82 structure, including bundle2 part information.
83 83 """
84 84 def parseparams(s):
85 85 if ';' not in s:
86 86 return s, {}
87 87
88 88 params = {}
89 89 version, paramstr = s.split(';', 1)
90 90
91 91 for p in paramstr.split(';'):
92 92 if '=' not in p:
93 93 raise error.InvalidBundleSpecification(
94 94 _('invalid bundle specification: '
95 95 'missing "=" in parameter: %s') % p)
96 96
97 97 key, value = p.split('=', 1)
98 98 key = urlreq.unquote(key)
99 99 value = urlreq.unquote(value)
100 100 params[key] = value
101 101
102 102 return version, params
103 103
104 104
105 105 if strict and '-' not in spec:
106 106 raise error.InvalidBundleSpecification(
107 107 _('invalid bundle specification; '
108 108 'must be prefixed with compression: %s') % spec)
109 109
110 110 if '-' in spec:
111 111 compression, version = spec.split('-', 1)
112 112
113 113 if compression not in util.compengines.supportedbundlenames:
114 114 raise error.UnsupportedBundleSpecification(
115 115 _('%s compression is not supported') % compression)
116 116
117 117 version, params = parseparams(version)
118 118
119 119 if version not in _bundlespeccgversions:
120 120 raise error.UnsupportedBundleSpecification(
121 121 _('%s is not a recognized bundle version') % version)
122 122 else:
123 123 # Value could be just the compression or just the version, in which
124 124 # case some defaults are assumed (but only when not in strict mode).
125 125 assert not strict
126 126
127 127 spec, params = parseparams(spec)
128 128
129 129 if spec in util.compengines.supportedbundlenames:
130 130 compression = spec
131 131 version = 'v1'
132 132 # Generaldelta repos require v2.
133 133 if 'generaldelta' in repo.requirements:
134 134 version = 'v2'
135 135 # Modern compression engines require v2.
136 136 if compression not in _bundlespecv1compengines:
137 137 version = 'v2'
138 138 elif spec in _bundlespeccgversions:
139 139 if spec == 'packed1':
140 140 compression = 'none'
141 141 else:
142 142 compression = 'bzip2'
143 143 version = spec
144 144 else:
145 145 raise error.UnsupportedBundleSpecification(
146 146 _('%s is not a recognized bundle specification') % spec)
147 147
148 148 # Bundle version 1 only supports a known set of compression engines.
149 149 if version == 'v1' and compression not in _bundlespecv1compengines:
150 150 raise error.UnsupportedBundleSpecification(
151 151 _('compression engine %s is not supported on v1 bundles') %
152 152 compression)
153 153
154 154 # The specification for packed1 can optionally declare the data formats
155 155 # required to apply it. If we see this metadata, compare against what the
156 156 # repo supports and error if the bundle isn't compatible.
157 157 if version == 'packed1' and 'requirements' in params:
158 158 requirements = set(params['requirements'].split(','))
159 159 missingreqs = requirements - repo.supportedformats
160 160 if missingreqs:
161 161 raise error.UnsupportedBundleSpecification(
162 162 _('missing support for repository features: %s') %
163 163 ', '.join(sorted(missingreqs)))
164 164
165 165 if not externalnames:
166 166 engine = util.compengines.forbundlename(compression)
167 167 compression = engine.bundletype()[1]
168 168 version = _bundlespeccgversions[version]
169 169 return compression, version, params
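
# Illustrative behavior of parsebundlespec (editor's sketch, not part of
# this changeset; results assume a repo with the 'generaldelta'
# requirement and default arguments):
#
#   parsebundlespec(repo, 'gzip-v2')
#       -> ('GZ', '02', {})
#   parsebundlespec(repo, 'gzip-v2', externalnames=True)
#       -> ('gzip', 'v2', {})
#   parsebundlespec(repo, 'none-packed1;requirements=revlogv1')
#       -> ('UN', 's1', {'requirements': 'revlogv1'})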
170 170
171 171 def readbundle(ui, fh, fname, vfs=None):
172 172 header = changegroup.readexactly(fh, 4)
173 173
174 174 alg = None
175 175 if not fname:
176 176 fname = "stream"
177 177 if not header.startswith('HG') and header.startswith('\0'):
178 178 fh = changegroup.headerlessfixup(fh, header)
179 179 header = "HG10"
180 180 alg = 'UN'
181 181 elif vfs:
182 182 fname = vfs.join(fname)
183 183
184 184 magic, version = header[0:2], header[2:4]
185 185
186 186 if magic != 'HG':
187 187 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
188 188 if version == '10':
189 189 if alg is None:
190 190 alg = changegroup.readexactly(fh, 2)
191 191 return changegroup.cg1unpacker(fh, alg)
192 192 elif version.startswith('2'):
193 193 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
194 194 elif version == 'S1':
195 195 return streamclone.streamcloneapplier(fh)
196 196 else:
197 197 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
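
# Quick reference for the header dispatch above (editor's note): a 4-byte
# magic+version is read first, so
#   'HG10' + 2-byte compression code ('UN', 'GZ', 'BZ') -> cg1unpacker
#   'HG2x'                                              -> bundle2 unbundler
#   'HGS1' (stream clone, packed1)                      -> streamcloneapplier
# and a headerless stream starting with '\0' is wrapped up as 'HG10UN'.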
198 198
199 199 def getbundlespec(ui, fh):
200 200 """Infer the bundlespec from a bundle file handle.
201 201
202 202 The input file handle is seeked and the original seek position is not
203 203 restored.
204 204 """
205 205 def speccompression(alg):
206 206 try:
207 207 return util.compengines.forbundletype(alg).bundletype()[0]
208 208 except KeyError:
209 209 return None
210 210
211 211 b = readbundle(ui, fh, None)
212 212 if isinstance(b, changegroup.cg1unpacker):
213 213 alg = b._type
214 214 if alg == '_truncatedBZ':
215 215 alg = 'BZ'
216 216 comp = speccompression(alg)
217 217 if not comp:
218 218 raise error.Abort(_('unknown compression algorithm: %s') % alg)
219 219 return '%s-v1' % comp
220 220 elif isinstance(b, bundle2.unbundle20):
221 221 if 'Compression' in b.params:
222 222 comp = speccompression(b.params['Compression'])
223 223 if not comp:
224 224 raise error.Abort(_('unknown compression algorithm: %s') % comp)
225 225 else:
226 226 comp = 'none'
227 227
228 228 version = None
229 229 for part in b.iterparts():
230 230 if part.type == 'changegroup':
231 231 version = part.params['version']
232 232 if version in ('01', '02'):
233 233 version = 'v2'
234 234 else:
235 235 raise error.Abort(_('changegroup version %s does not have '
236 236 'a known bundlespec') % version,
237 237 hint=_('try upgrading your Mercurial '
238 238 'client'))
239 239
240 240 if not version:
241 241 raise error.Abort(_('could not identify changegroup version in '
242 242 'bundle'))
243 243
244 244 return '%s-%s' % (comp, version)
245 245 elif isinstance(b, streamclone.streamcloneapplier):
246 246 requirements = streamclone.readbundle1header(fh)[2]
247 247 params = 'requirements=%s' % ','.join(sorted(requirements))
248 248 return 'none-packed1;%s' % urlreq.quote(params)
249 249 else:
250 250 raise error.Abort(_('unknown bundle type: %s') % b)
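
# Editor's illustrative examples of possible return values: 'bzip2-v1' for
# a changegroup-1 bundle, 'gzip-v2' or 'none-v2' for bundle2, and
# 'none-packed1;requirements=...' for stream clone bundles.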
251 251
252 252 def _computeoutgoing(repo, heads, common):
253 253 """Computes which revs are outgoing given a set of common
254 254 and a set of heads.
255 255
256 256 This is a separate function so extensions can have access to
257 257 the logic.
258 258
259 259 Returns a discovery.outgoing object.
260 260 """
261 261 cl = repo.changelog
262 262 if common:
263 263 hasnode = cl.hasnode
264 264 common = [n for n in common if hasnode(n)]
265 265 else:
266 266 common = [nullid]
267 267 if not heads:
268 268 heads = cl.heads()
269 269 return discovery.outgoing(repo, common, heads)
270 270
271 271 def _forcebundle1(op):
272 272 """return true if a pull/push must use bundle1
273 273
274 274 This function is used to allow testing of the older bundle version"""
275 275 ui = op.repo.ui
276 276 forcebundle1 = False
277 277 # The goal of this config is to allow developers to choose the bundle
278 278 # version used during exchange. This is especially handy during tests.
279 279 # Value is a list of bundle versions to pick from; the highest version
280 280 # should be used.
281 281 #
282 282 # developer config: devel.legacy.exchange
283 283 exchange = ui.configlist('devel', 'legacy.exchange')
284 284 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
285 285 return forcebundle1 or not op.remote.capable('bundle2')
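
# For example, a test can force the bundle1 code path with the following
# config (editor's sketch; listing 'bundle2' as well takes precedence and
# re-enables bundle2):
#
#   [devel]
#   legacy.exchange = bundle1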
286 286
287 287 class pushoperation(object):
288 288 """A object that represent a single push operation
289 289
290 290 Its purpose is to carry push related state and very common operations.
291 291
292 292 A new pushoperation should be created at the beginning of each push and
293 293 discarded afterward.
294 294 """
295 295
296 296 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
297 297 bookmarks=()):
298 298 # repo we push from
299 299 self.repo = repo
300 300 self.ui = repo.ui
301 301 # repo we push to
302 302 self.remote = remote
303 303 # force option provided
304 304 self.force = force
305 305 # revs to be pushed (None is "all")
306 306 self.revs = revs
307 307 # bookmarks explicitly pushed
308 308 self.bookmarks = bookmarks
309 309 # allow push of new branch
310 310 self.newbranch = newbranch
311 311 # steps already performed
312 312 # (used to check which steps have already been performed through bundle2)
313 313 self.stepsdone = set()
314 314 # Integer version of the changegroup push result
315 315 # - None means nothing to push
316 316 # - 0 means HTTP error
317 317 # - 1 means we pushed and remote head count is unchanged *or*
318 318 # we have outgoing changesets but refused to push
319 319 # - other values as described by addchangegroup()
320 320 self.cgresult = None
321 321 # Boolean value for the bookmark push
322 322 self.bkresult = None
323 323 # discovery.outgoing object (contains common and outgoing data)
324 324 self.outgoing = None
325 325 # all remote topological heads before the push
326 326 self.remoteheads = None
327 327 # Details of the remote branch pre and post push
328 328 #
329 329 # mapping: {'branch': ([remoteheads],
330 330 # [newheads],
331 331 # [unsyncedheads],
332 332 # [discardedheads])}
333 333 # - branch: the branch name
334 334 # - remoteheads: the list of remote heads known locally
335 335 # None if the branch is new
336 336 # - newheads: the new remote heads (known locally) with outgoing pushed
337 337 # - unsyncedheads: the list of remote heads unknown locally.
338 338 # - discardedheads: the list of remote heads made obsolete by the push
339 339 self.pushbranchmap = None
340 340 # testable as a boolean indicating if any nodes are missing locally.
341 341 self.incoming = None
342 342 # phase changes that must be pushed alongside the changesets
343 343 self.outdatedphases = None
344 344 # phase changes that must be pushed if the changeset push fails
345 345 self.fallbackoutdatedphases = None
346 346 # outgoing obsmarkers
347 347 self.outobsmarkers = set()
348 348 # outgoing bookmarks
349 349 self.outbookmarks = []
350 350 # transaction manager
351 351 self.trmanager = None
352 352 # map { pushkey partid -> callback handling failure}
353 353 # used to handle exception from mandatory pushkey part failure
354 354 self.pkfailcb = {}
355 355
356 356 @util.propertycache
357 357 def futureheads(self):
358 358 """future remote heads if the changeset push succeeds"""
359 359 return self.outgoing.missingheads
360 360
361 361 @util.propertycache
362 362 def fallbackheads(self):
363 363 """future remote heads if the changeset push fails"""
364 364 if self.revs is None:
365 365 # no target to push, all common heads are relevant
366 366 return self.outgoing.commonheads
367 367 unfi = self.repo.unfiltered()
368 368 # I want cheads = heads(::missingheads and ::commonheads)
369 369 # (missingheads is revs with secret changeset filtered out)
370 370 #
371 371 # This can be expressed as:
372 372 # cheads = ( (missingheads and ::commonheads)
373 373 # + (commonheads and ::missingheads)
374 374 # )
375 375 #
376 376 # while trying to push we already computed the following:
377 377 # common = (::commonheads)
378 378 # missing = ((commonheads::missingheads) - commonheads)
379 379 #
380 380 # We can pick:
381 381 # * missingheads part of common (::commonheads)
382 382 common = self.outgoing.common
383 383 nm = self.repo.changelog.nodemap
384 384 cheads = [node for node in self.revs if nm[node] in common]
385 385 # and
386 386 # * commonheads parents on missing
387 387 revset = unfi.set('%ln and parents(roots(%ln))',
388 388 self.outgoing.commonheads,
389 389 self.outgoing.missing)
390 390 cheads.extend(c.node() for c in revset)
391 391 return cheads
392 392
393 393 @property
394 394 def commonheads(self):
395 395 """set of all common heads after changeset bundle push"""
396 396 if self.cgresult:
397 397 return self.futureheads
398 398 else:
399 399 return self.fallbackheads
400 400
401 401 # mapping of messages used when pushing bookmarks
402 402 bookmsgmap = {'update': (_("updating bookmark %s\n"),
403 403 _('updating bookmark %s failed!\n')),
404 404 'export': (_("exporting bookmark %s\n"),
405 405 _('exporting bookmark %s failed!\n')),
406 406 'delete': (_("deleting remote bookmark %s\n"),
407 407 _('deleting remote bookmark %s failed!\n')),
408 408 }
409 409
410 410
411 411 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
412 412 opargs=None):
413 413 '''Push outgoing changesets (limited by revs) from a local
414 414 repository to remote. Return an integer:
415 415 - None means nothing to push
416 416 - 0 means HTTP error
417 417 - 1 means we pushed and remote head count is unchanged *or*
418 418 we have outgoing changesets but refused to push
419 419 - other values as described by addchangegroup()
420 420 '''
421 421 if opargs is None:
422 422 opargs = {}
423 423 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
424 424 **opargs)
425 425 if pushop.remote.local():
426 426 missing = (set(pushop.repo.requirements)
427 427 - pushop.remote.local().supported)
428 428 if missing:
429 429 msg = _("required features are not"
430 430 " supported in the destination:"
431 431 " %s") % (', '.join(sorted(missing)))
432 432 raise error.Abort(msg)
433 433
434 434 if not pushop.remote.canpush():
435 435 raise error.Abort(_("destination does not support push"))
436 436
437 437 if not pushop.remote.capable('unbundle'):
438 438 raise error.Abort(_('cannot push: destination does not support the '
439 439 'unbundle wire protocol command'))
440 440
441 441 # get lock as we might write phase data
442 442 wlock = lock = None
443 locked = False
444 443 try:
445 444 # bundle2 push may receive a reply bundle touching bookmarks or other
446 445 # things requiring the wlock. Take it now to ensure proper ordering.
447 446 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
448 447 if (not _forcebundle1(pushop)) and maypushback:
449 448 wlock = pushop.repo.wlock()
450 449 lock = pushop.repo.lock()
451 locked = True
450 pushop.trmanager = transactionmanager(pushop.repo,
451 'push-response',
452 pushop.remote.url())
452 453 except IOError as err:
453 454 if err.errno != errno.EACCES:
454 455 raise
455 456 # source repo cannot be locked.
456 457 # We do not abort the push, but just disable the local phase
457 458 # synchronisation.
458 459 msg = 'cannot lock source repository: %s\n' % err
459 460 pushop.ui.debug(msg)
461
460 462 try:
461 if locked:
462 pushop.trmanager = transactionmanager(pushop.repo,
463 'push-response',
464 pushop.remote.url())
465 463 pushop.repo.checkpush(pushop)
466 464 _pushdiscovery(pushop)
467 465 if not _forcebundle1(pushop):
468 466 _pushbundle2(pushop)
469 467 _pushchangeset(pushop)
470 468 _pushsyncphase(pushop)
471 469 _pushobsolete(pushop)
472 470 _pushbookmark(pushop)
473 471
474 472 if pushop.trmanager:
475 473 pushop.trmanager.close()
476 474 finally:
477 475 if pushop.trmanager:
478 476 pushop.trmanager.release()
479 477 if lock is not None:
480 478 lock.release()
481 479 if wlock is not None:
482 480 wlock.release()
483 481
484 482 return pushop
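
# Note on this changeset (editor's summary): the old code tracked a separate
# 'locked' boolean and created the transactionmanager later, guarded by
# 'if locked:'. Creating the transactionmanager right after repo.lock()
# succeeds (inside the same try/except IOError) makes
# 'pushop.trmanager is not None' carry the same information, so the extra
# variable can be dropped.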
485 483
486 484 # list of steps to perform discovery before push
487 485 pushdiscoveryorder = []
488 486
489 487 # Mapping between step name and function
490 488 #
491 489 # This exists to help extensions wrap steps if necessary
492 490 pushdiscoverymapping = {}
493 491
494 492 def pushdiscovery(stepname):
495 493 """decorator for function performing discovery before push
496 494
497 495 The function is added to the step -> function mapping and appended to the
498 496 list of steps. Beware that decorated functions will be added in order (this
499 497 may matter).
500 498
501 499 You can only use this decorator for a new step, if you want to wrap a step
502 500 from an extension, change the pushdiscovery dictionary directly."""
503 501 def dec(func):
504 502 assert stepname not in pushdiscoverymapping
505 503 pushdiscoverymapping[stepname] = func
506 504 pushdiscoveryorder.append(stepname)
507 505 return func
508 506 return dec
509 507
510 508 def _pushdiscovery(pushop):
511 509 """Run all discovery steps"""
512 510 for stepname in pushdiscoveryorder:
513 511 step = pushdiscoverymapping[stepname]
514 512 step(pushop)
515 513
516 514 @pushdiscovery('changeset')
517 515 def _pushdiscoverychangeset(pushop):
518 516 """discover the changeset that need to be pushed"""
519 517 fci = discovery.findcommonincoming
520 518 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
521 519 common, inc, remoteheads = commoninc
522 520 fco = discovery.findcommonoutgoing
523 521 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
524 522 commoninc=commoninc, force=pushop.force)
525 523 pushop.outgoing = outgoing
526 524 pushop.remoteheads = remoteheads
527 525 pushop.incoming = inc
528 526
529 527 @pushdiscovery('phase')
530 528 def _pushdiscoveryphase(pushop):
531 529 """discover the phase that needs to be pushed
532 530
533 531 (computed for both success and failure case for changesets push)"""
534 532 outgoing = pushop.outgoing
535 533 unfi = pushop.repo.unfiltered()
536 534 remotephases = pushop.remote.listkeys('phases')
537 535 publishing = remotephases.get('publishing', False)
538 536 if (pushop.ui.configbool('ui', '_usedassubrepo')
539 537 and remotephases # server supports phases
540 538 and not pushop.outgoing.missing # no changesets to be pushed
541 539 and publishing):
542 540 # When:
543 541 # - this is a subrepo push
544 542 # - and the remote supports phases
545 543 # - and no changesets are to be pushed
546 544 # - and the remote is publishing
547 545 # We may be in the issue 3871 case!
548 546 # We drop the phase synchronisation usually done as a courtesy,
549 547 # which would publish on the remote changesets that are possibly
550 548 # still draft locally.
551 549 remotephases = {'publishing': 'True'}
552 550 ana = phases.analyzeremotephases(pushop.repo,
553 551 pushop.fallbackheads,
554 552 remotephases)
555 553 pheads, droots = ana
556 554 extracond = ''
557 555 if not publishing:
558 556 extracond = ' and public()'
559 557 revset = 'heads((%%ln::%%ln) %s)' % extracond
560 558 # Get the list of all revs that are draft on the remote but public here.
561 559 # XXX Beware that the revset breaks if droots are not strictly
562 560 # XXX roots; we may want to ensure they are, but that is costly
563 561 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
564 562 if not outgoing.missing:
565 563 future = fallback
566 564 else:
567 565 # add the changesets we are going to push as draft
568 566 #
569 567 # This should not be necessary for a publishing server, but because
570 568 # of an issue fixed in xxxxx we have to do it anyway.
571 569 fdroots = list(unfi.set('roots(%ln + %ln::)',
572 570 outgoing.missing, droots))
573 571 fdroots = [f.node() for f in fdroots]
574 572 future = list(unfi.set(revset, fdroots, pushop.futureheads))
575 573 pushop.outdatedphases = future
576 574 pushop.fallbackoutdatedphases = fallback
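
# Editor's note: 'outdatedphases' is computed against futureheads (push
# succeeds) while 'fallbackoutdatedphases' is computed against
# fallbackheads (push fails); _pushsyncphase later picks one of the two
# based on pushop.cgresult.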
577 575
578 576 @pushdiscovery('obsmarker')
579 577 def _pushdiscoveryobsmarkers(pushop):
580 578 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
581 579 and pushop.repo.obsstore
582 580 and 'obsolete' in pushop.remote.listkeys('namespaces')):
583 581 repo = pushop.repo
584 582 # very naive computation that can be quite expensive on big repos.
585 583 # However: evolution is currently slow on them anyway.
586 584 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
587 585 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
588 586
589 587 @pushdiscovery('bookmarks')
590 588 def _pushdiscoverybookmarks(pushop):
591 589 ui = pushop.ui
592 590 repo = pushop.repo.unfiltered()
593 591 remote = pushop.remote
594 592 ui.debug("checking for updated bookmarks\n")
595 593 ancestors = ()
596 594 if pushop.revs:
597 595 revnums = map(repo.changelog.rev, pushop.revs)
598 596 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
599 597 remotebookmark = remote.listkeys('bookmarks')
600 598
601 599 explicit = set([repo._bookmarks.expandname(bookmark)
602 600 for bookmark in pushop.bookmarks])
603 601
604 602 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
605 603 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
606 604
607 605 def safehex(x):
608 606 if x is None:
609 607 return x
610 608 return hex(x)
611 609
612 610 def hexifycompbookmarks(bookmarks):
613 611 for b, scid, dcid in bookmarks:
614 612 yield b, safehex(scid), safehex(dcid)
615 613
616 614 comp = [hexifycompbookmarks(marks) for marks in comp]
617 615 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
618 616
619 617 for b, scid, dcid in advsrc:
620 618 if b in explicit:
621 619 explicit.remove(b)
622 620 if not ancestors or repo[scid].rev() in ancestors:
623 621 pushop.outbookmarks.append((b, dcid, scid))
624 622 # search for added bookmarks
625 623 for b, scid, dcid in addsrc:
626 624 if b in explicit:
627 625 explicit.remove(b)
628 626 pushop.outbookmarks.append((b, '', scid))
629 627 # search for overwritten bookmarks
630 628 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
631 629 if b in explicit:
632 630 explicit.remove(b)
633 631 pushop.outbookmarks.append((b, dcid, scid))
634 632 # search for bookmarks to delete
635 633 for b, scid, dcid in adddst:
636 634 if b in explicit:
637 635 explicit.remove(b)
638 636 # treat as "deleted locally"
639 637 pushop.outbookmarks.append((b, dcid, ''))
640 638 # identical bookmarks shouldn't get reported
641 639 for b, scid, dcid in same:
642 640 if b in explicit:
643 641 explicit.remove(b)
644 642
645 643 if explicit:
646 644 explicit = sorted(explicit)
647 645 # we should probably list all of them
648 646 ui.warn(_('bookmark %s does not exist on the local '
649 647 'or remote repository!\n') % explicit[0])
650 648 pushop.bkresult = 2
651 649
652 650 pushop.outbookmarks.sort()
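
# Editor's note: each pushop.outbookmarks entry is (name, old, new) with
# hex node values; old == '' marks a bookmark new to the remote ('export')
# and new == '' marks one to delete there. _pushbookmark and
# _pushb2bookmarks derive their action from exactly this encoding.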
653 651
654 652 def _pushcheckoutgoing(pushop):
655 653 outgoing = pushop.outgoing
656 654 unfi = pushop.repo.unfiltered()
657 655 if not outgoing.missing:
658 656 # nothing to push
659 657 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
660 658 return False
661 659 # something to push
662 660 if not pushop.force:
663 661 # if repo.obsstore == False --> no obsolete markers,
664 662 # so we can skip the iteration entirely
665 663 if unfi.obsstore:
666 664 # these messages are defined here to stay within the 80-char limit
667 665 mso = _("push includes obsolete changeset: %s!")
668 666 mspd = _("push includes phase-divergent changeset: %s!")
669 667 mscd = _("push includes content-divergent changeset: %s!")
670 668 mst = {"orphan": _("push includes orphan changeset: %s!"),
671 669 "phase-divergent": mspd,
672 670 "content-divergent": mscd}
673 671 # If we are going to push and there is at least one
674 672 # obsolete or unstable changeset in missing, then at
675 673 # least one of the missing heads will be obsolete or
676 674 # unstable. So checking heads only is OK.
677 675 for node in outgoing.missingheads:
678 676 ctx = unfi[node]
679 677 if ctx.obsolete():
680 678 raise error.Abort(mso % ctx)
681 679 elif ctx.isunstable():
682 680 # TODO print more than one instability in the abort
683 681 # message
684 682 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
685 683
686 684 discovery.checkheads(pushop)
687 685 return True
688 686
689 687 # List of names of steps to perform for an outgoing bundle2, order matters.
690 688 b2partsgenorder = []
691 689
692 690 # Mapping between step name and function
693 691 #
694 692 # This exists to help extensions wrap steps if necessary
695 693 b2partsgenmapping = {}
696 694
697 695 def b2partsgenerator(stepname, idx=None):
698 696 """decorator for function generating bundle2 part
699 697
700 698 The function is added to the step -> function mapping and appended to the
701 699 list of steps. Beware that decorated functions will be added in order
702 700 (this may matter).
703 701
704 702 You can only use this decorator for new steps, if you want to wrap a step
705 703 from an extension, change the b2partsgenmapping dictionary directly."""
706 704 def dec(func):
707 705 assert stepname not in b2partsgenmapping
708 706 b2partsgenmapping[stepname] = func
709 707 if idx is None:
710 708 b2partsgenorder.append(stepname)
711 709 else:
712 710 b2partsgenorder.insert(idx, stepname)
713 711 return func
714 712 return dec
715 713
716 714 def _pushb2ctxcheckheads(pushop, bundler):
717 715 """Generate race condition checking parts
718 716
719 717 Exists as an independent function to aid extensions
720 718 """
721 719 # * 'force' does not check for push races,
722 720 # * if we don't push anything, there is nothing to check.
723 721 if not pushop.force and pushop.outgoing.missingheads:
724 722 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
725 723 emptyremote = pushop.pushbranchmap is None
726 724 if not allowunrelated or emptyremote:
727 725 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
728 726 else:
729 727 affected = set()
730 728 for branch, heads in pushop.pushbranchmap.iteritems():
731 729 remoteheads, newheads, unsyncedheads, discardedheads = heads
732 730 if remoteheads is not None:
733 731 remote = set(remoteheads)
734 732 affected |= set(discardedheads) & remote
735 733 affected |= remote - set(newheads)
736 734 if affected:
737 735 data = iter(sorted(affected))
738 736 bundler.newpart('check:updated-heads', data=data)
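
# Editor's note: old servers (or pushes where the remote branchmap is
# unknown) get a 'check:heads' part listing every known remote head, while
# servers advertising 'related' in their checkheads capability only receive
# the heads the push would actually affect, via 'check:updated-heads'.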
739 737
740 738 @b2partsgenerator('changeset')
741 739 def _pushb2ctx(pushop, bundler):
742 740 """handle changegroup push through bundle2
743 741
744 742 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
745 743 """
746 744 if 'changesets' in pushop.stepsdone:
747 745 return
748 746 pushop.stepsdone.add('changesets')
749 747 # Send known heads to the server for race detection.
750 748 if not _pushcheckoutgoing(pushop):
751 749 return
752 750 pushop.repo.prepushoutgoinghooks(pushop)
753 751
754 752 _pushb2ctxcheckheads(pushop, bundler)
755 753
756 754 b2caps = bundle2.bundle2caps(pushop.remote)
757 755 version = '01'
758 756 cgversions = b2caps.get('changegroup')
759 757 if cgversions: # 3.1 and 3.2 ship with an empty value
760 758 cgversions = [v for v in cgversions
761 759 if v in changegroup.supportedoutgoingversions(
762 760 pushop.repo)]
763 761 if not cgversions:
764 762 raise ValueError(_('no common changegroup version'))
765 763 version = max(cgversions)
766 764 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
767 765 pushop.outgoing,
768 766 version=version)
769 767 cgpart = bundler.newpart('changegroup', data=cg)
770 768 if cgversions:
771 769 cgpart.addparam('version', version)
772 770 if 'treemanifest' in pushop.repo.requirements:
773 771 cgpart.addparam('treemanifest', '1')
774 772 def handlereply(op):
775 773 """extract addchangegroup returns from server reply"""
776 774 cgreplies = op.records.getreplies(cgpart.id)
777 775 assert len(cgreplies['changegroup']) == 1
778 776 pushop.cgresult = cgreplies['changegroup'][0]['return']
779 777 return handlereply
780 778
781 779 @b2partsgenerator('phase')
782 780 def _pushb2phases(pushop, bundler):
783 781 """handle phase push through bundle2"""
784 782 if 'phases' in pushop.stepsdone:
785 783 return
786 784 b2caps = bundle2.bundle2caps(pushop.remote)
787 785 if 'pushkey' not in b2caps:
788 786 return
789 787 pushop.stepsdone.add('phases')
790 788 part2node = []
791 789
792 790 def handlefailure(pushop, exc):
793 791 targetid = int(exc.partid)
794 792 for partid, node in part2node:
795 793 if partid == targetid:
796 794 raise error.Abort(_('updating %s to public failed') % node)
797 795
798 796 enc = pushkey.encode
799 797 for newremotehead in pushop.outdatedphases:
800 798 part = bundler.newpart('pushkey')
801 799 part.addparam('namespace', enc('phases'))
802 800 part.addparam('key', enc(newremotehead.hex()))
803 801 part.addparam('old', enc(str(phases.draft)))
804 802 part.addparam('new', enc(str(phases.public)))
805 803 part2node.append((part.id, newremotehead))
806 804 pushop.pkfailcb[part.id] = handlefailure
807 805
808 806 def handlereply(op):
809 807 for partid, node in part2node:
810 808 partrep = op.records.getreplies(partid)
811 809 results = partrep['pushkey']
812 810 assert len(results) <= 1
813 811 msg = None
814 812 if not results:
815 813 msg = _('server ignored update of %s to public!\n') % node
816 814 elif not int(results[0]['return']):
817 815 msg = _('updating %s to public failed!\n') % node
818 816 if msg is not None:
819 817 pushop.ui.warn(msg)
820 818 return handlereply
821 819
822 820 @b2partsgenerator('obsmarkers')
823 821 def _pushb2obsmarkers(pushop, bundler):
824 822 if 'obsmarkers' in pushop.stepsdone:
825 823 return
826 824 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
827 825 if obsolete.commonversion(remoteversions) is None:
828 826 return
829 827 pushop.stepsdone.add('obsmarkers')
830 828 if pushop.outobsmarkers:
831 829 markers = sorted(pushop.outobsmarkers)
832 830 bundle2.buildobsmarkerspart(bundler, markers)
833 831
834 832 @b2partsgenerator('bookmarks')
835 833 def _pushb2bookmarks(pushop, bundler):
836 834 """handle bookmark push through bundle2"""
837 835 if 'bookmarks' in pushop.stepsdone:
838 836 return
839 837 b2caps = bundle2.bundle2caps(pushop.remote)
840 838 if 'pushkey' not in b2caps:
841 839 return
842 840 pushop.stepsdone.add('bookmarks')
843 841 part2book = []
844 842 enc = pushkey.encode
845 843
846 844 def handlefailure(pushop, exc):
847 845 targetid = int(exc.partid)
848 846 for partid, book, action in part2book:
849 847 if partid == targetid:
850 848 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
851 849 # we should not be called for parts we did not generate
852 850 assert False
853 851
854 852 for book, old, new in pushop.outbookmarks:
855 853 part = bundler.newpart('pushkey')
856 854 part.addparam('namespace', enc('bookmarks'))
857 855 part.addparam('key', enc(book))
858 856 part.addparam('old', enc(old))
859 857 part.addparam('new', enc(new))
860 858 action = 'update'
861 859 if not old:
862 860 action = 'export'
863 861 elif not new:
864 862 action = 'delete'
865 863 part2book.append((part.id, book, action))
866 864 pushop.pkfailcb[part.id] = handlefailure
867 865
868 866 def handlereply(op):
869 867 ui = pushop.ui
870 868 for partid, book, action in part2book:
871 869 partrep = op.records.getreplies(partid)
872 870 results = partrep['pushkey']
873 871 assert len(results) <= 1
874 872 if not results:
875 873 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
876 874 else:
877 875 ret = int(results[0]['return'])
878 876 if ret:
879 877 ui.status(bookmsgmap[action][0] % book)
880 878 else:
881 879 ui.warn(bookmsgmap[action][1] % book)
882 880 if pushop.bkresult is not None:
883 881 pushop.bkresult = 1
884 882 return handlereply
885 883
886 884 @b2partsgenerator('pushvars', idx=0)
887 885 def _getbundlesendvars(pushop, bundler):
888 886 '''send shellvars via bundle2'''
889 887 if getattr(pushop.repo, '_shellvars', ()):
890 888 part = bundler.newpart('pushvars')
891 889
892 890 for key, value in pushop.repo._shellvars.iteritems():
893 891 part.addparam(key, value, mandatory=False)
894 892
895 893 def _pushbundle2(pushop):
896 894 """push data to the remote using bundle2
897 895
898 896 The only currently supported type of data is changegroup but this will
899 897 evolve in the future."""
900 898 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
901 899 pushback = (pushop.trmanager
902 900 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
903 901
904 902 # create reply capability
905 903 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
906 904 allowpushback=pushback))
907 905 bundler.newpart('replycaps', data=capsblob)
908 906 replyhandlers = []
909 907 for partgenname in b2partsgenorder:
910 908 partgen = b2partsgenmapping[partgenname]
911 909 ret = partgen(pushop, bundler)
912 910 if callable(ret):
913 911 replyhandlers.append(ret)
914 912 # do not push if nothing to push
915 913 if bundler.nbparts <= 1:
916 914 return
917 915 stream = util.chunkbuffer(bundler.getchunks())
918 916 try:
919 917 try:
920 918 reply = pushop.remote.unbundle(
921 919 stream, ['force'], pushop.remote.url())
922 920 except error.BundleValueError as exc:
923 921 raise error.Abort(_('missing support for %s') % exc)
924 922 try:
925 923 trgetter = None
926 924 if pushback:
927 925 trgetter = pushop.trmanager.transaction
928 926 op = bundle2.processbundle(pushop.repo, reply, trgetter)
929 927 except error.BundleValueError as exc:
930 928 raise error.Abort(_('missing support for %s') % exc)
931 929 except bundle2.AbortFromPart as exc:
932 930 pushop.ui.status(_('remote: %s\n') % exc)
933 931 if exc.hint is not None:
934 932 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
935 933 raise error.Abort(_('push failed on remote'))
936 934 except error.PushkeyFailed as exc:
937 935 partid = int(exc.partid)
938 936 if partid not in pushop.pkfailcb:
939 937 raise
940 938 pushop.pkfailcb[partid](pushop, exc)
941 939 for rephand in replyhandlers:
942 940 rephand(op)
943 941
944 942 def _pushchangeset(pushop):
945 943 """Make the actual push of changeset bundle to remote repo"""
946 944 if 'changesets' in pushop.stepsdone:
947 945 return
948 946 pushop.stepsdone.add('changesets')
949 947 if not _pushcheckoutgoing(pushop):
950 948 return
951 949
952 950 # Should have verified this in push().
953 951 assert pushop.remote.capable('unbundle')
954 952
955 953 pushop.repo.prepushoutgoinghooks(pushop)
956 954 outgoing = pushop.outgoing
957 955 # TODO: get bundlecaps from remote
958 956 bundlecaps = None
959 957 # create a changegroup from local
960 958 if pushop.revs is None and not (outgoing.excluded
961 959 or pushop.repo.changelog.filteredrevs):
962 960 # push everything,
963 961 # use the fast path, no race possible on push
964 962 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
965 963 cg = changegroup.getsubset(pushop.repo,
966 964 outgoing,
967 965 bundler,
968 966 'push',
969 967 fastpath=True)
970 968 else:
971 969 cg = changegroup.getchangegroup(pushop.repo, 'push', outgoing,
972 970 bundlecaps=bundlecaps)
973 971
974 972 # apply changegroup to remote
975 973 # local repo finds heads on server, finds out what
976 974 # revs it must push. once revs transferred, if server
977 975 # finds it has different heads (someone else won
978 976 # commit/push race), server aborts.
979 977 if pushop.force:
980 978 remoteheads = ['force']
981 979 else:
982 980 remoteheads = pushop.remoteheads
983 981 # ssh: return remote's addchangegroup()
984 982 # http: return remote's addchangegroup() or 0 for error
985 983 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
986 984 pushop.repo.url())
987 985
988 986 def _pushsyncphase(pushop):
989 987 """synchronise phase information locally and remotely"""
990 988 cheads = pushop.commonheads
991 989 # even when we don't push, exchanging phase data is useful
992 990 remotephases = pushop.remote.listkeys('phases')
993 991 if (pushop.ui.configbool('ui', '_usedassubrepo')
994 992 and remotephases # server supports phases
995 993 and pushop.cgresult is None # nothing was pushed
996 994 and remotephases.get('publishing', False)):
997 995 # When:
998 996 # - this is a subrepo push
999 997 # - and the remote supports phases
1000 998 # - and no changesets were pushed
1001 999 # - and the remote is publishing
1002 1000 # We may be in the issue 3871 case!
1003 1001 # We drop the phase synchronisation usually done as a courtesy,
1004 1002 # which would publish on the remote changesets that are possibly
1005 1003 # still draft locally.
1006 1004 remotephases = {'publishing': 'True'}
1007 1005 if not remotephases: # old server or public-only reply from non-publishing
1008 1006 _localphasemove(pushop, cheads)
1009 1007 # don't push any phase data as there is nothing to push
1010 1008 else:
1011 1009 ana = phases.analyzeremotephases(pushop.repo, cheads,
1012 1010 remotephases)
1013 1011 pheads, droots = ana
1014 1012 ### Apply remote phase on local
1015 1013 if remotephases.get('publishing', False):
1016 1014 _localphasemove(pushop, cheads)
1017 1015 else: # publish = False
1018 1016 _localphasemove(pushop, pheads)
1019 1017 _localphasemove(pushop, cheads, phases.draft)
1020 1018 ### Apply local phase on remote
1021 1019
1022 1020 if pushop.cgresult:
1023 1021 if 'phases' in pushop.stepsdone:
1024 1022 # phases already pushed through bundle2
1025 1023 return
1026 1024 outdated = pushop.outdatedphases
1027 1025 else:
1028 1026 outdated = pushop.fallbackoutdatedphases
1029 1027
1030 1028 pushop.stepsdone.add('phases')
1031 1029
1032 1030 # filter heads already turned public by the push
1033 1031 outdated = [c for c in outdated if c.node() not in pheads]
1034 1032 # fallback to independent pushkey command
1035 1033 for newremotehead in outdated:
1036 1034 r = pushop.remote.pushkey('phases',
1037 1035 newremotehead.hex(),
1038 1036 str(phases.draft),
1039 1037 str(phases.public))
1040 1038 if not r:
1041 1039 pushop.ui.warn(_('updating %s to public failed!\n')
1042 1040 % newremotehead)
1043 1041
1044 1042 def _localphasemove(pushop, nodes, phase=phases.public):
1045 1043 """move <nodes> to <phase> in the local source repo"""
1046 1044 if pushop.trmanager:
1047 1045 phases.advanceboundary(pushop.repo,
1048 1046 pushop.trmanager.transaction(),
1049 1047 phase,
1050 1048 nodes)
1051 1049 else:
1052 1050 # repo is not locked, do not change any phases!
1053 1051 # Inform the user that phases should have been moved when
1054 1052 # applicable.
1055 1053 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1056 1054 phasestr = phases.phasenames[phase]
1057 1055 if actualmoves:
1058 1056 pushop.ui.status(_('cannot lock source repo, skipping '
1059 1057 'local %s phase update\n') % phasestr)
1060 1058
1061 1059 def _pushobsolete(pushop):
1062 1060 """utility function to push obsolete markers to a remote"""
1063 1061 if 'obsmarkers' in pushop.stepsdone:
1064 1062 return
1065 1063 repo = pushop.repo
1066 1064 remote = pushop.remote
1067 1065 pushop.stepsdone.add('obsmarkers')
1068 1066 if pushop.outobsmarkers:
1069 1067 pushop.ui.debug('try to push obsolete markers to remote\n')
1070 1068 rslts = []
1071 1069 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1072 1070 for key in sorted(remotedata, reverse=True):
1073 1071 # reverse sort to ensure we end with dump0
1074 1072 data = remotedata[key]
1075 1073 rslts.append(remote.pushkey('obsolete', key, '', data))
1076 1074 if [r for r in rslts if not r]:
1077 1075 msg = _('failed to push some obsolete markers!\n')
1078 1076 repo.ui.warn(msg)
1079 1077
1080 1078 def _pushbookmark(pushop):
1081 1079 """Update bookmark position on remote"""
1082 1080 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1083 1081 return
1084 1082 pushop.stepsdone.add('bookmarks')
1085 1083 ui = pushop.ui
1086 1084 remote = pushop.remote
1087 1085
1088 1086 for b, old, new in pushop.outbookmarks:
1089 1087 action = 'update'
1090 1088 if not old:
1091 1089 action = 'export'
1092 1090 elif not new:
1093 1091 action = 'delete'
1094 1092 if remote.pushkey('bookmarks', b, old, new):
1095 1093 ui.status(bookmsgmap[action][0] % b)
1096 1094 else:
1097 1095 ui.warn(bookmsgmap[action][1] % b)
1098 1096 # discovery can have set the value from an invalid entry
1099 1097 if pushop.bkresult is not None:
1100 1098 pushop.bkresult = 1
1101 1099
1102 1100 class pulloperation(object):
1103 1101 """A object that represent a single pull operation
1104 1102
1105 1103 It purpose is to carry pull related state and very common operation.
1106 1104
1107 1105 A new should be created at the beginning of each pull and discarded
1108 1106 afterward.
1109 1107 """
1110 1108
1111 1109 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1112 1110 remotebookmarks=None, streamclonerequested=None):
1113 1111 # repo we pull into
1114 1112 self.repo = repo
1115 1113 # repo we pull from
1116 1114 self.remote = remote
1117 1115 # revision we try to pull (None is "all")
1118 1116 self.heads = heads
1119 1117 # bookmark pulled explicitly
1120 1118 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1121 1119 for bookmark in bookmarks]
1122 1120 # do we force pull?
1123 1121 self.force = force
1124 1122 # whether a streaming clone was requested
1125 1123 self.streamclonerequested = streamclonerequested
1126 1124 # transaction manager
1127 1125 self.trmanager = None
1128 1126 # set of common changesets between local and remote before pull
1129 1127 self.common = None
1130 1128 # set of pulled heads
1131 1129 self.rheads = None
1132 1130 # list of missing changesets to fetch remotely
1133 1131 self.fetch = None
1134 1132 # remote bookmarks data
1135 1133 self.remotebookmarks = remotebookmarks
1136 1134 # result of changegroup pulling (used as return code by pull)
1137 1135 self.cgresult = None
1138 1136 # list of steps already done
1139 1137 self.stepsdone = set()
1140 1138 # Whether we attempted a clone from pre-generated bundles.
1141 1139 self.clonebundleattempted = False
1142 1140
1143 1141 @util.propertycache
1144 1142 def pulledsubset(self):
1145 1143 """heads of the set of changeset target by the pull"""
1146 1144 # compute target subset
1147 1145 if self.heads is None:
1148 1146 # We pulled everything possible
1149 1147 # sync on everything common
1150 1148 c = set(self.common)
1151 1149 ret = list(self.common)
1152 1150 for n in self.rheads:
1153 1151 if n not in c:
1154 1152 ret.append(n)
1155 1153 return ret
1156 1154 else:
1157 1155 # We pulled a specific subset
1158 1156 # sync on this subset
1159 1157 return self.heads
1160 1158
1161 1159 @util.propertycache
1162 1160 def canusebundle2(self):
1163 1161 return not _forcebundle1(self)
1164 1162
1165 1163 @util.propertycache
1166 1164 def remotebundle2caps(self):
1167 1165 return bundle2.bundle2caps(self.remote)
1168 1166
1169 1167 def gettransaction(self):
1170 1168 # deprecated; talk to trmanager directly
1171 1169 return self.trmanager.transaction()
1172 1170
1173 1171 class transactionmanager(object):
1174 1172 """An object to manage the life cycle of a transaction
1175 1173
1176 1174 It creates the transaction on demand and calls the appropriate hooks when
1177 1175 closing the transaction."""
1178 1176 def __init__(self, repo, source, url):
1179 1177 self.repo = repo
1180 1178 self.source = source
1181 1179 self.url = url
1182 1180 self._tr = None
1183 1181
1184 1182 def transaction(self):
1185 1183 """Return an open transaction object, constructing if necessary"""
1186 1184 if not self._tr:
1187 1185 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1188 1186 self._tr = self.repo.transaction(trname)
1189 1187 self._tr.hookargs['source'] = self.source
1190 1188 self._tr.hookargs['url'] = self.url
1191 1189 return self._tr
1192 1190
1193 1191 def close(self):
1194 1192 """close transaction if created"""
1195 1193 if self._tr is not None:
1196 1194 self._tr.close()
1197 1195
1198 1196 def release(self):
1199 1197 """release transaction if created"""
1200 1198 if self._tr is not None:
1201 1199 self._tr.release()
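
# Typical transactionmanager lifecycle, as used by push() and pull() in
# this module (editor's sketch):
#
#   trmanager = transactionmanager(repo, 'pull', remote.url())
#   try:
#       ...                   # steps call trmanager.transaction() on demand
#       trmanager.close()     # commit, if a transaction was opened
#   finally:
#       trmanager.release()   # abort if still open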
1202 1200
1203 1201 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1204 1202 streamclonerequested=None):
1205 1203 """Fetch repository data from a remote.
1206 1204
1207 1205 This is the main function used to retrieve data from a remote repository.
1208 1206
1209 1207 ``repo`` is the local repository to clone into.
1210 1208 ``remote`` is a peer instance.
1211 1209 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1212 1210 default) means to pull everything from the remote.
1213 1211 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1214 1212 default, all remote bookmarks are pulled.
1215 1213 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1216 1214 initialization.
1217 1215 ``streamclonerequested`` is a boolean indicating whether a "streaming
1218 1216 clone" is requested. A "streaming clone" is essentially a raw file copy
1219 1217 of revlogs from the server. This only works when the local repository is
1220 1218 empty. The default value of ``None`` means to respect the server
1221 1219 configuration for preferring stream clones.
1222 1220
1223 1221 Returns the ``pulloperation`` created for this pull.
1224 1222 """
1225 1223 if opargs is None:
1226 1224 opargs = {}
1227 1225 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1228 1226 streamclonerequested=streamclonerequested, **opargs)
1229 1227
1230 1228 peerlocal = pullop.remote.local()
1231 1229 if peerlocal:
1232 1230 missing = set(peerlocal.requirements) - pullop.repo.supported
1233 1231 if missing:
1234 1232 msg = _("required features are not"
1235 1233 " supported in the destination:"
1236 1234 " %s") % (', '.join(sorted(missing)))
1237 1235 raise error.Abort(msg)
1238 1236
1239 1237 wlock = lock = None
1240 1238 try:
1241 1239 wlock = pullop.repo.wlock()
1242 1240 lock = pullop.repo.lock()
1243 1241 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1244 1242 streamclone.maybeperformlegacystreamclone(pullop)
1245 1243 # This should ideally be in _pullbundle2(). However, it needs to run
1246 1244 # before discovery to avoid extra work.
1247 1245 _maybeapplyclonebundle(pullop)
1248 1246 _pulldiscovery(pullop)
1249 1247 if pullop.canusebundle2:
1250 1248 _pullbundle2(pullop)
1251 1249 _pullchangeset(pullop)
1252 1250 _pullphase(pullop)
1253 1251 _pullbookmarks(pullop)
1254 1252 _pullobsolete(pullop)
1255 1253 pullop.trmanager.close()
1256 1254 finally:
1257 1255 lockmod.release(pullop.trmanager, lock, wlock)
1258 1256
1259 1257 return pullop
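
# Hypothetical caller sketch (editor's example, mirroring what the pull
# command does; 'hg.peer' and 'source' are assumed from mercurial.hg):
#
#   other = hg.peer(repo, {}, source)
#   pullop = exchange.pull(repo, other, heads=None, bookmarks=())
#   modheads = pullop.cgresult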
1260 1258
1261 1259 # list of steps to perform discovery before pull
1262 1260 pulldiscoveryorder = []
1263 1261
1264 1262 # Mapping between step name and function
1265 1263 #
1266 1264 # This exists to help extensions wrap steps if necessary
1267 1265 pulldiscoverymapping = {}
1268 1266
1269 1267 def pulldiscovery(stepname):
1270 1268 """decorator for function performing discovery before pull
1271 1269
1272 1270 The function is added to the step -> function mapping and appended to the
1273 1271 list of steps. Beware that decorated functions will be added in order (this
1274 1272 may matter).
1275 1273
1276 1274 You can only use this decorator for a new step, if you want to wrap a step
1277 1275 from an extension, change the pulldiscovery dictionary directly."""
1278 1276 def dec(func):
1279 1277 assert stepname not in pulldiscoverymapping
1280 1278 pulldiscoverymapping[stepname] = func
1281 1279 pulldiscoveryorder.append(stepname)
1282 1280 return func
1283 1281 return dec
1284 1282
1285 1283 def _pulldiscovery(pullop):
1286 1284 """Run all discovery steps"""
1287 1285 for stepname in pulldiscoveryorder:
1288 1286 step = pulldiscoverymapping[stepname]
1289 1287 step(pullop)
1290 1288
1291 1289 @pulldiscovery('b1:bookmarks')
1292 1290 def _pullbookmarkbundle1(pullop):
1293 1291 """fetch bookmark data in bundle1 case
1294 1292
1295 1293 If not using bundle2, we have to fetch bookmarks before changeset
1296 1294 discovery to reduce the chance and impact of race conditions."""
1297 1295 if pullop.remotebookmarks is not None:
1298 1296 return
1299 1297 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1300 1298 # all known bundle2 servers now support listkeys, but let's be nice
1301 1299 # to new implementations.
1302 1300 return
1303 1301 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1304 1302
1305 1303
1306 1304 @pulldiscovery('changegroup')
1307 1305 def _pulldiscoverychangegroup(pullop):
1308 1306 """discovery phase for the pull
1309 1307
1310 1308 Currently handles changeset discovery only; will change to handle all
1311 1309 discovery at some point."""
1312 1310 tmp = discovery.findcommonincoming(pullop.repo,
1313 1311 pullop.remote,
1314 1312 heads=pullop.heads,
1315 1313 force=pullop.force)
1316 1314 common, fetch, rheads = tmp
1317 1315 nm = pullop.repo.unfiltered().changelog.nodemap
1318 1316 if fetch and rheads:
1319 1317 # If a remote head is filtered locally, let's drop it from the unknown
1320 1318 # remote heads and put it back in common.
1321 1319 #
1322 1320 # This is a hackish solution to catch most of the "common but locally
1323 1321 # hidden" situations. We do not perform discovery on the unfiltered
1324 1322 # repository because it ends up doing a pathological amount of round
1325 1323 # trips for a huge amount of changesets we do not care about.
1326 1324 #
1327 1325 # If a set of such "common but filtered" changesets exists on the server
1328 1326 # but does not include a remote head, we'll not be able to detect it,
1329 1327 scommon = set(common)
1330 1328 filteredrheads = []
1331 1329 for n in rheads:
1332 1330 if n in nm:
1333 1331 if n not in scommon:
1334 1332 common.append(n)
1335 1333 else:
1336 1334 filteredrheads.append(n)
1337 1335 if not filteredrheads:
1338 1336 fetch = []
1339 1337 rheads = filteredrheads
1340 1338 pullop.common = common
1341 1339 pullop.fetch = fetch
1342 1340 pullop.rheads = rheads
1343 1341
1344 1342 def _pullbundle2(pullop):
1345 1343 """pull data using bundle2
1346 1344
1347 1345 For now, the only supported data is the changegroup."""
1348 1346 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1349 1347
1350 1348 # At the moment we don't do stream clones over bundle2. If that is
1351 1349 # implemented then here's where the check for that will go.
1352 1350 streaming = False
1353 1351
1354 1352 # pulling changegroup
1355 1353 pullop.stepsdone.add('changegroup')
1356 1354
1357 1355 kwargs['common'] = pullop.common
1358 1356 kwargs['heads'] = pullop.heads or pullop.rheads
1359 1357 kwargs['cg'] = pullop.fetch
1360 1358 if 'listkeys' in pullop.remotebundle2caps:
1361 1359 kwargs['listkeys'] = ['phases']
1362 1360 if pullop.remotebookmarks is None:
1363 1361 # make sure to always include bookmark data when migrating
1364 1362 # `hg incoming --bundle` to using this function.
1365 1363 kwargs['listkeys'].append('bookmarks')
1366 1364
1367 1365 # If this is a full pull / clone and the server supports the clone bundles
1368 1366 # feature, tell the server whether we attempted a clone bundle. The
1369 1367 # presence of this flag indicates the client supports clone bundles. This
1370 1368 # will enable the server to treat clients that support clone bundles
1371 1369 # differently from those that don't.
1372 1370 if (pullop.remote.capable('clonebundles')
1373 1371 and pullop.heads is None and list(pullop.common) == [nullid]):
1374 1372 kwargs['cbattempted'] = pullop.clonebundleattempted
1375 1373
1376 1374 if streaming:
1377 1375 pullop.repo.ui.status(_('streaming all changes\n'))
1378 1376 elif not pullop.fetch:
1379 1377 pullop.repo.ui.status(_("no changes found\n"))
1380 1378 pullop.cgresult = 0
1381 1379 else:
1382 1380 if pullop.heads is None and list(pullop.common) == [nullid]:
1383 1381 pullop.repo.ui.status(_("requesting all changes\n"))
1384 1382 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1385 1383 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1386 1384 if obsolete.commonversion(remoteversions) is not None:
1387 1385 kwargs['obsmarkers'] = True
1388 1386 pullop.stepsdone.add('obsmarkers')
1389 1387 _pullbundle2extraprepare(pullop, kwargs)
1390 1388 bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
1391 1389 try:
1392 1390 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1393 1391 except bundle2.AbortFromPart as exc:
1394 1392 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1395 1393 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1396 1394 except error.BundleValueError as exc:
1397 1395 raise error.Abort(_('missing support for %s') % exc)
1398 1396
1399 1397 if pullop.fetch:
1400 1398 pullop.cgresult = bundle2.combinechangegroupresults(op)
1401 1399
1402 1400 # processing phases change
1403 1401 for namespace, value in op.records['listkeys']:
1404 1402 if namespace == 'phases':
1405 1403 _pullapplyphases(pullop, value)
1406 1404
1407 1405 # processing bookmark update
1408 1406 for namespace, value in op.records['listkeys']:
1409 1407 if namespace == 'bookmarks':
1410 1408 pullop.remotebookmarks = value
1411 1409
1412 1410 # bookmark data were either already there or pulled in the bundle
1413 1411 if pullop.remotebookmarks is not None:
1414 1412 _pullbookmarks(pullop)
1415 1413
1416 1414 def _pullbundle2extraprepare(pullop, kwargs):
1417 1415 """hook function so that extensions can extend the getbundle call"""
1418 1416 pass
1419 1417
1420 1418 def _pullchangeset(pullop):
1421 1419 """pull changeset from unbundle into the local repo"""
1422 1420 # We delay opening the transaction as late as possible so we don't
1423 1421 # open a transaction for nothing, which would break future useful
1424 1422 # rollback calls
1425 1423 if 'changegroup' in pullop.stepsdone:
1426 1424 return
1427 1425 pullop.stepsdone.add('changegroup')
1428 1426 if not pullop.fetch:
1429 1427 pullop.repo.ui.status(_("no changes found\n"))
1430 1428 pullop.cgresult = 0
1431 1429 return
1432 1430 tr = pullop.gettransaction()
1433 1431 if pullop.heads is None and list(pullop.common) == [nullid]:
1434 1432 pullop.repo.ui.status(_("requesting all changes\n"))
1435 1433 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1436 1434 # issue1320, avoid a race if remote changed after discovery
1437 1435 pullop.heads = pullop.rheads
1438 1436
1439 1437 if pullop.remote.capable('getbundle'):
1440 1438 # TODO: get bundlecaps from remote
1441 1439 cg = pullop.remote.getbundle('pull', common=pullop.common,
1442 1440 heads=pullop.heads or pullop.rheads)
1443 1441 elif pullop.heads is None:
1444 1442 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1445 1443 elif not pullop.remote.capable('changegroupsubset'):
1446 1444 raise error.Abort(_("partial pull cannot be done because "
1447 1445 "other repository doesn't support "
1448 1446 "changegroupsubset."))
1449 1447 else:
1450 1448 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1451 1449 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1452 1450 pullop.remote.url())
1453 1451 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1454 1452
1455 1453 def _pullphase(pullop):
1456 1454 # Get remote phases data from remote
1457 1455 if 'phases' in pullop.stepsdone:
1458 1456 return
1459 1457 remotephases = pullop.remote.listkeys('phases')
1460 1458 _pullapplyphases(pullop, remotephases)
1461 1459
1462 1460 def _pullapplyphases(pullop, remotephases):
1463 1461 """apply phase movement from observed remote state"""
1464 1462 if 'phases' in pullop.stepsdone:
1465 1463 return
1466 1464 pullop.stepsdone.add('phases')
1467 1465 publishing = bool(remotephases.get('publishing', False))
1468 1466 if remotephases and not publishing:
1469 1467 # remote is new and non-publishing
1470 1468 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1471 1469 pullop.pulledsubset,
1472 1470 remotephases)
1473 1471 dheads = pullop.pulledsubset
1474 1472 else:
1475 1473 # Remote is old or publishing all common changesets
1476 1474 # should be seen as public
1477 1475 pheads = pullop.pulledsubset
1478 1476 dheads = []
1479 1477 unfi = pullop.repo.unfiltered()
1480 1478 phase = unfi._phasecache.phase
1481 1479 rev = unfi.changelog.nodemap.get
1482 1480 public = phases.public
1483 1481 draft = phases.draft
1484 1482
1485 1483 # exclude changesets already public locally and update the others
1486 1484 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1487 1485 if pheads:
1488 1486 tr = pullop.gettransaction()
1489 1487 phases.advanceboundary(pullop.repo, tr, public, pheads)
1490 1488
1491 1489 # exclude changesets already draft locally and update the others
1492 1490 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1493 1491 if dheads:
1494 1492 tr = pullop.gettransaction()
1495 1493 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1496 1494
1497 1495 def _pullbookmarks(pullop):
1498 1496 """process the remote bookmark information to update the local one"""
1499 1497 if 'bookmarks' in pullop.stepsdone:
1500 1498 return
1501 1499 pullop.stepsdone.add('bookmarks')
1502 1500 repo = pullop.repo
1503 1501 remotebookmarks = pullop.remotebookmarks
1504 1502 remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
1505 1503 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1506 1504 pullop.remote.url(),
1507 1505 pullop.gettransaction,
1508 1506 explicit=pullop.explicitbookmarks)
1509 1507
1510 1508 def _pullobsolete(pullop):
1511 1509 """utility function to pull obsolete markers from a remote
1512 1510
1513 1511 The `gettransaction` is function that return the pull transaction, creating
1514 1512 one if necessary. We return the transaction to inform the calling code that
1515 1513 a new transaction have been created (when applicable).
1516 1514
1517 1515 Exists mostly to allow overriding for experimentation purpose"""
1518 1516 if 'obsmarkers' in pullop.stepsdone:
1519 1517 return
1520 1518 pullop.stepsdone.add('obsmarkers')
1521 1519 tr = None
1522 1520 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1523 1521 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1524 1522 remoteobs = pullop.remote.listkeys('obsolete')
1525 1523 if 'dump0' in remoteobs:
1526 1524 tr = pullop.gettransaction()
1527 1525 markers = []
1528 1526 for key in sorted(remoteobs, reverse=True):
1529 1527 if key.startswith('dump'):
1530 1528 data = util.b85decode(remoteobs[key])
1531 1529 version, newmarks = obsolete._readmarkers(data)
1532 1530 markers += newmarks
1533 1531 if markers:
1534 1532 pullop.repo.obsstore.add(tr, markers)
1535 1533 pullop.repo.invalidatevolatilesets()
1536 1534 return tr
1537 1535
1538 1536 def caps20to10(repo):
1539 1537 """return a set with appropriate options to use bundle20 during getbundle"""
1540 1538 caps = {'HG20'}
1541 1539 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1542 1540 caps.add('bundle2=' + urlreq.quote(capsblob))
1543 1541 return caps
1544 1542
1545 1543 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1546 1544 getbundle2partsorder = []
1547 1545
1548 1546 # Mapping between step name and function
1549 1547 #
1550 1548 # This exists to help extensions wrap steps if necessary
1551 1549 getbundle2partsmapping = {}
1552 1550
1553 1551 def getbundle2partsgenerator(stepname, idx=None):
1554 1552 """decorator for function generating bundle2 part for getbundle
1555 1553
1556 1554 The function is added to the step -> function mapping and appended to the
1557 1555 list of steps. Beware that decorated functions will be added in order
1558 1556 (this may matter).
1559 1557
1560 1558 You can only use this decorator for new steps, if you want to wrap a step
1561 1559 from an extension, attack the getbundle2partsmapping dictionary directly."""
1562 1560 def dec(func):
1563 1561 assert stepname not in getbundle2partsmapping
1564 1562 getbundle2partsmapping[stepname] = func
1565 1563 if idx is None:
1566 1564 getbundle2partsorder.append(stepname)
1567 1565 else:
1568 1566 getbundle2partsorder.insert(idx, stepname)
1569 1567 return func
1570 1568 return dec
1571 1569
1572 1570 def bundle2requested(bundlecaps):
1573 1571 if bundlecaps is not None:
1574 1572 return any(cap.startswith('HG2') for cap in bundlecaps)
1575 1573 return False
1576 1574
1577 1575 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1578 1576 **kwargs):
1579 1577 """Return chunks constituting a bundle's raw data.
1580 1578
1581 1579 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1582 1580 passed.
1583 1581
1584 1582 Returns an iterator over raw chunks (of varying sizes).
1585 1583 """
1586 1584 kwargs = pycompat.byteskwargs(kwargs)
1587 1585 usebundle2 = bundle2requested(bundlecaps)
1588 1586 # bundle10 case
1589 1587 if not usebundle2:
1590 1588 if bundlecaps and not kwargs.get('cg', True):
1591 1589 raise ValueError(_('request for bundle10 must include changegroup'))
1592 1590
1593 1591 if kwargs:
1594 1592 raise ValueError(_('unsupported getbundle arguments: %s')
1595 1593 % ', '.join(sorted(kwargs.keys())))
1596 1594 outgoing = _computeoutgoing(repo, heads, common)
1597 1595 bundler = changegroup.getbundler('01', repo, bundlecaps)
1598 1596 return changegroup.getsubsetraw(repo, outgoing, bundler, source)
1599 1597
1600 1598 # bundle20 case
1601 1599 b2caps = {}
1602 1600 for bcaps in bundlecaps:
1603 1601 if bcaps.startswith('bundle2='):
1604 1602 blob = urlreq.unquote(bcaps[len('bundle2='):])
1605 1603 b2caps.update(bundle2.decodecaps(blob))
1606 1604 bundler = bundle2.bundle20(repo.ui, b2caps)
1607 1605
1608 1606 kwargs['heads'] = heads
1609 1607 kwargs['common'] = common
1610 1608
1611 1609 for name in getbundle2partsorder:
1612 1610 func = getbundle2partsmapping[name]
1613 1611 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1614 1612 **pycompat.strkwargs(kwargs))
1615 1613
1616 1614 return bundler.getchunks()
1617 1615
1618 1616 @getbundle2partsgenerator('changegroup')
1619 1617 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1620 1618 b2caps=None, heads=None, common=None, **kwargs):
1621 1619 """add a changegroup part to the requested bundle"""
1622 1620 cg = None
1623 1621 if kwargs.get('cg', True):
1624 1622 # build changegroup bundle here.
1625 1623 version = '01'
1626 1624 cgversions = b2caps.get('changegroup')
1627 1625 if cgversions: # 3.1 and 3.2 ship with an empty value
1628 1626 cgversions = [v for v in cgversions
1629 1627 if v in changegroup.supportedoutgoingversions(repo)]
1630 1628 if not cgversions:
1631 1629 raise ValueError(_('no common changegroup version'))
1632 1630 version = max(cgversions)
1633 1631 outgoing = _computeoutgoing(repo, heads, common)
1634 1632 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1635 1633 bundlecaps=bundlecaps,
1636 1634 version=version)
1637 1635
1638 1636 if cg:
1639 1637 part = bundler.newpart('changegroup', data=cg)
1640 1638 if cgversions:
1641 1639 part.addparam('version', version)
1642 1640 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1643 1641 if 'treemanifest' in repo.requirements:
1644 1642 part.addparam('treemanifest', '1')
1645 1643
1646 1644 @getbundle2partsgenerator('listkeys')
1647 1645 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1648 1646 b2caps=None, **kwargs):
1649 1647 """add parts containing listkeys namespaces to the requested bundle"""
1650 1648 listkeys = kwargs.get('listkeys', ())
1651 1649 for namespace in listkeys:
1652 1650 part = bundler.newpart('listkeys')
1653 1651 part.addparam('namespace', namespace)
1654 1652 keys = repo.listkeys(namespace).items()
1655 1653 part.data = pushkey.encodekeys(keys)
1656 1654
1657 1655 @getbundle2partsgenerator('obsmarkers')
1658 1656 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1659 1657 b2caps=None, heads=None, **kwargs):
1660 1658 """add an obsolescence markers part to the requested bundle"""
1661 1659 if kwargs.get('obsmarkers', False):
1662 1660 if heads is None:
1663 1661 heads = repo.heads()
1664 1662 subset = [c.node() for c in repo.set('::%ln', heads)]
1665 1663 markers = repo.obsstore.relevantmarkers(subset)
1666 1664 markers = sorted(markers)
1667 1665 bundle2.buildobsmarkerspart(bundler, markers)
1668 1666
1669 1667 @getbundle2partsgenerator('hgtagsfnodes')
1670 1668 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1671 1669 b2caps=None, heads=None, common=None,
1672 1670 **kwargs):
1673 1671 """Transfer the .hgtags filenodes mapping.
1674 1672
1675 1673 Only values for heads in this bundle will be transferred.
1676 1674
1677 1675 The part data consists of pairs of 20 byte changeset node and .hgtags
1678 1676 filenodes raw values.
1679 1677 """
1680 1678 # Don't send unless:
1681 1679 # - changeset are being exchanged,
1682 1680 # - the client supports it.
1683 1681 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1684 1682 return
1685 1683
1686 1684 outgoing = _computeoutgoing(repo, heads, common)
1687 1685 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1688 1686
1689 1687 def _getbookmarks(repo, **kwargs):
1690 1688 """Returns bookmark to node mapping.
1691 1689
1692 1690 This function is primarily used to generate `bookmarks` bundle2 part.
1693 1691 It is a separate function in order to make it easy to wrap it
1694 1692 in extensions. Passing `kwargs` to the function makes it easy to
1695 1693 add new parameters in extensions.
1696 1694 """
1697 1695
1698 1696 return dict(bookmod.listbinbookmarks(repo))
1699 1697
1700 1698 def check_heads(repo, their_heads, context):
1701 1699 """check if the heads of a repo have been modified
1702 1700
1703 1701 Used by peer for unbundling.
1704 1702 """
1705 1703 heads = repo.heads()
1706 1704 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
1707 1705 if not (their_heads == ['force'] or their_heads == heads or
1708 1706 their_heads == ['hashed', heads_hash]):
1709 1707 # someone else committed/pushed/unbundled while we
1710 1708 # were transferring data
1711 1709 raise error.PushRaced('repository changed while %s - '
1712 1710 'please try again' % context)
1713 1711
1714 1712 def unbundle(repo, cg, heads, source, url):
1715 1713 """Apply a bundle to a repo.
1716 1714
1717 1715 this function makes sure the repo is locked during the application and have
1718 1716 mechanism to check that no push race occurred between the creation of the
1719 1717 bundle and its application.
1720 1718
1721 1719 If the push was raced as PushRaced exception is raised."""
1722 1720 r = 0
1723 1721 # need a transaction when processing a bundle2 stream
1724 1722 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1725 1723 lockandtr = [None, None, None]
1726 1724 recordout = None
1727 1725 # quick fix for output mismatch with bundle2 in 3.4
1728 1726 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
1729 1727 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1730 1728 captureoutput = True
1731 1729 try:
1732 1730 # note: outside bundle1, 'heads' is expected to be empty and this
1733 1731 # 'check_heads' call wil be a no-op
1734 1732 check_heads(repo, heads, 'uploading changes')
1735 1733 # push can proceed
1736 1734 if not isinstance(cg, bundle2.unbundle20):
1737 1735 # legacy case: bundle1 (changegroup 01)
1738 1736 txnname = "\n".join([source, util.hidepassword(url)])
1739 1737 with repo.lock(), repo.transaction(txnname) as tr:
1740 1738 op = bundle2.applybundle(repo, cg, tr, source, url)
1741 1739 r = bundle2.combinechangegroupresults(op)
1742 1740 else:
1743 1741 r = None
1744 1742 try:
1745 1743 def gettransaction():
1746 1744 if not lockandtr[2]:
1747 1745 lockandtr[0] = repo.wlock()
1748 1746 lockandtr[1] = repo.lock()
1749 1747 lockandtr[2] = repo.transaction(source)
1750 1748 lockandtr[2].hookargs['source'] = source
1751 1749 lockandtr[2].hookargs['url'] = url
1752 1750 lockandtr[2].hookargs['bundle2'] = '1'
1753 1751 return lockandtr[2]
1754 1752
1755 1753 # Do greedy locking by default until we're satisfied with lazy
1756 1754 # locking.
1757 1755 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1758 1756 gettransaction()
1759 1757
1760 1758 op = bundle2.bundleoperation(repo, gettransaction,
1761 1759 captureoutput=captureoutput)
1762 1760 try:
1763 1761 op = bundle2.processbundle(repo, cg, op=op)
1764 1762 finally:
1765 1763 r = op.reply
1766 1764 if captureoutput and r is not None:
1767 1765 repo.ui.pushbuffer(error=True, subproc=True)
1768 1766 def recordout(output):
1769 1767 r.newpart('output', data=output, mandatory=False)
1770 1768 if lockandtr[2] is not None:
1771 1769 lockandtr[2].close()
1772 1770 except BaseException as exc:
1773 1771 exc.duringunbundle2 = True
1774 1772 if captureoutput and r is not None:
1775 1773 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1776 1774 def recordout(output):
1777 1775 part = bundle2.bundlepart('output', data=output,
1778 1776 mandatory=False)
1779 1777 parts.append(part)
1780 1778 raise
1781 1779 finally:
1782 1780 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1783 1781 if recordout is not None:
1784 1782 recordout(repo.ui.popbuffer())
1785 1783 return r
1786 1784
1787 1785 def _maybeapplyclonebundle(pullop):
1788 1786 """Apply a clone bundle from a remote, if possible."""
1789 1787
1790 1788 repo = pullop.repo
1791 1789 remote = pullop.remote
1792 1790
1793 1791 if not repo.ui.configbool('ui', 'clonebundles'):
1794 1792 return
1795 1793
1796 1794 # Only run if local repo is empty.
1797 1795 if len(repo):
1798 1796 return
1799 1797
1800 1798 if pullop.heads:
1801 1799 return
1802 1800
1803 1801 if not remote.capable('clonebundles'):
1804 1802 return
1805 1803
1806 1804 res = remote._call('clonebundles')
1807 1805
1808 1806 # If we call the wire protocol command, that's good enough to record the
1809 1807 # attempt.
1810 1808 pullop.clonebundleattempted = True
1811 1809
1812 1810 entries = parseclonebundlesmanifest(repo, res)
1813 1811 if not entries:
1814 1812 repo.ui.note(_('no clone bundles available on remote; '
1815 1813 'falling back to regular clone\n'))
1816 1814 return
1817 1815
1818 1816 entries = filterclonebundleentries(repo, entries)
1819 1817 if not entries:
1820 1818 # There is a thundering herd concern here. However, if a server
1821 1819 # operator doesn't advertise bundles appropriate for its clients,
1822 1820 # they deserve what's coming. Furthermore, from a client's
1823 1821 # perspective, no automatic fallback would mean not being able to
1824 1822 # clone!
1825 1823 repo.ui.warn(_('no compatible clone bundles available on server; '
1826 1824 'falling back to regular clone\n'))
1827 1825 repo.ui.warn(_('(you may want to report this to the server '
1828 1826 'operator)\n'))
1829 1827 return
1830 1828
1831 1829 entries = sortclonebundleentries(repo.ui, entries)
1832 1830
1833 1831 url = entries[0]['URL']
1834 1832 repo.ui.status(_('applying clone bundle from %s\n') % url)
1835 1833 if trypullbundlefromurl(repo.ui, repo, url):
1836 1834 repo.ui.status(_('finished applying clone bundle\n'))
1837 1835 # Bundle failed.
1838 1836 #
1839 1837 # We abort by default to avoid the thundering herd of
1840 1838 # clients flooding a server that was expecting expensive
1841 1839 # clone load to be offloaded.
1842 1840 elif repo.ui.configbool('ui', 'clonebundlefallback'):
1843 1841 repo.ui.warn(_('falling back to normal clone\n'))
1844 1842 else:
1845 1843 raise error.Abort(_('error applying bundle'),
1846 1844 hint=_('if this error persists, consider contacting '
1847 1845 'the server operator or disable clone '
1848 1846 'bundles via '
1849 1847 '"--config ui.clonebundles=false"'))
1850 1848
1851 1849 def parseclonebundlesmanifest(repo, s):
1852 1850 """Parses the raw text of a clone bundles manifest.
1853 1851
1854 1852 Returns a list of dicts. The dicts have a ``URL`` key corresponding
1855 1853 to the URL and other keys are the attributes for the entry.
1856 1854 """
1857 1855 m = []
1858 1856 for line in s.splitlines():
1859 1857 fields = line.split()
1860 1858 if not fields:
1861 1859 continue
1862 1860 attrs = {'URL': fields[0]}
1863 1861 for rawattr in fields[1:]:
1864 1862 key, value = rawattr.split('=', 1)
1865 1863 key = urlreq.unquote(key)
1866 1864 value = urlreq.unquote(value)
1867 1865 attrs[key] = value
1868 1866
1869 1867 # Parse BUNDLESPEC into components. This makes client-side
1870 1868 # preferences easier to specify since you can prefer a single
1871 1869 # component of the BUNDLESPEC.
1872 1870 if key == 'BUNDLESPEC':
1873 1871 try:
1874 1872 comp, version, params = parsebundlespec(repo, value,
1875 1873 externalnames=True)
1876 1874 attrs['COMPRESSION'] = comp
1877 1875 attrs['VERSION'] = version
1878 1876 except error.InvalidBundleSpecification:
1879 1877 pass
1880 1878 except error.UnsupportedBundleSpecification:
1881 1879 pass
1882 1880
1883 1881 m.append(attrs)
1884 1882
1885 1883 return m
1886 1884
1887 1885 def filterclonebundleentries(repo, entries):
1888 1886 """Remove incompatible clone bundle manifest entries.
1889 1887
1890 1888 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
1891 1889 and returns a new list consisting of only the entries that this client
1892 1890 should be able to apply.
1893 1891
1894 1892 There is no guarantee we'll be able to apply all returned entries because
1895 1893 the metadata we use to filter on may be missing or wrong.
1896 1894 """
1897 1895 newentries = []
1898 1896 for entry in entries:
1899 1897 spec = entry.get('BUNDLESPEC')
1900 1898 if spec:
1901 1899 try:
1902 1900 parsebundlespec(repo, spec, strict=True)
1903 1901 except error.InvalidBundleSpecification as e:
1904 1902 repo.ui.debug(str(e) + '\n')
1905 1903 continue
1906 1904 except error.UnsupportedBundleSpecification as e:
1907 1905 repo.ui.debug('filtering %s because unsupported bundle '
1908 1906 'spec: %s\n' % (entry['URL'], str(e)))
1909 1907 continue
1910 1908
1911 1909 if 'REQUIRESNI' in entry and not sslutil.hassni:
1912 1910 repo.ui.debug('filtering %s because SNI not supported\n' %
1913 1911 entry['URL'])
1914 1912 continue
1915 1913
1916 1914 newentries.append(entry)
1917 1915
1918 1916 return newentries
1919 1917
1920 1918 class clonebundleentry(object):
1921 1919 """Represents an item in a clone bundles manifest.
1922 1920
1923 1921 This rich class is needed to support sorting since sorted() in Python 3
1924 1922 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
1925 1923 won't work.
1926 1924 """
1927 1925
1928 1926 def __init__(self, value, prefers):
1929 1927 self.value = value
1930 1928 self.prefers = prefers
1931 1929
1932 1930 def _cmp(self, other):
1933 1931 for prefkey, prefvalue in self.prefers:
1934 1932 avalue = self.value.get(prefkey)
1935 1933 bvalue = other.value.get(prefkey)
1936 1934
1937 1935 # Special case for b missing attribute and a matches exactly.
1938 1936 if avalue is not None and bvalue is None and avalue == prefvalue:
1939 1937 return -1
1940 1938
1941 1939 # Special case for a missing attribute and b matches exactly.
1942 1940 if bvalue is not None and avalue is None and bvalue == prefvalue:
1943 1941 return 1
1944 1942
1945 1943 # We can't compare unless attribute present on both.
1946 1944 if avalue is None or bvalue is None:
1947 1945 continue
1948 1946
1949 1947 # Same values should fall back to next attribute.
1950 1948 if avalue == bvalue:
1951 1949 continue
1952 1950
1953 1951 # Exact matches come first.
1954 1952 if avalue == prefvalue:
1955 1953 return -1
1956 1954 if bvalue == prefvalue:
1957 1955 return 1
1958 1956
1959 1957 # Fall back to next attribute.
1960 1958 continue
1961 1959
1962 1960 # If we got here we couldn't sort by attributes and prefers. Fall
1963 1961 # back to index order.
1964 1962 return 0
1965 1963
1966 1964 def __lt__(self, other):
1967 1965 return self._cmp(other) < 0
1968 1966
1969 1967 def __gt__(self, other):
1970 1968 return self._cmp(other) > 0
1971 1969
1972 1970 def __eq__(self, other):
1973 1971 return self._cmp(other) == 0
1974 1972
1975 1973 def __le__(self, other):
1976 1974 return self._cmp(other) <= 0
1977 1975
1978 1976 def __ge__(self, other):
1979 1977 return self._cmp(other) >= 0
1980 1978
1981 1979 def __ne__(self, other):
1982 1980 return self._cmp(other) != 0
1983 1981
1984 1982 def sortclonebundleentries(ui, entries):
1985 1983 prefers = ui.configlist('ui', 'clonebundleprefers')
1986 1984 if not prefers:
1987 1985 return list(entries)
1988 1986
1989 1987 prefers = [p.split('=', 1) for p in prefers]
1990 1988
1991 1989 items = sorted(clonebundleentry(v, prefers) for v in entries)
1992 1990 return [i.value for i in items]
1993 1991
1994 1992 def trypullbundlefromurl(ui, repo, url):
1995 1993 """Attempt to apply a bundle from a URL."""
1996 1994 with repo.lock(), repo.transaction('bundleurl') as tr:
1997 1995 try:
1998 1996 fh = urlmod.open(ui, url)
1999 1997 cg = readbundle(ui, fh, 'stream')
2000 1998
2001 1999 if isinstance(cg, streamclone.streamcloneapplier):
2002 2000 cg.apply(repo)
2003 2001 else:
2004 2002 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2005 2003 return True
2006 2004 except urlerr.httperror as e:
2007 2005 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
2008 2006 except urlerr.urlerror as e:
2009 2007 ui.warn(_('error fetching bundle: %s\n') % e.reason)
2010 2008
2011 2009 return False
General Comments 0
You need to be logged in to leave comments. Login now