clonebundle: update hook arguments (BC)...
Martin von Zweigbergk
r32957:067173e3 default
@@ -1,2011 +1,2011 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullid,
17 17 )
18 18 from . import (
19 19 bookmarks as bookmod,
20 20 bundle2,
21 21 changegroup,
22 22 discovery,
23 23 error,
24 24 lock as lockmod,
25 25 obsolete,
26 26 phases,
27 27 pushkey,
28 28 pycompat,
29 29 scmutil,
30 30 sslutil,
31 31 streamclone,
32 32 url as urlmod,
33 33 util,
34 34 )
35 35
36 36 urlerr = util.urlerr
37 37 urlreq = util.urlreq
38 38
39 39 # Maps bundle version human names to changegroup versions.
40 40 _bundlespeccgversions = {'v1': '01',
41 41 'v2': '02',
42 42 'packed1': 's1',
43 43 'bundle2': '02', #legacy
44 44 }
45 45
46 46 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
47 47 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
48 48
49 49 def parsebundlespec(repo, spec, strict=True, externalnames=False):
50 50 """Parse a bundle string specification into parts.
51 51
52 52 Bundle specifications denote a well-defined bundle/exchange format.
53 53 The content of a given specification should not change over time in
54 54 order to ensure that bundles produced by a newer version of Mercurial are
55 55 readable from an older version.
56 56
57 57 The string currently has the form:
58 58
59 59 <compression>-<type>[;<parameter0>[;<parameter1>]]
60 60
61 61 Where <compression> is one of the supported compression formats
62 62 and <type> is (currently) a version string. A ";" can follow the type and
63 63 all text afterwards is interpreted as URI encoded, ";" delimited key=value
64 64 pairs.
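For example, ``none-packed1;requirements=generaldelta%2Crevlogv1`` denotes
an uncompressed packed1 stream whose URI-encoded ``requirements`` parameter
decodes to ``generaldelta,revlogv1``.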
65 65
66 66 If ``strict`` is True (the default) <compression> is required. Otherwise,
67 67 it is optional.
68 68
69 69 If ``externalnames`` is False (the default), the human-centric names will
70 70 be converted to their internal representation.
71 71
72 72 Returns a 3-tuple of (compression, version, parameters). Compression will
73 73 be ``None`` if not in strict mode and a compression isn't defined.
74 74
75 75 An ``InvalidBundleSpecification`` is raised when the specification is
76 76 not syntactically well formed.
77 77
78 78 An ``UnsupportedBundleSpecification`` is raised when the compression or
79 79 bundle type/version is not recognized.
80 80
81 81 Note: this function will likely eventually return a more complex data
82 82 structure, including bundle2 part information.
83 83 """
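# Illustrative results, assuming the stock engine names registered in
# util.compengines (a sketch, not a doctest):
#   parsebundlespec(repo, 'gzip-v2')  -> ('GZ', '02', {})
#   parsebundlespec(repo, 'bzip2-v1') -> ('BZ', '01', {})
#   parsebundlespec(repo, 'none-packed1;requirements=revlogv1')
#                                     -> ('UN', 's1', {'requirements': 'revlogv1'})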
84 84 def parseparams(s):
85 85 if ';' not in s:
86 86 return s, {}
87 87
88 88 params = {}
89 89 version, paramstr = s.split(';', 1)
90 90
91 91 for p in paramstr.split(';'):
92 92 if '=' not in p:
93 93 raise error.InvalidBundleSpecification(
94 94 _('invalid bundle specification: '
95 95 'missing "=" in parameter: %s') % p)
96 96
97 97 key, value = p.split('=', 1)
98 98 key = urlreq.unquote(key)
99 99 value = urlreq.unquote(value)
100 100 params[key] = value
101 101
102 102 return version, params
103 103
104 104
105 105 if strict and '-' not in spec:
106 106 raise error.InvalidBundleSpecification(
107 107 _('invalid bundle specification; '
108 108 'must be prefixed with compression: %s') % spec)
109 109
110 110 if '-' in spec:
111 111 compression, version = spec.split('-', 1)
112 112
113 113 if compression not in util.compengines.supportedbundlenames:
114 114 raise error.UnsupportedBundleSpecification(
115 115 _('%s compression is not supported') % compression)
116 116
117 117 version, params = parseparams(version)
118 118
119 119 if version not in _bundlespeccgversions:
120 120 raise error.UnsupportedBundleSpecification(
121 121 _('%s is not a recognized bundle version') % version)
122 122 else:
123 123 # Value could be just the compression or just the version, in which
124 124 # case some defaults are assumed (but only when not in strict mode).
125 125 assert not strict
126 126
127 127 spec, params = parseparams(spec)
128 128
129 129 if spec in util.compengines.supportedbundlenames:
130 130 compression = spec
131 131 version = 'v1'
132 132 # Generaldelta repos require v2.
133 133 if 'generaldelta' in repo.requirements:
134 134 version = 'v2'
135 135 # Modern compression engines require v2.
136 136 if compression not in _bundlespecv1compengines:
137 137 version = 'v2'
138 138 elif spec in _bundlespeccgversions:
139 139 if spec == 'packed1':
140 140 compression = 'none'
141 141 else:
142 142 compression = 'bzip2'
143 143 version = spec
144 144 else:
145 145 raise error.UnsupportedBundleSpecification(
146 146 _('%s is not a recognized bundle specification') % spec)
147 147
148 148 # Bundle version 1 only supports a known set of compression engines.
149 149 if version == 'v1' and compression not in _bundlespecv1compengines:
150 150 raise error.UnsupportedBundleSpecification(
151 151 _('compression engine %s is not supported on v1 bundles') %
152 152 compression)
153 153
154 154 # The specification for packed1 can optionally declare the data formats
155 155 # required to apply it. If we see this metadata, compare against what the
156 156 # repo supports and error if the bundle isn't compatible.
157 157 if version == 'packed1' and 'requirements' in params:
158 158 requirements = set(params['requirements'].split(','))
159 159 missingreqs = requirements - repo.supportedformats
160 160 if missingreqs:
161 161 raise error.UnsupportedBundleSpecification(
162 162 _('missing support for repository features: %s') %
163 163 ', '.join(sorted(missingreqs)))
164 164
165 165 if not externalnames:
166 166 engine = util.compengines.forbundlename(compression)
167 167 compression = engine.bundletype()[1]
168 168 version = _bundlespeccgversions[version]
169 169 return compression, version, params
170 170
171 171 def readbundle(ui, fh, fname, vfs=None):
172 172 header = changegroup.readexactly(fh, 4)
173 173
174 174 alg = None
175 175 if not fname:
176 176 fname = "stream"
177 177 if not header.startswith('HG') and header.startswith('\0'):
178 178 fh = changegroup.headerlessfixup(fh, header)
179 179 header = "HG10"
180 180 alg = 'UN'
181 181 elif vfs:
182 182 fname = vfs.join(fname)
183 183
184 184 magic, version = header[0:2], header[2:4]
185 185
186 186 if magic != 'HG':
187 187 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
188 188 if version == '10':
189 189 if alg is None:
190 190 alg = changegroup.readexactly(fh, 2)
191 191 return changegroup.cg1unpacker(fh, alg)
192 192 elif version.startswith('2'):
193 193 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
194 194 elif version == 'S1':
195 195 return streamclone.streamcloneapplier(fh)
196 196 else:
197 197 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
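# For reference, the 4-byte headers dispatched above: 'HG10' plus a 2-byte
# compression code ('BZ', 'GZ' or 'UN') selects cg1unpacker, a version
# starting with '2' (e.g. 'HG20') selects the bundle2 unbundler, and 'HGS1'
# selects the stream clone applier. Headerless data starting with '\0' is
# wrapped up and treated as an uncompressed 'HG10UN' bundle.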
198 198
199 199 def getbundlespec(ui, fh):
200 200 """Infer the bundlespec from a bundle file handle.
201 201
202 202 The input file handle is seeked and the original seek position is not
203 203 restored.
204 204 """
205 205 def speccompression(alg):
206 206 try:
207 207 return util.compengines.forbundletype(alg).bundletype()[0]
208 208 except KeyError:
209 209 return None
210 210
211 211 b = readbundle(ui, fh, None)
212 212 if isinstance(b, changegroup.cg1unpacker):
213 213 alg = b._type
214 214 if alg == '_truncatedBZ':
215 215 alg = 'BZ'
216 216 comp = speccompression(alg)
217 217 if not comp:
218 218 raise error.Abort(_('unknown compression algorithm: %s') % alg)
219 219 return '%s-v1' % comp
220 220 elif isinstance(b, bundle2.unbundle20):
221 221 if 'Compression' in b.params:
222 222 comp = speccompression(b.params['Compression'])
223 223 if not comp:
224 224 raise error.Abort(_('unknown compression algorithm: %s') % comp)
225 225 else:
226 226 comp = 'none'
227 227
228 228 version = None
229 229 for part in b.iterparts():
230 230 if part.type == 'changegroup':
231 231 version = part.params['version']
232 232 if version in ('01', '02'):
233 233 version = 'v2'
234 234 else:
235 235 raise error.Abort(_('changegroup version %s does not have '
236 236 'a known bundlespec') % version,
237 237 hint=_('try upgrading your Mercurial '
238 238 'client'))
239 239
240 240 if not version:
241 241 raise error.Abort(_('could not identify changegroup version in '
242 242 'bundle'))
243 243
244 244 return '%s-%s' % (comp, version)
245 245 elif isinstance(b, streamclone.streamcloneapplier):
246 246 requirements = streamclone.readbundle1header(fh)[2]
247 247 params = 'requirements=%s' % ','.join(sorted(requirements))
248 248 return 'none-packed1;%s' % urlreq.quote(params)
249 249 else:
250 250 raise error.Abort(_('unknown bundle type: %s') % b)
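# Informal examples of inferred specs, following the branches above: a
# bzip2 cg1 bundle yields 'bzip2-v1', a bundle2 without a 'Compression'
# param yields 'none-v2', and a stream clone bundle yields
# 'none-packed1;requirements=...' with the requirements read back from
# its header.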
251 251
252 252 def _computeoutgoing(repo, heads, common):
253 253 """Computes which revs are outgoing given a set of common
254 254 and a set of heads.
255 255
256 256 This is a separate function so extensions can have access to
257 257 the logic.
258 258
259 259 Returns a discovery.outgoing object.
260 260 """
261 261 cl = repo.changelog
262 262 if common:
263 263 hasnode = cl.hasnode
264 264 common = [n for n in common if hasnode(n)]
265 265 else:
266 266 common = [nullid]
267 267 if not heads:
268 268 heads = cl.heads()
269 269 return discovery.outgoing(repo, common, heads)
270 270
271 271 def _forcebundle1(op):
272 272 """return true if a pull/push must use bundle1
273 273
274 274 This function is used to allow testing of the older bundle version"""
275 275 ui = op.repo.ui
276 276 forcebundle1 = False
277 277 # The goal of this config is to allow developers to choose the bundle
278 278 # version used during exchange. This is especially handy during tests.
279 279 # Value is a list of bundle versions to pick from; the highest version
280 280 # should be used.
281 281 #
282 282 # developer config: devel.legacy.exchange
283 283 exchange = ui.configlist('devel', 'legacy.exchange')
284 284 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
285 285 return forcebundle1 or not op.remote.capable('bundle2')
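# A minimal hgrc sketch for the developer config above, forcing bundle1
# even against a bundle2-capable peer:
#
#   [devel]
#   legacy.exchange = bundle1
#
# Listing 'bundle2' (alone or alongside 'bundle1') leaves bundle2 enabled.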
286 286
287 287 class pushoperation(object):
288 288 """An object that represents a single push operation
289 289
290 290 Its purpose is to carry push-related state and very common operations.
291 291
292 292 A new pushoperation should be created at the beginning of each push and
293 293 discarded afterward.
294 294 """
295 295
296 296 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
297 297 bookmarks=()):
298 298 # repo we push from
299 299 self.repo = repo
300 300 self.ui = repo.ui
301 301 # repo we push to
302 302 self.remote = remote
303 303 # force option provided
304 304 self.force = force
305 305 # revs to be pushed (None is "all")
306 306 self.revs = revs
307 307 # bookmark explicitly pushed
308 308 self.bookmarks = bookmarks
309 309 # allow push of new branch
310 310 self.newbranch = newbranch
311 311 # did a local lock get acquired?
312 312 self.locallocked = None
313 313 # steps already performed
314 314 # (used to check what steps have been already performed through bundle2)
315 315 self.stepsdone = set()
316 316 # Integer version of the changegroup push result
317 317 # - None means nothing to push
318 318 # - 0 means HTTP error
319 319 # - 1 means we pushed and remote head count is unchanged *or*
320 320 # we have outgoing changesets but refused to push
321 321 # - other values as described by addchangegroup()
322 322 self.cgresult = None
323 323 # Boolean value for the bookmark push
324 324 self.bkresult = None
325 325 # discover.outgoing object (contains common and outgoing data)
326 326 self.outgoing = None
327 327 # all remote topological heads before the push
328 328 self.remoteheads = None
329 329 # Details of the remote branch pre and post push
330 330 #
331 331 # mapping: {'branch': ([remoteheads],
332 332 # [newheads],
333 333 # [unsyncedheads],
334 334 # [discardedheads])}
335 335 # - branch: the branch name
336 336 # - remoteheads: the list of remote heads known locally
337 337 # None if the branch is new
338 338 # - newheads: the new remote heads (known locally) with outgoing pushed
339 339 # - unsyncedheads: the list of remote heads unknown locally.
340 340 # - discardedheads: the list of remote heads made obsolete by the push
341 341 self.pushbranchmap = None
342 342 # testable as a boolean indicating if any nodes are missing locally.
343 343 self.incoming = None
344 344 # phase changes that must be pushed alongside the changesets
345 345 self.outdatedphases = None
346 346 # phase changes that must be pushed if the changeset push fails
347 347 self.fallbackoutdatedphases = None
348 348 # outgoing obsmarkers
349 349 self.outobsmarkers = set()
350 350 # outgoing bookmarks
351 351 self.outbookmarks = []
352 352 # transaction manager
353 353 self.trmanager = None
354 354 # map { pushkey partid -> callback handling failure}
355 355 # used to handle exception from mandatory pushkey part failure
356 356 self.pkfailcb = {}
357 357
358 358 @util.propertycache
359 359 def futureheads(self):
360 360 """future remote heads if the changeset push succeeds"""
361 361 return self.outgoing.missingheads
362 362
363 363 @util.propertycache
364 364 def fallbackheads(self):
365 365 """future remote heads if the changeset push fails"""
366 366 if self.revs is None:
367 367 # no target to push, all common heads are relevant
368 368 return self.outgoing.commonheads
369 369 unfi = self.repo.unfiltered()
370 370 # I want cheads = heads(::missingheads and ::commonheads)
371 371 # (missingheads is revs with secret changeset filtered out)
372 372 #
373 373 # This can be expressed as:
374 374 # cheads = ( (missingheads and ::commonheads)
375 375 # + (commonheads and ::missingheads))
376 376 #
377 377 #
378 378 # while trying to push we already computed the following:
379 379 # common = (::commonheads)
380 380 # missing = ((commonheads::missingheads) - commonheads)
381 381 #
382 382 # We can pick:
383 383 # * missingheads part of common (::commonheads)
384 384 common = self.outgoing.common
385 385 nm = self.repo.changelog.nodemap
386 386 cheads = [node for node in self.revs if nm[node] in common]
387 387 # and
388 388 # * commonheads parents on missing
389 389 revset = unfi.set('%ln and parents(roots(%ln))',
390 390 self.outgoing.commonheads,
391 391 self.outgoing.missing)
392 392 cheads.extend(c.node() for c in revset)
393 393 return cheads
394 394
395 395 @property
396 396 def commonheads(self):
397 397 """set of all common heads after changeset bundle push"""
398 398 if self.cgresult:
399 399 return self.futureheads
400 400 else:
401 401 return self.fallbackheads
402 402
403 403 # mapping of messages used when pushing bookmarks
404 404 bookmsgmap = {'update': (_("updating bookmark %s\n"),
405 405 _('updating bookmark %s failed!\n')),
406 406 'export': (_("exporting bookmark %s\n"),
407 407 _('exporting bookmark %s failed!\n')),
408 408 'delete': (_("deleting remote bookmark %s\n"),
409 409 _('deleting remote bookmark %s failed!\n')),
410 410 }
411 411
412 412
413 413 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
414 414 opargs=None):
415 415 '''Push outgoing changesets (limited by revs) from a local
416 416 repository to remote. Returns a pushoperation object whose ``cgresult`` is an integer:
417 417 - None means nothing to push
418 418 - 0 means HTTP error
419 419 - 1 means we pushed and remote head count is unchanged *or*
420 420 we have outgoing changesets but refused to push
421 421 - other values as described by addchangegroup()
422 422 '''
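# A typical call, as an informal sketch (error handling elided):
#
#   pushop = exchange.push(repo, remote, revs=[node])
#   if pushop.cgresult == 0:
#       repo.ui.warn('push failed (HTTP error)\n')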
423 423 if opargs is None:
424 424 opargs = {}
425 425 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
426 426 **opargs)
427 427 if pushop.remote.local():
428 428 missing = (set(pushop.repo.requirements)
429 429 - pushop.remote.local().supported)
430 430 if missing:
431 431 msg = _("required features are not"
432 432 " supported in the destination:"
433 433 " %s") % (', '.join(sorted(missing)))
434 434 raise error.Abort(msg)
435 435
436 436 # there are two ways to push to remote repo:
437 437 #
438 438 # addchangegroup assumes local user can lock remote
439 439 # repo (local filesystem, old ssh servers).
440 440 #
441 441 # unbundle assumes local user cannot lock remote repo (new ssh
442 442 # servers, http servers).
443 443
444 444 if not pushop.remote.canpush():
445 445 raise error.Abort(_("destination does not support push"))
446 446 # get local lock as we might write phase data
447 447 localwlock = locallock = None
448 448 try:
449 449 # bundle2 push may receive a reply bundle touching bookmarks or other
450 450 # things requiring the wlock. Take it now to ensure proper ordering.
451 451 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
452 452 if (not _forcebundle1(pushop)) and maypushback:
453 453 localwlock = pushop.repo.wlock()
454 454 locallock = pushop.repo.lock()
455 455 pushop.locallocked = True
456 456 except IOError as err:
457 457 pushop.locallocked = False
458 458 if err.errno != errno.EACCES:
459 459 raise
460 460 # source repo cannot be locked.
461 461 # We do not abort the push, but just disable the local phase
462 462 # synchronisation.
463 463 msg = 'cannot lock source repository: %s\n' % err
464 464 pushop.ui.debug(msg)
465 465 try:
466 466 if pushop.locallocked:
467 467 pushop.trmanager = transactionmanager(pushop.repo,
468 468 'push-response',
469 469 pushop.remote.url())
470 470 pushop.repo.checkpush(pushop)
471 471 lock = None
472 472 unbundle = pushop.remote.capable('unbundle')
473 473 if not unbundle:
474 474 lock = pushop.remote.lock()
475 475 try:
476 476 _pushdiscovery(pushop)
477 477 if not _forcebundle1(pushop):
478 478 _pushbundle2(pushop)
479 479 _pushchangeset(pushop)
480 480 _pushsyncphase(pushop)
481 481 _pushobsolete(pushop)
482 482 _pushbookmark(pushop)
483 483 finally:
484 484 if lock is not None:
485 485 lock.release()
486 486 if pushop.trmanager:
487 487 pushop.trmanager.close()
488 488 finally:
489 489 if pushop.trmanager:
490 490 pushop.trmanager.release()
491 491 if locallock is not None:
492 492 locallock.release()
493 493 if localwlock is not None:
494 494 localwlock.release()
495 495
496 496 return pushop
497 497
498 498 # list of steps to perform discovery before push
499 499 pushdiscoveryorder = []
500 500
501 501 # Mapping between step name and function
502 502 #
503 503 # This exists to help extensions wrap steps if necessary
504 504 pushdiscoverymapping = {}
505 505
506 506 def pushdiscovery(stepname):
507 507 """decorator for function performing discovery before push
508 508
509 509 The function is added to the step -> function mapping and appended to the
510 510 list of steps. Beware that decorated functions will be added in order (this
511 511 may matter).
512 512
513 513 You can only use this decorator for a new step; if you want to wrap a step
514 514 from an extension, change the pushdiscoverymapping dictionary directly."""
515 515 def dec(func):
516 516 assert stepname not in pushdiscoverymapping
517 517 pushdiscoverymapping[stepname] = func
518 518 pushdiscoveryorder.append(stepname)
519 519 return func
520 520 return dec
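# A hypothetical extension-defined step, for illustration ('mystep' is not
# a real step name):
#
#   @pushdiscovery('mystep')
#   def _pushdiscoverymystep(pushop):
#       pushop.ui.debug('running my discovery step\n')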
521 521
522 522 def _pushdiscovery(pushop):
523 523 """Run all discovery steps"""
524 524 for stepname in pushdiscoveryorder:
525 525 step = pushdiscoverymapping[stepname]
526 526 step(pushop)
527 527
528 528 @pushdiscovery('changeset')
529 529 def _pushdiscoverychangeset(pushop):
530 530 """discover the changesets that need to be pushed"""
531 531 fci = discovery.findcommonincoming
532 532 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
533 533 common, inc, remoteheads = commoninc
534 534 fco = discovery.findcommonoutgoing
535 535 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
536 536 commoninc=commoninc, force=pushop.force)
537 537 pushop.outgoing = outgoing
538 538 pushop.remoteheads = remoteheads
539 539 pushop.incoming = inc
540 540
541 541 @pushdiscovery('phase')
542 542 def _pushdiscoveryphase(pushop):
543 543 """discover the phases that need to be pushed
544 544
545 545 (computed for both the success and failure cases of the changeset push)"""
546 546 outgoing = pushop.outgoing
547 547 unfi = pushop.repo.unfiltered()
548 548 remotephases = pushop.remote.listkeys('phases')
549 549 publishing = remotephases.get('publishing', False)
550 550 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
551 551 and remotephases # server supports phases
552 552 and not pushop.outgoing.missing # no changesets to be pushed
553 553 and publishing):
554 554 # When:
555 555 # - this is a subrepo push
556 556 # - and the remote supports phases
557 557 # - and no changesets are to be pushed
558 558 # - and the remote is publishing
559 559 # We may be in the issue 3871 case!
560 560 # We drop the phase synchronisation that would otherwise be done as
561 561 # a courtesy to publish changesets that may still be draft on the
562 562 # remote.
563 563 remotephases = {'publishing': 'True'}
564 564 ana = phases.analyzeremotephases(pushop.repo,
565 565 pushop.fallbackheads,
566 566 remotephases)
567 567 pheads, droots = ana
568 568 extracond = ''
569 569 if not publishing:
570 570 extracond = ' and public()'
571 571 revset = 'heads((%%ln::%%ln) %s)' % extracond
572 572 # Get the list of all revs draft on the remote but public here.
573 573 # XXX Beware that the revset breaks if droots is not strictly
574 574 # XXX roots; we may want to ensure it is, but that is costly.
575 575 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
576 576 if not outgoing.missing:
577 577 future = fallback
578 578 else:
579 579 # add the changesets we are going to push as draft
580 580 #
581 581 # should not be necessary for a publishing server, but because of an
582 582 # issue fixed in xxxxx we have to do it anyway.
583 583 fdroots = list(unfi.set('roots(%ln + %ln::)',
584 584 outgoing.missing, droots))
585 585 fdroots = [f.node() for f in fdroots]
586 586 future = list(unfi.set(revset, fdroots, pushop.futureheads))
587 587 pushop.outdatedphases = future
588 588 pushop.fallbackoutdatedphases = fallback
589 589
590 590 @pushdiscovery('obsmarker')
591 591 def _pushdiscoveryobsmarkers(pushop):
592 592 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
593 593 and pushop.repo.obsstore
594 594 and 'obsolete' in pushop.remote.listkeys('namespaces')):
595 595 repo = pushop.repo
596 596 # very naive computation, that can be quite expensive on big repos.
597 597 # However: evolution is currently slow on them anyway.
598 598 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
599 599 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
600 600
601 601 @pushdiscovery('bookmarks')
602 602 def _pushdiscoverybookmarks(pushop):
603 603 ui = pushop.ui
604 604 repo = pushop.repo.unfiltered()
605 605 remote = pushop.remote
606 606 ui.debug("checking for updated bookmarks\n")
607 607 ancestors = ()
608 608 if pushop.revs:
609 609 revnums = map(repo.changelog.rev, pushop.revs)
610 610 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
611 611 remotebookmark = remote.listkeys('bookmarks')
612 612
613 613 explicit = set([repo._bookmarks.expandname(bookmark)
614 614 for bookmark in pushop.bookmarks])
615 615
616 616 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
617 617 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
618 618
619 619 def safehex(x):
620 620 if x is None:
621 621 return x
622 622 return hex(x)
623 623
624 624 def hexifycompbookmarks(bookmarks):
625 625 for b, scid, dcid in bookmarks:
626 626 yield b, safehex(scid), safehex(dcid)
627 627
628 628 comp = [hexifycompbookmarks(marks) for marks in comp]
629 629 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
630 630
631 631 for b, scid, dcid in advsrc:
632 632 if b in explicit:
633 633 explicit.remove(b)
634 634 if not ancestors or repo[scid].rev() in ancestors:
635 635 pushop.outbookmarks.append((b, dcid, scid))
636 636 # search added bookmark
637 637 for b, scid, dcid in addsrc:
638 638 if b in explicit:
639 639 explicit.remove(b)
640 640 pushop.outbookmarks.append((b, '', scid))
641 641 # search for overwritten bookmark
642 642 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
643 643 if b in explicit:
644 644 explicit.remove(b)
645 645 pushop.outbookmarks.append((b, dcid, scid))
646 646 # search for bookmark to delete
647 647 for b, scid, dcid in adddst:
648 648 if b in explicit:
649 649 explicit.remove(b)
650 650 # treat as "deleted locally"
651 651 pushop.outbookmarks.append((b, dcid, ''))
652 652 # identical bookmarks shouldn't get reported
653 653 for b, scid, dcid in same:
654 654 if b in explicit:
655 655 explicit.remove(b)
656 656
657 657 if explicit:
658 658 explicit = sorted(explicit)
659 659 # we should probably list all of them
660 660 ui.warn(_('bookmark %s does not exist on the local '
661 661 'or remote repository!\n') % explicit[0])
662 662 pushop.bkresult = 2
663 663
664 664 pushop.outbookmarks.sort()
665 665
666 666 def _pushcheckoutgoing(pushop):
667 667 outgoing = pushop.outgoing
668 668 unfi = pushop.repo.unfiltered()
669 669 if not outgoing.missing:
670 670 # nothing to push
671 671 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
672 672 return False
673 673 # something to push
674 674 if not pushop.force:
675 675 # if repo.obsstore is falsy --> no obsolete markers;
676 676 # checking it first saves the iteration below
677 677 if unfi.obsstore:
678 678 # these messages are defined here because of the 80-char line limit
679 679 mso = _("push includes obsolete changeset: %s!")
680 680 mst = {"unstable": _("push includes unstable changeset: %s!"),
681 681 "bumped": _("push includes bumped changeset: %s!"),
682 682 "divergent": _("push includes divergent changeset: %s!")}
683 683 # If we are to push and there is at least one
684 684 # obsolete or unstable changeset in missing, at
685 685 # least one of the missing heads will be obsolete
686 686 # or unstable. So checking heads only is ok.
687 687 for node in outgoing.missingheads:
688 688 ctx = unfi[node]
689 689 if ctx.obsolete():
690 690 raise error.Abort(mso % ctx)
691 691 elif ctx.troubled():
692 692 raise error.Abort(mst[ctx.troubles()[0]] % ctx)
693 693
694 694 discovery.checkheads(pushop)
695 695 return True
696 696
697 697 # List of names of steps to perform for an outgoing bundle2, order matters.
698 698 b2partsgenorder = []
699 699
700 700 # Mapping between step name and function
701 701 #
702 702 # This exists to help extensions wrap steps if necessary
703 703 b2partsgenmapping = {}
704 704
705 705 def b2partsgenerator(stepname, idx=None):
706 706 """decorator for function generating bundle2 part
707 707
708 708 The function is added to the step -> function mapping and appended to the
709 709 list of steps. Beware that decorated functions will be added in order
710 710 (this may matter).
711 711
712 712 You can only use this decorator for new steps; if you want to wrap a step
713 713 from an extension, change the b2partsgenmapping dictionary directly."""
714 714 def dec(func):
715 715 assert stepname not in b2partsgenmapping
716 716 b2partsgenmapping[stepname] = func
717 717 if idx is None:
718 718 b2partsgenorder.append(stepname)
719 719 else:
720 720 b2partsgenorder.insert(idx, stepname)
721 721 return func
722 722 return dec
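# A hypothetical part generator, for illustration ('mypart' is not a real
# part); passing idx=0 would make it run first instead of being appended:
#
#   @b2partsgenerator('mypart')
#   def _pushb2mypart(pushop, bundler):
#       if 'mypart' in pushop.stepsdone:
#           return
#       pushop.stepsdone.add('mypart')
#       # returning a callable would register it as a reply handler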
723 723
724 724 def _pushb2ctxcheckheads(pushop, bundler):
725 725 """Generate race condition checking parts
726 726
727 727 Exists as an independent function to aid extensions
728 728 """
729 729 # * 'force' does not check for push races,
730 730 # * if we don't push anything, there is nothing to check.
731 731 if not pushop.force and pushop.outgoing.missingheads:
732 732 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
733 733 if not allowunrelated:
734 734 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
735 735 else:
736 736 affected = set()
737 737 for branch, heads in pushop.pushbranchmap.iteritems():
738 738 remoteheads, newheads, unsyncedheads, discardedheads = heads
739 739 if remoteheads is not None:
740 740 remote = set(remoteheads)
741 741 affected |= set(discardedheads) & remote
742 742 affected |= remote - set(newheads)
743 743 if affected:
744 744 data = iter(sorted(affected))
745 745 bundler.newpart('check:updated-heads', data=data)
746 746
747 747 @b2partsgenerator('changeset')
748 748 def _pushb2ctx(pushop, bundler):
749 749 """handle changegroup push through bundle2
750 750
751 751 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
752 752 """
753 753 if 'changesets' in pushop.stepsdone:
754 754 return
755 755 pushop.stepsdone.add('changesets')
756 756 # Send known heads to the server for race detection.
757 757 if not _pushcheckoutgoing(pushop):
758 758 return
759 759 pushop.repo.prepushoutgoinghooks(pushop)
760 760
761 761 _pushb2ctxcheckheads(pushop, bundler)
762 762
763 763 b2caps = bundle2.bundle2caps(pushop.remote)
764 764 version = '01'
765 765 cgversions = b2caps.get('changegroup')
766 766 if cgversions: # 3.1 and 3.2 ship with an empty value
767 767 cgversions = [v for v in cgversions
768 768 if v in changegroup.supportedoutgoingversions(
769 769 pushop.repo)]
770 770 if not cgversions:
771 771 raise ValueError(_('no common changegroup version'))
772 772 version = max(cgversions)
773 773 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
774 774 pushop.outgoing,
775 775 version=version)
776 776 cgpart = bundler.newpart('changegroup', data=cg)
777 777 if cgversions:
778 778 cgpart.addparam('version', version)
779 779 if 'treemanifest' in pushop.repo.requirements:
780 780 cgpart.addparam('treemanifest', '1')
781 781 def handlereply(op):
782 782 """extract addchangegroup returns from server reply"""
783 783 cgreplies = op.records.getreplies(cgpart.id)
784 784 assert len(cgreplies['changegroup']) == 1
785 785 pushop.cgresult = cgreplies['changegroup'][0]['return']
786 786 return handlereply
787 787
788 788 @b2partsgenerator('phase')
789 789 def _pushb2phases(pushop, bundler):
790 790 """handle phase push through bundle2"""
791 791 if 'phases' in pushop.stepsdone:
792 792 return
793 793 b2caps = bundle2.bundle2caps(pushop.remote)
794 794 if 'pushkey' not in b2caps:
795 795 return
796 796 pushop.stepsdone.add('phases')
797 797 part2node = []
798 798
799 799 def handlefailure(pushop, exc):
800 800 targetid = int(exc.partid)
801 801 for partid, node in part2node:
802 802 if partid == targetid:
803 803 raise error.Abort(_('updating %s to public failed') % node)
804 804
805 805 enc = pushkey.encode
806 806 for newremotehead in pushop.outdatedphases:
807 807 part = bundler.newpart('pushkey')
808 808 part.addparam('namespace', enc('phases'))
809 809 part.addparam('key', enc(newremotehead.hex()))
810 810 part.addparam('old', enc(str(phases.draft)))
811 811 part.addparam('new', enc(str(phases.public)))
812 812 part2node.append((part.id, newremotehead))
813 813 pushop.pkfailcb[part.id] = handlefailure
814 814
815 815 def handlereply(op):
816 816 for partid, node in part2node:
817 817 partrep = op.records.getreplies(partid)
818 818 results = partrep['pushkey']
819 819 assert len(results) <= 1
820 820 msg = None
821 821 if not results:
822 822 msg = _('server ignored update of %s to public!\n') % node
823 823 elif not int(results[0]['return']):
824 824 msg = _('updating %s to public failed!\n') % node
825 825 if msg is not None:
826 826 pushop.ui.warn(msg)
827 827 return handlereply
828 828
829 829 @b2partsgenerator('obsmarkers')
830 830 def _pushb2obsmarkers(pushop, bundler):
831 831 if 'obsmarkers' in pushop.stepsdone:
832 832 return
833 833 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
834 834 if obsolete.commonversion(remoteversions) is None:
835 835 return
836 836 pushop.stepsdone.add('obsmarkers')
837 837 if pushop.outobsmarkers:
838 838 markers = sorted(pushop.outobsmarkers)
839 839 bundle2.buildobsmarkerspart(bundler, markers)
840 840
841 841 @b2partsgenerator('bookmarks')
842 842 def _pushb2bookmarks(pushop, bundler):
843 843 """handle bookmark push through bundle2"""
844 844 if 'bookmarks' in pushop.stepsdone:
845 845 return
846 846 b2caps = bundle2.bundle2caps(pushop.remote)
847 847 if 'pushkey' not in b2caps:
848 848 return
849 849 pushop.stepsdone.add('bookmarks')
850 850 part2book = []
851 851 enc = pushkey.encode
852 852
853 853 def handlefailure(pushop, exc):
854 854 targetid = int(exc.partid)
855 855 for partid, book, action in part2book:
856 856 if partid == targetid:
857 857 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
858 858 # we should not be called for a part we did not generate
859 859 assert False
860 860
861 861 for book, old, new in pushop.outbookmarks:
862 862 part = bundler.newpart('pushkey')
863 863 part.addparam('namespace', enc('bookmarks'))
864 864 part.addparam('key', enc(book))
865 865 part.addparam('old', enc(old))
866 866 part.addparam('new', enc(new))
867 867 action = 'update'
868 868 if not old:
869 869 action = 'export'
870 870 elif not new:
871 871 action = 'delete'
872 872 part2book.append((part.id, book, action))
873 873 pushop.pkfailcb[part.id] = handlefailure
874 874
875 875 def handlereply(op):
876 876 ui = pushop.ui
877 877 for partid, book, action in part2book:
878 878 partrep = op.records.getreplies(partid)
879 879 results = partrep['pushkey']
880 880 assert len(results) <= 1
881 881 if not results:
882 882 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
883 883 else:
884 884 ret = int(results[0]['return'])
885 885 if ret:
886 886 ui.status(bookmsgmap[action][0] % book)
887 887 else:
888 888 ui.warn(bookmsgmap[action][1] % book)
889 889 if pushop.bkresult is not None:
890 890 pushop.bkresult = 1
891 891 return handlereply
892 892
893 893
894 894 def _pushbundle2(pushop):
895 895 """push data to the remote using bundle2
896 896
897 897 The only currently supported type of data is the changegroup, but this
898 898 will evolve in the future."""
899 899 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
900 900 pushback = (pushop.trmanager
901 901 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
902 902
903 903 # create reply capability
904 904 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
905 905 allowpushback=pushback))
906 906 bundler.newpart('replycaps', data=capsblob)
907 907 replyhandlers = []
908 908 for partgenname in b2partsgenorder:
909 909 partgen = b2partsgenmapping[partgenname]
910 910 ret = partgen(pushop, bundler)
911 911 if callable(ret):
912 912 replyhandlers.append(ret)
913 913 # do not push if nothing to push
914 914 if bundler.nbparts <= 1:
915 915 return
916 916 stream = util.chunkbuffer(bundler.getchunks())
917 917 try:
918 918 try:
919 919 reply = pushop.remote.unbundle(
920 920 stream, ['force'], pushop.remote.url())
921 921 except error.BundleValueError as exc:
922 922 raise error.Abort(_('missing support for %s') % exc)
923 923 try:
924 924 trgetter = None
925 925 if pushback:
926 926 trgetter = pushop.trmanager.transaction
927 927 op = bundle2.processbundle(pushop.repo, reply, trgetter)
928 928 except error.BundleValueError as exc:
929 929 raise error.Abort(_('missing support for %s') % exc)
930 930 except bundle2.AbortFromPart as exc:
931 931 pushop.ui.status(_('remote: %s\n') % exc)
932 932 if exc.hint is not None:
933 933 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
934 934 raise error.Abort(_('push failed on remote'))
935 935 except error.PushkeyFailed as exc:
936 936 partid = int(exc.partid)
937 937 if partid not in pushop.pkfailcb:
938 938 raise
939 939 pushop.pkfailcb[partid](pushop, exc)
940 940 for rephand in replyhandlers:
941 941 rephand(op)
942 942
943 943 def _pushchangeset(pushop):
944 944 """Make the actual push of changeset bundle to remote repo"""
945 945 if 'changesets' in pushop.stepsdone:
946 946 return
947 947 pushop.stepsdone.add('changesets')
948 948 if not _pushcheckoutgoing(pushop):
949 949 return
950 950 pushop.repo.prepushoutgoinghooks(pushop)
951 951 outgoing = pushop.outgoing
952 952 unbundle = pushop.remote.capable('unbundle')
953 953 # TODO: get bundlecaps from remote
954 954 bundlecaps = None
955 955 # create a changegroup from local
956 956 if pushop.revs is None and not (outgoing.excluded
957 957 or pushop.repo.changelog.filteredrevs):
958 958 # push everything,
959 959 # use the fast path, no race possible on push
960 960 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
961 961 cg = changegroup.getsubset(pushop.repo,
962 962 outgoing,
963 963 bundler,
964 964 'push',
965 965 fastpath=True)
966 966 else:
967 967 cg = changegroup.getchangegroup(pushop.repo, 'push', outgoing,
968 968 bundlecaps=bundlecaps)
969 969
970 970 # apply changegroup to remote
971 971 if unbundle:
972 972 # local repo finds heads on server, finds out what
973 973 # revs it must push. once revs transferred, if server
974 974 # finds it has different heads (someone else won
975 975 # commit/push race), server aborts.
976 976 if pushop.force:
977 977 remoteheads = ['force']
978 978 else:
979 979 remoteheads = pushop.remoteheads
980 980 # ssh: return remote's addchangegroup()
981 981 # http: return remote's addchangegroup() or 0 for error
982 982 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
983 983 pushop.repo.url())
984 984 else:
985 985 # we return an integer indicating remote head count
986 986 # change
987 987 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
988 988 pushop.repo.url())
989 989
990 990 def _pushsyncphase(pushop):
991 991 """synchronise phase information locally and remotely"""
992 992 cheads = pushop.commonheads
993 993 # even when we don't push, exchanging phase data is useful
994 994 remotephases = pushop.remote.listkeys('phases')
995 995 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
996 996 and remotephases # server supports phases
997 997 and pushop.cgresult is None # nothing was pushed
998 998 and remotephases.get('publishing', False)):
999 999 # When:
1000 1000 # - this is a subrepo push
1001 1001 # - and the remote supports phases
1002 1002 # - and no changesets were pushed
1003 1003 # - and the remote is publishing
1004 1004 # We may be in the issue 3871 case!
1005 1005 # We drop the phase synchronisation that would otherwise be done as
1006 1006 # a courtesy to publish changesets that may still be draft on the
1007 1007 # remote.
1008 1008 remotephases = {'publishing': 'True'}
1009 1009 if not remotephases: # old server or public-only reply from non-publishing
1010 1010 _localphasemove(pushop, cheads)
1011 1011 # don't push any phase data as there is nothing to push
1012 1012 else:
1013 1013 ana = phases.analyzeremotephases(pushop.repo, cheads,
1014 1014 remotephases)
1015 1015 pheads, droots = ana
1016 1016 ### Apply remote phase on local
1017 1017 if remotephases.get('publishing', False):
1018 1018 _localphasemove(pushop, cheads)
1019 1019 else: # publish = False
1020 1020 _localphasemove(pushop, pheads)
1021 1021 _localphasemove(pushop, cheads, phases.draft)
1022 1022 ### Apply local phase on remote
1023 1023
1024 1024 if pushop.cgresult:
1025 1025 if 'phases' in pushop.stepsdone:
1026 1026 # phases already pushed through bundle2
1027 1027 return
1028 1028 outdated = pushop.outdatedphases
1029 1029 else:
1030 1030 outdated = pushop.fallbackoutdatedphases
1031 1031
1032 1032 pushop.stepsdone.add('phases')
1033 1033
1034 1034 # filter heads already turned public by the push
1035 1035 outdated = [c for c in outdated if c.node() not in pheads]
1036 1036 # fallback to independent pushkey command
1037 1037 for newremotehead in outdated:
1038 1038 r = pushop.remote.pushkey('phases',
1039 1039 newremotehead.hex(),
1040 1040 str(phases.draft),
1041 1041 str(phases.public))
1042 1042 if not r:
1043 1043 pushop.ui.warn(_('updating %s to public failed!\n')
1044 1044 % newremotehead)
1045 1045
1046 1046 def _localphasemove(pushop, nodes, phase=phases.public):
1047 1047 """move <nodes> to <phase> in the local source repo"""
1048 1048 if pushop.trmanager:
1049 1049 phases.advanceboundary(pushop.repo,
1050 1050 pushop.trmanager.transaction(),
1051 1051 phase,
1052 1052 nodes)
1053 1053 else:
1054 1054 # repo is not locked, do not change any phases!
1055 1055 # Inform the user that phases should have been moved when
1056 1056 # applicable.
1057 1057 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1058 1058 phasestr = phases.phasenames[phase]
1059 1059 if actualmoves:
1060 1060 pushop.ui.status(_('cannot lock source repo, skipping '
1061 1061 'local %s phase update\n') % phasestr)
1062 1062
1063 1063 def _pushobsolete(pushop):
1064 1064 """utility function to push obsolete markers to a remote"""
1065 1065 if 'obsmarkers' in pushop.stepsdone:
1066 1066 return
1067 1067 repo = pushop.repo
1068 1068 remote = pushop.remote
1069 1069 pushop.stepsdone.add('obsmarkers')
1070 1070 if pushop.outobsmarkers:
1071 1071 pushop.ui.debug('try to push obsolete markers to remote\n')
1072 1072 rslts = []
1073 1073 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1074 1074 for key in sorted(remotedata, reverse=True):
1075 1075 # reverse sort to ensure we end with dump0
1076 1076 data = remotedata[key]
1077 1077 rslts.append(remote.pushkey('obsolete', key, '', data))
1078 1078 if [r for r in rslts if not r]:
1079 1079 msg = _('failed to push some obsolete markers!\n')
1080 1080 repo.ui.warn(msg)
1081 1081
1082 1082 def _pushbookmark(pushop):
1083 1083 """Update bookmark position on remote"""
1084 1084 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1085 1085 return
1086 1086 pushop.stepsdone.add('bookmarks')
1087 1087 ui = pushop.ui
1088 1088 remote = pushop.remote
1089 1089
1090 1090 for b, old, new in pushop.outbookmarks:
1091 1091 action = 'update'
1092 1092 if not old:
1093 1093 action = 'export'
1094 1094 elif not new:
1095 1095 action = 'delete'
1096 1096 if remote.pushkey('bookmarks', b, old, new):
1097 1097 ui.status(bookmsgmap[action][0] % b)
1098 1098 else:
1099 1099 ui.warn(bookmsgmap[action][1] % b)
1100 1100 # discovery can have set the value from an invalid entry
1101 1101 if pushop.bkresult is not None:
1102 1102 pushop.bkresult = 1
1103 1103
1104 1104 class pulloperation(object):
1105 1105 """An object that represents a single pull operation
1106 1106
1107 1107 Its purpose is to carry pull-related state and very common operations.
1108 1108
1109 1109 A new one should be created at the beginning of each pull and discarded
1110 1110 afterward.
1111 1111 """
1112 1112
1113 1113 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1114 1114 remotebookmarks=None, streamclonerequested=None):
1115 1115 # repo we pull into
1116 1116 self.repo = repo
1117 1117 # repo we pull from
1118 1118 self.remote = remote
1119 1119 # revision we try to pull (None is "all")
1120 1120 self.heads = heads
1121 1121 # bookmarks pulled explicitly
1122 1122 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1123 1123 for bookmark in bookmarks]
1124 1124 # do we force pull?
1125 1125 self.force = force
1126 1126 # whether a streaming clone was requested
1127 1127 self.streamclonerequested = streamclonerequested
1128 1128 # transaction manager
1129 1129 self.trmanager = None
1130 1130 # set of common changesets between local and remote before pull
1131 1131 self.common = None
1132 1132 # set of pulled heads
1133 1133 self.rheads = None
1134 1134 # list of missing changesets to fetch remotely
1135 1135 self.fetch = None
1136 1136 # remote bookmarks data
1137 1137 self.remotebookmarks = remotebookmarks
1138 1138 # result of changegroup pulling (used as return code by pull)
1139 1139 self.cgresult = None
1140 1140 # list of steps already done
1141 1141 self.stepsdone = set()
1142 1142 # Whether we attempted a clone from pre-generated bundles.
1143 1143 self.clonebundleattempted = False
1144 1144
1145 1145 @util.propertycache
1146 1146 def pulledsubset(self):
1147 1147 """heads of the set of changesets targeted by the pull"""
1148 1148 # compute target subset
1149 1149 if self.heads is None:
1150 1150 # We pulled everything possible
1151 1151 # sync on everything common
1152 1152 c = set(self.common)
1153 1153 ret = list(self.common)
1154 1154 for n in self.rheads:
1155 1155 if n not in c:
1156 1156 ret.append(n)
1157 1157 return ret
1158 1158 else:
1159 1159 # We pulled a specific subset
1160 1160 # sync on this subset
1161 1161 return self.heads
1162 1162
1163 1163 @util.propertycache
1164 1164 def canusebundle2(self):
1165 1165 return not _forcebundle1(self)
1166 1166
1167 1167 @util.propertycache
1168 1168 def remotebundle2caps(self):
1169 1169 return bundle2.bundle2caps(self.remote)
1170 1170
1171 1171 def gettransaction(self):
1172 1172 # deprecated; talk to trmanager directly
1173 1173 return self.trmanager.transaction()
1174 1174
1175 1175 class transactionmanager(object):
1176 1176 """An object to manage the life cycle of a transaction
1177 1177
1178 1178 It creates the transaction on demand and calls the appropriate hooks when
1179 1179 closing the transaction."""
1180 1180 def __init__(self, repo, source, url):
1181 1181 self.repo = repo
1182 1182 self.source = source
1183 1183 self.url = url
1184 1184 self._tr = None
1185 1185
1186 1186 def transaction(self):
1187 1187 """Return an open transaction object, constructing if necessary"""
1188 1188 if not self._tr:
1189 1189 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1190 1190 self._tr = self.repo.transaction(trname)
1191 1191 self._tr.hookargs['source'] = self.source
1192 1192 self._tr.hookargs['url'] = self.url
1193 1193 return self._tr
1194 1194
1195 1195 def close(self):
1196 1196 """close transaction if created"""
1197 1197 if self._tr is not None:
1198 1198 self._tr.close()
1199 1199
1200 1200 def release(self):
1201 1201 """release transaction if created"""
1202 1202 if self._tr is not None:
1203 1203 self._tr.release()
1204 1204
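# transactionmanager life cycle, as a sketch (this mirrors how pull()
# below drives it):
#
#   trmanager = transactionmanager(repo, 'pull', remote.url())
#   try:
#       ...  # steps may lazily call trmanager.transaction()
#       trmanager.close()    # commit and run hooks
#   finally:
#       trmanager.release()  # abort if close() was never reached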
1205 1205 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1206 1206 streamclonerequested=None):
1207 1207 """Fetch repository data from a remote.
1208 1208
1209 1209 This is the main function used to retrieve data from a remote repository.
1210 1210
1211 1211 ``repo`` is the local repository to clone into.
1212 1212 ``remote`` is a peer instance.
1213 1213 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1214 1214 default) means to pull everything from the remote.
1215 1215 ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
1216 1216 default, all remote bookmarks are pulled.
1217 1217 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1218 1218 initialization.
1219 1219 ``streamclonerequested`` is a boolean indicating whether a "streaming
1220 1220 clone" is requested. A "streaming clone" is essentially a raw file copy
1221 1221 of revlogs from the server. This only works when the local repository is
1222 1222 empty. The default value of ``None`` means to respect the server
1223 1223 configuration for preferring stream clones.
1224 1224
1225 1225 Returns the ``pulloperation`` created for this pull.
1226 1226 """
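# A typical full pull, as an informal sketch:
#
#   pullop = exchange.pull(repo, remote)
#   if pullop.cgresult == 0:
#       repo.ui.status('no changes found\n')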
1227 1227 if opargs is None:
1228 1228 opargs = {}
1229 1229 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1230 1230 streamclonerequested=streamclonerequested, **opargs)
1231 1231 if pullop.remote.local():
1232 1232 missing = set(pullop.remote.requirements) - pullop.repo.supported
1233 1233 if missing:
1234 1234 msg = _("required features are not"
1235 1235 " supported in the destination:"
1236 1236 " %s") % (', '.join(sorted(missing)))
1237 1237 raise error.Abort(msg)
1238 1238
1239 1239 wlock = lock = None
1240 1240 try:
1241 1241 wlock = pullop.repo.wlock()
1242 1242 lock = pullop.repo.lock()
1243 1243 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1244 1244 streamclone.maybeperformlegacystreamclone(pullop)
1245 1245 # This should ideally be in _pullbundle2(). However, it needs to run
1246 1246 # before discovery to avoid extra work.
1247 1247 _maybeapplyclonebundle(pullop)
1248 1248 _pulldiscovery(pullop)
1249 1249 if pullop.canusebundle2:
1250 1250 _pullbundle2(pullop)
1251 1251 _pullchangeset(pullop)
1252 1252 _pullphase(pullop)
1253 1253 _pullbookmarks(pullop)
1254 1254 _pullobsolete(pullop)
1255 1255 pullop.trmanager.close()
1256 1256 finally:
1257 1257 lockmod.release(pullop.trmanager, lock, wlock)
1258 1258
1259 1259 return pullop
1260 1260
1261 1261 # list of steps to perform discovery before pull
1262 1262 pulldiscoveryorder = []
1263 1263
1264 1264 # Mapping between step name and function
1265 1265 #
1266 1266 # This exists to help extensions wrap steps if necessary
1267 1267 pulldiscoverymapping = {}
1268 1268
1269 1269 def pulldiscovery(stepname):
1270 1270 """decorator for function performing discovery before pull
1271 1271
1272 1272 The function is added to the step -> function mapping and appended to the
1273 1273 list of steps. Beware that decorated functions will be added in order (this
1274 1274 may matter).
1275 1275
1276 1276 You can only use this decorator for a new step; if you want to wrap a step
1277 1277 from an extension, change the pulldiscoverymapping dictionary directly."""
1278 1278 def dec(func):
1279 1279 assert stepname not in pulldiscoverymapping
1280 1280 pulldiscoverymapping[stepname] = func
1281 1281 pulldiscoveryorder.append(stepname)
1282 1282 return func
1283 1283 return dec
1284 1284
1285 1285 def _pulldiscovery(pullop):
1286 1286 """Run all discovery steps"""
1287 1287 for stepname in pulldiscoveryorder:
1288 1288 step = pulldiscoverymapping[stepname]
1289 1289 step(pullop)
1290 1290
1291 1291 @pulldiscovery('b1:bookmarks')
1292 1292 def _pullbookmarkbundle1(pullop):
1293 1293 """fetch bookmark data in bundle1 case
1294 1294
1295 1295 If not using bundle2, we have to fetch bookmarks before changeset
1296 1296 discovery to reduce the chance and impact of race conditions."""
1297 1297 if pullop.remotebookmarks is not None:
1298 1298 return
1299 1299 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1300 1300 # all known bundle2 servers now support listkeys, but let's be nice to
1301 1301 # new implementations.
1302 1302 return
1303 1303 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1304 1304
1305 1305
1306 1306 @pulldiscovery('changegroup')
1307 1307 def _pulldiscoverychangegroup(pullop):
1308 1308 """discovery phase for the pull
1309 1309
1310 1310 Currently handles changeset discovery only; will change to handle all
1311 1311 discovery at some point."""
1312 1312 tmp = discovery.findcommonincoming(pullop.repo,
1313 1313 pullop.remote,
1314 1314 heads=pullop.heads,
1315 1315 force=pullop.force)
1316 1316 common, fetch, rheads = tmp
1317 1317 nm = pullop.repo.unfiltered().changelog.nodemap
1318 1318 if fetch and rheads:
1319 1319 # If a remote head is filtered locally, let's drop it from the unknown
1320 1320 # remote heads and put it back in common.
1321 1321 #
1322 1322 # This is a hackish solution to catch most of the "common but locally
1323 1323 # hidden" situations. We do not perform discovery on the unfiltered
1324 1324 # repository because it ends up doing a pathological amount of round
1325 1325 # trips for a huge amount of changesets we do not care about.
1326 1326 #
1327 1327 # If a set of such "common but filtered" changesets exists on the server
1328 1328 # but does not include a remote head, we'll not be able to detect it.
1329 1329 scommon = set(common)
1330 1330 filteredrheads = []
1331 1331 for n in rheads:
1332 1332 if n in nm:
1333 1333 if n not in scommon:
1334 1334 common.append(n)
1335 1335 else:
1336 1336 filteredrheads.append(n)
1337 1337 if not filteredrheads:
1338 1338 fetch = []
1339 1339 rheads = filteredrheads
1340 1340 pullop.common = common
1341 1341 pullop.fetch = fetch
1342 1342 pullop.rheads = rheads
1343 1343
1344 1344 def _pullbundle2(pullop):
1345 1345 """pull data using bundle2
1346 1346
1347 1347 For now, the only supported data is the changegroup."""
1348 1348 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1349 1349
1350 1350 # At the moment we don't do stream clones over bundle2. If that is
1351 1351 # implemented then here's where the check for that will go.
1352 1352 streaming = False
1353 1353
1354 1354 # pulling changegroup
1355 1355 pullop.stepsdone.add('changegroup')
1356 1356
1357 1357 kwargs['common'] = pullop.common
1358 1358 kwargs['heads'] = pullop.heads or pullop.rheads
1359 1359 kwargs['cg'] = pullop.fetch
1360 1360 if 'listkeys' in pullop.remotebundle2caps:
1361 1361 kwargs['listkeys'] = ['phases']
1362 1362 if pullop.remotebookmarks is None:
1363 1363 # make sure to always include bookmark data when migrating
1364 1364 # `hg incoming --bundle` to using this function.
1365 1365 kwargs['listkeys'].append('bookmarks')
1366 1366
1367 1367 # If this is a full pull / clone and the server supports the clone bundles
1368 1368 # feature, tell the server whether we attempted a clone bundle. The
1369 1369 # presence of this flag indicates the client supports clone bundles. This
1370 1370 # will enable the server to treat clients that support clone bundles
1371 1371 # differently from those that don't.
1372 1372 if (pullop.remote.capable('clonebundles')
1373 1373 and pullop.heads is None and list(pullop.common) == [nullid]):
1374 1374 kwargs['cbattempted'] = pullop.clonebundleattempted
1375 1375
1376 1376 if streaming:
1377 1377 pullop.repo.ui.status(_('streaming all changes\n'))
1378 1378 elif not pullop.fetch:
1379 1379 pullop.repo.ui.status(_("no changes found\n"))
1380 1380 pullop.cgresult = 0
1381 1381 else:
1382 1382 if pullop.heads is None and list(pullop.common) == [nullid]:
1383 1383 pullop.repo.ui.status(_("requesting all changes\n"))
1384 1384 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1385 1385 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1386 1386 if obsolete.commonversion(remoteversions) is not None:
1387 1387 kwargs['obsmarkers'] = True
1388 1388 pullop.stepsdone.add('obsmarkers')
1389 1389 _pullbundle2extraprepare(pullop, kwargs)
1390 1390 bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
1391 1391 try:
1392 1392 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1393 1393 except bundle2.AbortFromPart as exc:
1394 1394 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1395 1395 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1396 1396 except error.BundleValueError as exc:
1397 1397 raise error.Abort(_('missing support for %s') % exc)
1398 1398
1399 1399 if pullop.fetch:
1400 1400 results = [cg['return'] for cg in op.records['changegroup']]
1401 1401 pullop.cgresult = changegroup.combineresults(results)
1402 1402
1403 1403 # processing phases change
1404 1404 for namespace, value in op.records['listkeys']:
1405 1405 if namespace == 'phases':
1406 1406 _pullapplyphases(pullop, value)
1407 1407
1408 1408 # processing bookmark update
1409 1409 for namespace, value in op.records['listkeys']:
1410 1410 if namespace == 'bookmarks':
1411 1411 pullop.remotebookmarks = value
1412 1412
1413 1413 # bookmark data were either already there or pulled in the bundle
1414 1414 if pullop.remotebookmarks is not None:
1415 1415 _pullbookmarks(pullop)
1416 1416
1417 1417 def _pullbundle2extraprepare(pullop, kwargs):
1418 1418 """hook function so that extensions can extend the getbundle call"""
1419 1419 pass
1420 1420
1421 1421 def _pullchangeset(pullop):
1422 1422 """pull changeset from unbundle into the local repo"""
1423 1423 # We delay opening the transaction as late as possible so we
1424 1424 # don't open a transaction for nothing and don't break future useful
1425 1425 # rollback calls
1426 1426 if 'changegroup' in pullop.stepsdone:
1427 1427 return
1428 1428 pullop.stepsdone.add('changegroup')
1429 1429 if not pullop.fetch:
1430 1430 pullop.repo.ui.status(_("no changes found\n"))
1431 1431 pullop.cgresult = 0
1432 1432 return
1433 1433 tr = pullop.gettransaction()
1434 1434 if pullop.heads is None and list(pullop.common) == [nullid]:
1435 1435 pullop.repo.ui.status(_("requesting all changes\n"))
1436 1436 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1437 1437 # issue1320, avoid a race if remote changed after discovery
1438 1438 pullop.heads = pullop.rheads
1439 1439
1440 1440 if pullop.remote.capable('getbundle'):
1441 1441 # TODO: get bundlecaps from remote
1442 1442 cg = pullop.remote.getbundle('pull', common=pullop.common,
1443 1443 heads=pullop.heads or pullop.rheads)
1444 1444 elif pullop.heads is None:
1445 1445 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1446 1446 elif not pullop.remote.capable('changegroupsubset'):
1447 1447 raise error.Abort(_("partial pull cannot be done because "
1448 1448 "other repository doesn't support "
1449 1449 "changegroupsubset."))
1450 1450 else:
1451 1451 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1452 1452 pullop.cgresult = cg.apply(pullop.repo, tr, 'pull', pullop.remote.url())
1453 1453
def _pullphase(pullop):
    # Get phases data from the remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

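# For reference (illustrative; the exact wire format belongs to the
# pushkey protocol): a publishing server typically answers
# listkeys('phases') with {'publishing': 'True'}, while a non-publishing
# one also lists draft root nodes as <hex-node>: '1' entries, which
# analyzeremotephases() above turns into public/draft head sets.
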
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
        pullop.repo.invalidatevolatilesets()
    return tr

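# For reference (illustrative, payload abbreviated): the 'obsolete'
# pushkey namespace returns base85-encoded marker blobs under keys named
# 'dump0', 'dump1', ..., which is why the loop above filters on the
# 'dump' prefix, e.g.
#
#   pullop.remote.listkeys('obsolete') -> {'dump0': '0wi<W...'}
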
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {'HG20'}
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps

# List of names of steps to perform for a bundle2 for getbundle; order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for a function generating a bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, modify the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec

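# A minimal sketch of the wrapping technique the docstring above points
# at (hypothetical helper, defined here but never invoked): replace an
# existing entry in getbundle2partsmapping with a wrapper that delegates
# to the original step function.
def _examplewrapchangegroupstep():
    orig = getbundle2partsmapping['changegroup']
    def wrapped(bundler, repo, source, **kwargs):
        # an extension could adjust kwargs here before delegating
        return orig(bundler, repo, source, **kwargs)
    getbundle2partsmapping['changegroup'] = wrapped
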
def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False

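# e.g. bundle2requested({'HG20'}) -> True, bundle2requested(None) -> False
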
def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns an iterator over raw chunks (of varying sizes).
    """
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        bundler = changegroup.getbundler('01', repo, bundlecaps)
        return changegroup.getsubsetraw(repo, outgoing, bundler, source)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return bundler.getchunks()

@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                                bundlecaps=bundlecaps,
                                                version=version)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if cgversions:
            part.addparam('version', version)
        part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

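# Version negotiation sketch: when a client advertises changegroup
# versions ('01', '02') and the repo supports both, max() above selects
# '02'. Treemanifest repos only report versions able to carry tree
# manifests in supportedoutgoingversions(), so clients lacking those
# fail above with 'no common changegroup version'.
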
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        bundle2.buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)

def _getbookmarks(repo, **kwargs):
    """Returns bookmark to node mapping.

    This function is primarily used to generate the `bookmarks` bundle2 part.
    It is a separate function in order to make it easy to wrap it
    in extensions. Passing `kwargs` to the function makes it easy to
    add new parameters in extensions.
    """

    return dict(bookmod.listbinbookmarks(repo))

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

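# Sketch (illustrative helper, not used by this module) of how a caller
# would build the 'hashed' form that check_heads() above accepts: hash
# the sorted, concatenated head nodes exactly as the server does.
def _exampleheadshashargument(heads):
    return ['hashed', hashlib.sha1(''.join(sorted(heads))).digest()]
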
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                r = cg.apply(repo, tr, source, url)
        else:
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles', True):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(repo, entries)
    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback', False):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

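# For example, a manifest line such as (placeholder URL):
#
#   https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# parses to {'URL': 'https://example.com/full.hg',
#            'BUNDLESPEC': 'gzip-v2', 'REQUIRESNI': 'true',
#            'COMPRESSION': 'gzip', 'VERSION': 'v2'}
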
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

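# e.g. an entry whose BUNDLESPEC names a compression engine or bundle
# type this client cannot parse is dropped here with a debug message,
# and an entry carrying REQUIRESNI is dropped when the local ssl module
# lacks SNI support.
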
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers', default=[])
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]

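# For instance, with the following configuration (illustrative values):
#
#   [ui]
#   clonebundleprefers = VERSION=v2, COMPRESSION=gzip
#
# entries whose VERSION attribute is 'v2' sort first; among those,
# gzip-compressed ones win, and remaining ties keep manifest order.
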
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, bundle2.unbundle20):
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            elif isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                cg.apply(repo, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') % e.reason)

    return False