push: move bundle2-pushkey based bookmarks exchange in its own function...
Boris Feld
r35263:3fd5f05a default
@@ -1,2137 +1,2139 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 bin,
17 17 hex,
18 18 nullid,
19 19 )
20 20 from . import (
21 21 bookmarks as bookmod,
22 22 bundle2,
23 23 changegroup,
24 24 discovery,
25 25 error,
26 26 lock as lockmod,
27 27 obsolete,
28 28 phases,
29 29 pushkey,
30 30 pycompat,
31 31 remotenames,
32 32 scmutil,
33 33 sslutil,
34 34 streamclone,
35 35 url as urlmod,
36 36 util,
37 37 )
38 38
39 39 urlerr = util.urlerr
40 40 urlreq = util.urlreq
41 41
42 42 # Maps bundle version human names to changegroup versions.
43 43 _bundlespeccgversions = {'v1': '01',
44 44 'v2': '02',
45 45 'packed1': 's1',
46 46 'bundle2': '02', #legacy
47 47 }
48 48
49 49 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
50 50 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
51 51
52 52 def parsebundlespec(repo, spec, strict=True, externalnames=False):
53 53 """Parse a bundle string specification into parts.
54 54
55 55 Bundle specifications denote a well-defined bundle/exchange format.
56 56 The content of a given specification should not change over time in
57 57 order to ensure that bundles produced by a newer version of Mercurial are
58 58 readable from an older version.
59 59
60 60 The string currently has the form:
61 61
62 62 <compression>-<type>[;<parameter0>[;<parameter1>]]
63 63
64 64 Where <compression> is one of the supported compression formats
65 65 and <type> is (currently) a version string. A ";" can follow the type and
66 66 all text afterwards is interpreted as URI encoded, ";" delimited key=value
67 67 pairs.
68 68
69 69 If ``strict`` is True (the default) <compression> is required. Otherwise,
70 70 it is optional.
71 71
72 72 If ``externalnames`` is False (the default), the human-centric names will
73 73 be converted to their internal representation.
74 74
75 75 Returns a 3-tuple of (compression, version, parameters). Compression will
76 76 be ``None`` if not in strict mode and a compression isn't defined.
77 77
78 78 An ``InvalidBundleSpecification`` is raised when the specification is
79 79 not syntactically well formed.
80 80
81 81 An ``UnsupportedBundleSpecification`` is raised when the compression or
82 82 bundle type/version is not recognized.
83 83
84 84 Note: this function will likely eventually return a more complex data
85 85 structure, including bundle2 part information.
86 86 """
87 87 def parseparams(s):
88 88 if ';' not in s:
89 89 return s, {}
90 90
91 91 params = {}
92 92 version, paramstr = s.split(';', 1)
93 93
94 94 for p in paramstr.split(';'):
95 95 if '=' not in p:
96 96 raise error.InvalidBundleSpecification(
97 97 _('invalid bundle specification: '
98 98 'missing "=" in parameter: %s') % p)
99 99
100 100 key, value = p.split('=', 1)
101 101 key = urlreq.unquote(key)
102 102 value = urlreq.unquote(value)
103 103 params[key] = value
104 104
105 105 return version, params
106 106
107 107
108 108 if strict and '-' not in spec:
109 109 raise error.InvalidBundleSpecification(
110 110 _('invalid bundle specification; '
111 111 'must be prefixed with compression: %s') % spec)
112 112
113 113 if '-' in spec:
114 114 compression, version = spec.split('-', 1)
115 115
116 116 if compression not in util.compengines.supportedbundlenames:
117 117 raise error.UnsupportedBundleSpecification(
118 118 _('%s compression is not supported') % compression)
119 119
120 120 version, params = parseparams(version)
121 121
122 122 if version not in _bundlespeccgversions:
123 123 raise error.UnsupportedBundleSpecification(
124 124 _('%s is not a recognized bundle version') % version)
125 125 else:
126 126 # Value could be just the compression or just the version, in which
127 127 # case some defaults are assumed (but only when not in strict mode).
128 128 assert not strict
129 129
130 130 spec, params = parseparams(spec)
131 131
132 132 if spec in util.compengines.supportedbundlenames:
133 133 compression = spec
134 134 version = 'v1'
135 135 # Generaldelta repos require v2.
136 136 if 'generaldelta' in repo.requirements:
137 137 version = 'v2'
138 138 # Modern compression engines require v2.
139 139 if compression not in _bundlespecv1compengines:
140 140 version = 'v2'
141 141 elif spec in _bundlespeccgversions:
142 142 if spec == 'packed1':
143 143 compression = 'none'
144 144 else:
145 145 compression = 'bzip2'
146 146 version = spec
147 147 else:
148 148 raise error.UnsupportedBundleSpecification(
149 149 _('%s is not a recognized bundle specification') % spec)
150 150
151 151 # Bundle version 1 only supports a known set of compression engines.
152 152 if version == 'v1' and compression not in _bundlespecv1compengines:
153 153 raise error.UnsupportedBundleSpecification(
154 154 _('compression engine %s is not supported on v1 bundles') %
155 155 compression)
156 156
157 157 # The specification for packed1 can optionally declare the data formats
158 158 # required to apply it. If we see this metadata, compare against what the
159 159 # repo supports and error if the bundle isn't compatible.
160 160 if version == 'packed1' and 'requirements' in params:
161 161 requirements = set(params['requirements'].split(','))
162 162 missingreqs = requirements - repo.supportedformats
163 163 if missingreqs:
164 164 raise error.UnsupportedBundleSpecification(
165 165 _('missing support for repository features: %s') %
166 166 ', '.join(sorted(missingreqs)))
167 167
168 168 if not externalnames:
169 169 engine = util.compengines.forbundlename(compression)
170 170 compression = engine.bundletype()[1]
171 171 version = _bundlespeccgversions[version]
172 172 return compression, version, params
173 173
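
A minimal usage sketch of the parser (assuming ``repo`` is an open local repository; the spec strings are illustrative):

    # strict mode: the compression prefix is mandatory
    comp, cgversion, params = parsebundlespec(repo, 'gzip-v2')
    # -> ('GZ', '02', {}) after mapping to internal names

    # non-strict mode: a bare version defaults the compression
    comp, cgversion, params = parsebundlespec(repo, 'v2', strict=False)
    # -> ('BZ', '02', {}) since 'v2' implies bzip2
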
174 174 def readbundle(ui, fh, fname, vfs=None):
175 175 header = changegroup.readexactly(fh, 4)
176 176
177 177 alg = None
178 178 if not fname:
179 179 fname = "stream"
180 180 if not header.startswith('HG') and header.startswith('\0'):
181 181 fh = changegroup.headerlessfixup(fh, header)
182 182 header = "HG10"
183 183 alg = 'UN'
184 184 elif vfs:
185 185 fname = vfs.join(fname)
186 186
187 187 magic, version = header[0:2], header[2:4]
188 188
189 189 if magic != 'HG':
190 190 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
191 191 if version == '10':
192 192 if alg is None:
193 193 alg = changegroup.readexactly(fh, 2)
194 194 return changegroup.cg1unpacker(fh, alg)
195 195 elif version.startswith('2'):
196 196 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
197 197 elif version == 'S1':
198 198 return streamclone.streamcloneapplier(fh)
199 199 else:
200 200 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
201 201
202 202 def getbundlespec(ui, fh):
203 203 """Infer the bundlespec from a bundle file handle.
204 204
205 205 The input file handle is seeked and the original seek position is not
206 206 restored.
207 207 """
208 208 def speccompression(alg):
209 209 try:
210 210 return util.compengines.forbundletype(alg).bundletype()[0]
211 211 except KeyError:
212 212 return None
213 213
214 214 b = readbundle(ui, fh, None)
215 215 if isinstance(b, changegroup.cg1unpacker):
216 216 alg = b._type
217 217 if alg == '_truncatedBZ':
218 218 alg = 'BZ'
219 219 comp = speccompression(alg)
220 220 if not comp:
221 221 raise error.Abort(_('unknown compression algorithm: %s') % alg)
222 222 return '%s-v1' % comp
223 223 elif isinstance(b, bundle2.unbundle20):
224 224 if 'Compression' in b.params:
225 225 comp = speccompression(b.params['Compression'])
226 226 if not comp:
227 227 raise error.Abort(_('unknown compression algorithm: %s') % comp)
228 228 else:
229 229 comp = 'none'
230 230
231 231 version = None
232 232 for part in b.iterparts():
233 233 if part.type == 'changegroup':
234 234 version = part.params['version']
235 235 if version in ('01', '02'):
236 236 version = 'v2'
237 237 else:
238 238 raise error.Abort(_('changegroup version %s does not have '
239 239 'a known bundlespec') % version,
240 240 hint=_('try upgrading your Mercurial '
241 241 'client'))
242 242
243 243 if not version:
244 244 raise error.Abort(_('could not identify changegroup version in '
245 245 'bundle'))
246 246
247 247 return '%s-%s' % (comp, version)
248 248 elif isinstance(b, streamclone.streamcloneapplier):
249 249 requirements = streamclone.readbundle1header(fh)[2]
250 250 params = 'requirements=%s' % ','.join(sorted(requirements))
251 251 return 'none-packed1;%s' % urlreq.quote(params)
252 252 else:
253 253 raise error.Abort(_('unknown bundle type: %s') % b)
254 254
255 255 def _computeoutgoing(repo, heads, common):
256 256 """Computes which revs are outgoing given a set of common
257 257 and a set of heads.
258 258
259 259 This is a separate function so extensions can have access to
260 260 the logic.
261 261
262 262 Returns a discovery.outgoing object.
263 263 """
264 264 cl = repo.changelog
265 265 if common:
266 266 hasnode = cl.hasnode
267 267 common = [n for n in common if hasnode(n)]
268 268 else:
269 269 common = [nullid]
270 270 if not heads:
271 271 heads = cl.heads()
272 272 return discovery.outgoing(repo, common, heads)
273 273
274 274 def _forcebundle1(op):
275 275 """return true if a pull/push must use bundle1
276 276
277 277 This function is used to allow testing of the older bundle version"""
278 278 ui = op.repo.ui
279 279 forcebundle1 = False
280 280 # The goal of this config is to allow developers to choose the bundle
281 281 # version used during exchange. This is especially handy during tests.
282 282 # Value is a list of bundle versions to be picked from; the highest
283 283 # version should be used.
284 284 #
285 285 # developer config: devel.legacy.exchange
286 286 exchange = ui.configlist('devel', 'legacy.exchange')
287 287 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
288 288 return forcebundle1 or not op.remote.capable('bundle2')
289 289
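
For illustration, the developer config read above could be set in an hgrc to force the legacy protocol during a test run (a sketch; a bundle2-capable peer would then be driven through the bundle1 code path):

    [devel]
    # pick bundle1 even when the peer advertises bundle2
    legacy.exchange = bundle1
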
290 290 class pushoperation(object):
291 291 """A object that represent a single push operation
292 292
293 293 Its purpose is to carry push related state and very common operations.
294 294
295 295 A new pushoperation should be created at the beginning of each push and
296 296 discarded afterward.
297 297 """
298 298
299 299 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
300 300 bookmarks=(), pushvars=None):
301 301 # repo we push from
302 302 self.repo = repo
303 303 self.ui = repo.ui
304 304 # repo we push to
305 305 self.remote = remote
306 306 # force option provided
307 307 self.force = force
308 308 # revs to be pushed (None is "all")
309 309 self.revs = revs
310 310 # bookmarks explicitly pushed
311 311 self.bookmarks = bookmarks
312 312 # allow push of new branch
313 313 self.newbranch = newbranch
314 314 # steps already performed
315 315 # (used to check which steps have already been performed through bundle2)
316 316 self.stepsdone = set()
317 317 # Integer version of the changegroup push result
318 318 # - None means nothing to push
319 319 # - 0 means HTTP error
320 320 # - 1 means we pushed and remote head count is unchanged *or*
321 321 # we have outgoing changesets but refused to push
322 322 # - other values as described by addchangegroup()
323 323 self.cgresult = None
324 324 # Boolean value for the bookmark push
325 325 self.bkresult = None
326 326 # discover.outgoing object (contains common and outgoing data)
327 327 self.outgoing = None
328 328 # all remote topological heads before the push
329 329 self.remoteheads = None
330 330 # Details of the remote branch pre and post push
331 331 #
332 332 # mapping: {'branch': ([remoteheads],
333 333 # [newheads],
334 334 # [unsyncedheads],
335 335 # [discardedheads])}
336 336 # - branch: the branch name
337 337 # - remoteheads: the list of remote heads known locally
338 338 # None if the branch is new
339 339 # - newheads: the new remote heads (known locally) with outgoing pushed
340 340 # - unsyncedheads: the list of remote heads unknown locally.
341 341 # - discardedheads: the list of remote heads made obsolete by the push
342 342 self.pushbranchmap = None
343 343 # testable as a boolean indicating if any nodes are missing locally.
344 344 self.incoming = None
345 345 # summary of the remote phase situation
346 346 self.remotephases = None
347 347 # phase changes that must be pushed alongside the changesets
348 348 self.outdatedphases = None
349 349 # phase changes that must be pushed if the changeset push fails
350 350 self.fallbackoutdatedphases = None
351 351 # outgoing obsmarkers
352 352 self.outobsmarkers = set()
353 353 # outgoing bookmarks
354 354 self.outbookmarks = []
355 355 # transaction manager
356 356 self.trmanager = None
357 357 # map { pushkey partid -> callback handling failure}
358 358 # used to handle exception from mandatory pushkey part failure
359 359 self.pkfailcb = {}
360 360 # an iterable of pushvars or None
361 361 self.pushvars = pushvars
362 362
363 363 @util.propertycache
364 364 def futureheads(self):
365 365 """future remote heads if the changeset push succeeds"""
366 366 return self.outgoing.missingheads
367 367
368 368 @util.propertycache
369 369 def fallbackheads(self):
370 370 """future remote heads if the changeset push fails"""
371 371 if self.revs is None:
372 372 # no target to push, all common heads are relevant
373 373 return self.outgoing.commonheads
374 374 unfi = self.repo.unfiltered()
375 375 # I want cheads = heads(::missingheads and ::commonheads)
376 376 # (missingheads is revs with secret changeset filtered out)
377 377 #
378 378 # This can be expressed as:
379 379 # cheads = ( (missingheads and ::commonheads)
380 380 # + (commonheads and ::missingheads)
381 381 # )
382 382 #
383 383 # while trying to push we already computed the following:
384 384 # common = (::commonheads)
385 385 # missing = ((commonheads::missingheads) - commonheads)
386 386 #
387 387 # We can pick:
388 388 # * missingheads part of common (::commonheads)
389 389 common = self.outgoing.common
390 390 nm = self.repo.changelog.nodemap
391 391 cheads = [node for node in self.revs if nm[node] in common]
392 392 # and
393 393 # * commonheads parents on missing
394 394 revset = unfi.set('%ln and parents(roots(%ln))',
395 395 self.outgoing.commonheads,
396 396 self.outgoing.missing)
397 397 cheads.extend(c.node() for c in revset)
398 398 return cheads
399 399
400 400 @property
401 401 def commonheads(self):
402 402 """set of all common heads after changeset bundle push"""
403 403 if self.cgresult:
404 404 return self.futureheads
405 405 else:
406 406 return self.fallbackheads
407 407
408 408 # mapping of messages used when pushing bookmarks
409 409 bookmsgmap = {'update': (_("updating bookmark %s\n"),
410 410 _('updating bookmark %s failed!\n')),
411 411 'export': (_("exporting bookmark %s\n"),
412 412 _('exporting bookmark %s failed!\n')),
413 413 'delete': (_("deleting remote bookmark %s\n"),
414 414 _('deleting remote bookmark %s failed!\n')),
415 415 }
416 416
417 417
418 418 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
419 419 opargs=None):
420 420 '''Push outgoing changesets (limited by revs) from a local
421 421 repository to remote. Return an integer:
422 422 - None means nothing to push
423 423 - 0 means HTTP error
424 424 - 1 means we pushed and remote head count is unchanged *or*
425 425 we have outgoing changesets but refused to push
426 426 - other values as described by addchangegroup()
427 427 '''
428 428 if opargs is None:
429 429 opargs = {}
430 430 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
431 431 **pycompat.strkwargs(opargs))
432 432 if pushop.remote.local():
433 433 missing = (set(pushop.repo.requirements)
434 434 - pushop.remote.local().supported)
435 435 if missing:
436 436 msg = _("required features are not"
437 437 " supported in the destination:"
438 438 " %s") % (', '.join(sorted(missing)))
439 439 raise error.Abort(msg)
440 440
441 441 if not pushop.remote.canpush():
442 442 raise error.Abort(_("destination does not support push"))
443 443
444 444 if not pushop.remote.capable('unbundle'):
445 445 raise error.Abort(_('cannot push: destination does not support the '
446 446 'unbundle wire protocol command'))
447 447
448 448 # get lock as we might write phase data
449 449 wlock = lock = None
450 450 try:
451 451 # bundle2 push may receive a reply bundle touching bookmarks or other
452 452 # things requiring the wlock. Take it now to ensure proper ordering.
453 453 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
454 454 if (not _forcebundle1(pushop)) and maypushback:
455 455 wlock = pushop.repo.wlock()
456 456 lock = pushop.repo.lock()
457 457 pushop.trmanager = transactionmanager(pushop.repo,
458 458 'push-response',
459 459 pushop.remote.url())
460 460 except IOError as err:
461 461 if err.errno != errno.EACCES:
462 462 raise
463 463 # source repo cannot be locked.
464 464 # We do not abort the push, but just disable the local phase
465 465 # synchronisation.
466 466 msg = 'cannot lock source repository: %s\n' % err
467 467 pushop.ui.debug(msg)
468 468
469 469 with wlock or util.nullcontextmanager(), \
470 470 lock or util.nullcontextmanager(), \
471 471 pushop.trmanager or util.nullcontextmanager():
472 472 pushop.repo.checkpush(pushop)
473 473 _pushdiscovery(pushop)
474 474 if not _forcebundle1(pushop):
475 475 _pushbundle2(pushop)
476 476 _pushchangeset(pushop)
477 477 _pushsyncphase(pushop)
478 478 _pushobsolete(pushop)
479 479 _pushbookmark(pushop)
480 480
481 481 return pushop
482 482
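
A hedged usage sketch for ``push()``; the path and URL below are hypothetical, and error handling is reduced to inspecting ``cgresult``:

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, '/path/to/repo')           # hypothetical path
    dest = hg.peer(ui, {}, 'https://example.com/repo')  # hypothetical URL
    pushop = push(repo, dest, newbranch=True)
    if pushop.cgresult == 0:
        ui.warn('push failed (HTTP error)\n')
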
483 483 # list of steps to perform discovery before push
484 484 pushdiscoveryorder = []
485 485
486 486 # Mapping between step name and function
487 487 #
488 488 # This exists to help extensions wrap steps if necessary
489 489 pushdiscoverymapping = {}
490 490
491 491 def pushdiscovery(stepname):
492 492 """decorator for function performing discovery before push
493 493
494 494 The function is added to the step -> function mapping and appended to the
495 495 list of steps. Beware that decorated functions will be added in order (this
496 496 may matter).
497 497
498 498 You can only use this decorator for a new step; if you want to wrap a step
499 499 from an extension, change the pushdiscoverymapping dictionary directly."""
500 500 def dec(func):
501 501 assert stepname not in pushdiscoverymapping
502 502 pushdiscoverymapping[stepname] = func
503 503 pushdiscoveryorder.append(stepname)
504 504 return func
505 505 return dec
506 506
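
As an illustration, an extension could register an extra step through this decorator (the step name and body are invented):

    @pushdiscovery('audit')
    def _pushdiscoveryaudit(pushop):
        # steps run in registration order, so the built-in 'changeset'
        # step has already populated pushop.outgoing by now
        pushop.ui.debug('%d head(s) would be pushed\n'
                        % len(pushop.outgoing.missingheads))
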
507 507 def _pushdiscovery(pushop):
508 508 """Run all discovery steps"""
509 509 for stepname in pushdiscoveryorder:
510 510 step = pushdiscoverymapping[stepname]
511 511 step(pushop)
512 512
513 513 @pushdiscovery('changeset')
514 514 def _pushdiscoverychangeset(pushop):
515 515 """discover the changeset that need to be pushed"""
516 516 fci = discovery.findcommonincoming
517 517 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
518 518 common, inc, remoteheads = commoninc
519 519 fco = discovery.findcommonoutgoing
520 520 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
521 521 commoninc=commoninc, force=pushop.force)
522 522 pushop.outgoing = outgoing
523 523 pushop.remoteheads = remoteheads
524 524 pushop.incoming = inc
525 525
526 526 @pushdiscovery('phase')
527 527 def _pushdiscoveryphase(pushop):
528 528 """discover the phase that needs to be pushed
529 529
530 530 (computed for both success and failure case for changesets push)"""
531 531 outgoing = pushop.outgoing
532 532 unfi = pushop.repo.unfiltered()
533 533 remotephases = pushop.remote.listkeys('phases')
534 534 if (pushop.ui.configbool('ui', '_usedassubrepo')
535 535 and remotephases # server supports phases
536 536 and not pushop.outgoing.missing # no changesets to be pushed
537 537 and remotephases.get('publishing', False)):
538 538 # When:
539 539 # - this is a subrepo push
540 540 # - and the remote supports phases
541 541 # - and no changesets are to be pushed
542 542 # - and the remote is publishing
543 543 # We may be in the issue 3871 case!
544 544 # We drop the courtesy phase synchronisation that would
545 545 # otherwise publish changesets that are draft locally but
546 546 # already public on the remote.
547 547 pushop.outdatedphases = []
548 548 pushop.fallbackoutdatedphases = []
549 549 return
550 550
551 551 pushop.remotephases = phases.remotephasessummary(pushop.repo,
552 552 pushop.fallbackheads,
553 553 remotephases)
554 554 droots = pushop.remotephases.draftroots
555 555
556 556 extracond = ''
557 557 if not pushop.remotephases.publishing:
558 558 extracond = ' and public()'
559 559 revset = 'heads((%%ln::%%ln) %s)' % extracond
560 560 # Get the list of all revs that are draft on the remote but public here.
561 561 # XXX Beware that this revset breaks if droots is not strictly
562 562 # XXX roots; we may want to ensure it is, but that is costly
563 563 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
564 564 if not outgoing.missing:
565 565 future = fallback
566 566 else:
567 567 # add the changesets we are going to push as draft
568 568 #
569 569 # should not be necessary for a publishing server, but because of an
570 570 # issue fixed in xxxxx we have to do it anyway.
571 571 fdroots = list(unfi.set('roots(%ln + %ln::)',
572 572 outgoing.missing, droots))
573 573 fdroots = [f.node() for f in fdroots]
574 574 future = list(unfi.set(revset, fdroots, pushop.futureheads))
575 575 pushop.outdatedphases = future
576 576 pushop.fallbackoutdatedphases = fallback
577 577
578 578 @pushdiscovery('obsmarker')
579 579 def _pushdiscoveryobsmarkers(pushop):
580 580 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
581 581 and pushop.repo.obsstore
582 582 and 'obsolete' in pushop.remote.listkeys('namespaces')):
583 583 repo = pushop.repo
584 584 # very naive computation, which can be quite expensive on a big repo.
585 585 # However: evolution is currently slow on them anyway.
586 586 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
587 587 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
588 588
589 589 @pushdiscovery('bookmarks')
590 590 def _pushdiscoverybookmarks(pushop):
591 591 ui = pushop.ui
592 592 repo = pushop.repo.unfiltered()
593 593 remote = pushop.remote
594 594 ui.debug("checking for updated bookmarks\n")
595 595 ancestors = ()
596 596 if pushop.revs:
597 597 revnums = map(repo.changelog.rev, pushop.revs)
598 598 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
599 599 remotebookmark = remote.listkeys('bookmarks')
600 600
601 601 explicit = set([repo._bookmarks.expandname(bookmark)
602 602 for bookmark in pushop.bookmarks])
603 603
604 604 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
605 605 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
606 606
607 607 def safehex(x):
608 608 if x is None:
609 609 return x
610 610 return hex(x)
611 611
612 612 def hexifycompbookmarks(bookmarks):
613 613 for b, scid, dcid in bookmarks:
614 614 yield b, safehex(scid), safehex(dcid)
615 615
616 616 comp = [hexifycompbookmarks(marks) for marks in comp]
617 617 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
618 618
619 619 for b, scid, dcid in advsrc:
620 620 if b in explicit:
621 621 explicit.remove(b)
622 622 if not ancestors or repo[scid].rev() in ancestors:
623 623 pushop.outbookmarks.append((b, dcid, scid))
624 624 # search for added bookmarks
625 625 for b, scid, dcid in addsrc:
626 626 if b in explicit:
627 627 explicit.remove(b)
628 628 pushop.outbookmarks.append((b, '', scid))
629 629 # search for overwritten bookmark
630 630 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
631 631 if b in explicit:
632 632 explicit.remove(b)
633 633 pushop.outbookmarks.append((b, dcid, scid))
634 634 # search for bookmark to delete
635 635 for b, scid, dcid in adddst:
636 636 if b in explicit:
637 637 explicit.remove(b)
638 638 # treat as "deleted locally"
639 639 pushop.outbookmarks.append((b, dcid, ''))
640 640 # identical bookmarks shouldn't get reported
641 641 for b, scid, dcid in same:
642 642 if b in explicit:
643 643 explicit.remove(b)
644 644
645 645 if explicit:
646 646 explicit = sorted(explicit)
647 647 # we should probably list all of them
648 648 ui.warn(_('bookmark %s does not exist on the local '
649 649 'or remote repository!\n') % explicit[0])
650 650 pushop.bkresult = 2
651 651
652 652 pushop.outbookmarks.sort()
653 653
654 654 def _pushcheckoutgoing(pushop):
655 655 outgoing = pushop.outgoing
656 656 unfi = pushop.repo.unfiltered()
657 657 if not outgoing.missing:
658 658 # nothing to push
659 659 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
660 660 return False
661 661 # something to push
662 662 if not pushop.force:
663 663 # if repo.obsstore is empty --> no obsolete markers,
664 664 # so we can save the iteration
665 665 if unfi.obsstore:
666 666 # these messages are defined here to stay within the 80-char limit
667 667 mso = _("push includes obsolete changeset: %s!")
668 668 mspd = _("push includes phase-divergent changeset: %s!")
669 669 mscd = _("push includes content-divergent changeset: %s!")
670 670 mst = {"orphan": _("push includes orphan changeset: %s!"),
671 671 "phase-divergent": mspd,
672 672 "content-divergent": mscd}
673 673 # If we are going to push and there is at least one
674 674 # obsolete or unstable changeset in missing, at
675 675 # least one of the missing heads will be obsolete or
676 676 # unstable. So checking heads only is ok
677 677 for node in outgoing.missingheads:
678 678 ctx = unfi[node]
679 679 if ctx.obsolete():
680 680 raise error.Abort(mso % ctx)
681 681 elif ctx.isunstable():
682 682 # TODO print more than one instability in the abort
683 683 # message
684 684 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
685 685
686 686 discovery.checkheads(pushop)
687 687 return True
688 688
689 689 # List of names of steps to perform for an outgoing bundle2, order matters.
690 690 b2partsgenorder = []
691 691
692 692 # Mapping between step name and function
693 693 #
694 694 # This exists to help extensions wrap steps if necessary
695 695 b2partsgenmapping = {}
696 696
697 697 def b2partsgenerator(stepname, idx=None):
698 698 """decorator for function generating bundle2 part
699 699
700 700 The function is added to the step -> function mapping and appended to the
701 701 list of steps. Beware that decorated functions will be added in order
702 702 (this may matter).
703 703
704 704 You can only use this decorator for new steps; if you want to wrap a step
705 705 from an extension, change the b2partsgenmapping dictionary directly."""
706 706 def dec(func):
707 707 assert stepname not in b2partsgenmapping
708 708 b2partsgenmapping[stepname] = func
709 709 if idx is None:
710 710 b2partsgenorder.append(stepname)
711 711 else:
712 712 b2partsgenorder.insert(idx, stepname)
713 713 return func
714 714 return dec
715 715
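
A sketch of what an extension part generator might look like (the step and part names are invented):

    @b2partsgenerator('example-note')
    def _pushb2note(pushop, bundler):
        if 'example-note' in pushop.stepsdone:
            return
        pushop.stepsdone.add('example-note')
        # advisory part: a receiver that does not know it will skip it
        bundler.newpart('example:note', data='hello', mandatory=False)
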
716 716 def _pushb2ctxcheckheads(pushop, bundler):
717 717 """Generate race condition checking parts
718 718
719 719 Exists as an independent function to aid extensions
720 720 """
721 721 # * 'force' does not check for push races,
722 722 # * if we don't push anything, there is nothing to check.
723 723 if not pushop.force and pushop.outgoing.missingheads:
724 724 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
725 725 emptyremote = pushop.pushbranchmap is None
726 726 if not allowunrelated or emptyremote:
727 727 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
728 728 else:
729 729 affected = set()
730 730 for branch, heads in pushop.pushbranchmap.iteritems():
731 731 remoteheads, newheads, unsyncedheads, discardedheads = heads
732 732 if remoteheads is not None:
733 733 remote = set(remoteheads)
734 734 affected |= set(discardedheads) & remote
735 735 affected |= remote - set(newheads)
736 736 if affected:
737 737 data = iter(sorted(affected))
738 738 bundler.newpart('check:updated-heads', data=data)
739 739
740 740 def _pushing(pushop):
741 741 """return True if we are pushing anything"""
742 742 return bool(pushop.outgoing.missing
743 743 or pushop.outdatedphases
744 744 or pushop.outobsmarkers
745 745 or pushop.outbookmarks)
746 746
747 747 @b2partsgenerator('check-bookmarks')
748 748 def _pushb2checkbookmarks(pushop, bundler):
749 749 """insert bookmark move checking"""
750 750 if not _pushing(pushop) or pushop.force:
751 751 return
752 752 b2caps = bundle2.bundle2caps(pushop.remote)
753 753 hasbookmarkcheck = 'bookmarks' in b2caps
754 754 if not (pushop.outbookmarks and hasbookmarkcheck):
755 755 return
756 756 data = []
757 757 for book, old, new in pushop.outbookmarks:
758 758 old = bin(old)
759 759 data.append((book, old))
760 760 checkdata = bookmod.binaryencode(data)
761 761 bundler.newpart('check:bookmarks', data=checkdata)
762 762
763 763 @b2partsgenerator('check-phases')
764 764 def _pushb2checkphases(pushop, bundler):
765 765 """insert phase move checking"""
766 766 if not _pushing(pushop) or pushop.force:
767 767 return
768 768 b2caps = bundle2.bundle2caps(pushop.remote)
769 769 hasphaseheads = 'heads' in b2caps.get('phases', ())
770 770 if pushop.remotephases is not None and hasphaseheads:
771 771 # check that the remote phase has not changed
772 772 checks = [[] for p in phases.allphases]
773 773 checks[phases.public].extend(pushop.remotephases.publicheads)
774 774 checks[phases.draft].extend(pushop.remotephases.draftroots)
775 775 if any(checks):
776 776 for nodes in checks:
777 777 nodes.sort()
778 778 checkdata = phases.binaryencode(checks)
779 779 bundler.newpart('check:phases', data=checkdata)
780 780
781 781 @b2partsgenerator('changeset')
782 782 def _pushb2ctx(pushop, bundler):
783 783 """handle changegroup push through bundle2
784 784
785 785 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
786 786 """
787 787 if 'changesets' in pushop.stepsdone:
788 788 return
789 789 pushop.stepsdone.add('changesets')
790 790 # Send known heads to the server for race detection.
791 791 if not _pushcheckoutgoing(pushop):
792 792 return
793 793 pushop.repo.prepushoutgoinghooks(pushop)
794 794
795 795 _pushb2ctxcheckheads(pushop, bundler)
796 796
797 797 b2caps = bundle2.bundle2caps(pushop.remote)
798 798 version = '01'
799 799 cgversions = b2caps.get('changegroup')
800 800 if cgversions: # 3.1 and 3.2 ship with an empty value
801 801 cgversions = [v for v in cgversions
802 802 if v in changegroup.supportedoutgoingversions(
803 803 pushop.repo)]
804 804 if not cgversions:
805 805 raise ValueError(_('no common changegroup version'))
806 806 version = max(cgversions)
807 807 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
808 808 'push')
809 809 cgpart = bundler.newpart('changegroup', data=cgstream)
810 810 if cgversions:
811 811 cgpart.addparam('version', version)
812 812 if 'treemanifest' in pushop.repo.requirements:
813 813 cgpart.addparam('treemanifest', '1')
814 814 def handlereply(op):
815 815 """extract addchangegroup returns from server reply"""
816 816 cgreplies = op.records.getreplies(cgpart.id)
817 817 assert len(cgreplies['changegroup']) == 1
818 818 pushop.cgresult = cgreplies['changegroup'][0]['return']
819 819 return handlereply
820 820
821 821 @b2partsgenerator('phase')
822 822 def _pushb2phases(pushop, bundler):
823 823 """handle phase push through bundle2"""
824 824 if 'phases' in pushop.stepsdone:
825 825 return
826 826 b2caps = bundle2.bundle2caps(pushop.remote)
827 827 ui = pushop.repo.ui
828 828
829 829 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
830 830 haspushkey = 'pushkey' in b2caps
831 831 hasphaseheads = 'heads' in b2caps.get('phases', ())
832 832
833 833 if hasphaseheads and not legacyphase:
834 834 return _pushb2phaseheads(pushop, bundler)
835 835 elif haspushkey:
836 836 return _pushb2phasespushkey(pushop, bundler)
837 837
838 838 def _pushb2phaseheads(pushop, bundler):
839 839 """push phase information through a bundle2 - binary part"""
840 840 pushop.stepsdone.add('phases')
841 841 if pushop.outdatedphases:
842 842 updates = [[] for p in phases.allphases]
843 843 updates[0].extend(h.node() for h in pushop.outdatedphases)
844 844 phasedata = phases.binaryencode(updates)
845 845 bundler.newpart('phase-heads', data=phasedata)
846 846
847 847 def _pushb2phasespushkey(pushop, bundler):
848 848 """push phase information through a bundle2 - pushkey part"""
849 849 pushop.stepsdone.add('phases')
850 850 part2node = []
851 851
852 852 def handlefailure(pushop, exc):
853 853 targetid = int(exc.partid)
854 854 for partid, node in part2node:
855 855 if partid == targetid:
856 856 raise error.Abort(_('updating %s to public failed') % node)
857 857
858 858 enc = pushkey.encode
859 859 for newremotehead in pushop.outdatedphases:
860 860 part = bundler.newpart('pushkey')
861 861 part.addparam('namespace', enc('phases'))
862 862 part.addparam('key', enc(newremotehead.hex()))
863 863 part.addparam('old', enc('%d' % phases.draft))
864 864 part.addparam('new', enc('%d' % phases.public))
865 865 part2node.append((part.id, newremotehead))
866 866 pushop.pkfailcb[part.id] = handlefailure
867 867
868 868 def handlereply(op):
869 869 for partid, node in part2node:
870 870 partrep = op.records.getreplies(partid)
871 871 results = partrep['pushkey']
872 872 assert len(results) <= 1
873 873 msg = None
874 874 if not results:
875 875 msg = _('server ignored update of %s to public!\n') % node
876 876 elif not int(results[0]['return']):
877 877 msg = _('updating %s to public failed!\n') % node
878 878 if msg is not None:
879 879 pushop.ui.warn(msg)
880 880 return handlereply
881 881
882 882 @b2partsgenerator('obsmarkers')
883 883 def _pushb2obsmarkers(pushop, bundler):
884 884 if 'obsmarkers' in pushop.stepsdone:
885 885 return
886 886 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
887 887 if obsolete.commonversion(remoteversions) is None:
888 888 return
889 889 pushop.stepsdone.add('obsmarkers')
890 890 if pushop.outobsmarkers:
891 891 markers = sorted(pushop.outobsmarkers)
892 892 bundle2.buildobsmarkerspart(bundler, markers)
893 893
894 894 @b2partsgenerator('bookmarks')
895 895 def _pushb2bookmarks(pushop, bundler):
896 896 """handle bookmark push through bundle2"""
897 897 if 'bookmarks' in pushop.stepsdone:
898 898 return
899 899 b2caps = bundle2.bundle2caps(pushop.remote)
900 if 'pushkey' not in b2caps:
901 return
900 if 'pushkey' in b2caps:
901 return _pushb2bookmarkspushkey(pushop, bundler)
902
903 def _pushb2bookmarkspushkey(pushop, bundler):
902 904 pushop.stepsdone.add('bookmarks')
903 905 part2book = []
904 906 enc = pushkey.encode
905 907
906 908 def handlefailure(pushop, exc):
907 909 targetid = int(exc.partid)
908 910 for partid, book, action in part2book:
909 911 if partid == targetid:
910 912 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
911 913 # we should not be called for parts we did not generate
912 914 assert False
913 915
914 916 for book, old, new in pushop.outbookmarks:
915 917 part = bundler.newpart('pushkey')
916 918 part.addparam('namespace', enc('bookmarks'))
917 919 part.addparam('key', enc(book))
918 920 part.addparam('old', enc(old))
919 921 part.addparam('new', enc(new))
920 922 action = 'update'
921 923 if not old:
922 924 action = 'export'
923 925 elif not new:
924 926 action = 'delete'
925 927 part2book.append((part.id, book, action))
926 928 pushop.pkfailcb[part.id] = handlefailure
927 929
928 930 def handlereply(op):
929 931 ui = pushop.ui
930 932 for partid, book, action in part2book:
931 933 partrep = op.records.getreplies(partid)
932 934 results = partrep['pushkey']
933 935 assert len(results) <= 1
934 936 if not results:
935 937 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
936 938 else:
937 939 ret = int(results[0]['return'])
938 940 if ret:
939 941 ui.status(bookmsgmap[action][0] % book)
940 942 else:
941 943 ui.warn(bookmsgmap[action][1] % book)
942 944 if pushop.bkresult is not None:
943 945 pushop.bkresult = 1
944 946 return handlereply
945 947
946 948 @b2partsgenerator('pushvars', idx=0)
947 949 def _getbundlesendvars(pushop, bundler):
948 950 '''send shellvars via bundle2'''
949 951 pushvars = pushop.pushvars
950 952 if pushvars:
951 953 shellvars = {}
952 954 for raw in pushvars:
953 955 if '=' not in raw:
954 956 msg = ("unable to parse variable '%s', should follow "
955 957 "'KEY=VALUE' or 'KEY=' format")
956 958 raise error.Abort(msg % raw)
957 959 k, v = raw.split('=', 1)
958 960 shellvars[k] = v
959 961
960 962 part = bundler.newpart('pushvars')
961 963
962 964 for key, value in shellvars.iteritems():
963 965 part.addparam(key, value, mandatory=False)
964 966
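
The variables gathered here come from ``hg push --pushvars KEY=VALUE``; on the receiving side they surface as ``HG_USERVAR_KEY`` in the hook environment, provided the server opts in. A sketch of the server-side knob:

    [push]
    pushvars.server = true
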
965 967 def _pushbundle2(pushop):
966 968 """push data to the remote using bundle2
967 969
968 970 The only currently supported type of data is changegroup but this will
969 971 evolve in the future."""
970 972 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
971 973 pushback = (pushop.trmanager
972 974 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
973 975
974 976 # create reply capability
975 977 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
976 978 allowpushback=pushback))
977 979 bundler.newpart('replycaps', data=capsblob)
978 980 replyhandlers = []
979 981 for partgenname in b2partsgenorder:
980 982 partgen = b2partsgenmapping[partgenname]
981 983 ret = partgen(pushop, bundler)
982 984 if callable(ret):
983 985 replyhandlers.append(ret)
984 986 # do not push if nothing to push
985 987 if bundler.nbparts <= 1:
986 988 return
987 989 stream = util.chunkbuffer(bundler.getchunks())
988 990 try:
989 991 try:
990 992 reply = pushop.remote.unbundle(
991 993 stream, ['force'], pushop.remote.url())
992 994 except error.BundleValueError as exc:
993 995 raise error.Abort(_('missing support for %s') % exc)
994 996 try:
995 997 trgetter = None
996 998 if pushback:
997 999 trgetter = pushop.trmanager.transaction
998 1000 op = bundle2.processbundle(pushop.repo, reply, trgetter)
999 1001 except error.BundleValueError as exc:
1000 1002 raise error.Abort(_('missing support for %s') % exc)
1001 1003 except bundle2.AbortFromPart as exc:
1002 1004 pushop.ui.status(_('remote: %s\n') % exc)
1003 1005 if exc.hint is not None:
1004 1006 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
1005 1007 raise error.Abort(_('push failed on remote'))
1006 1008 except error.PushkeyFailed as exc:
1007 1009 partid = int(exc.partid)
1008 1010 if partid not in pushop.pkfailcb:
1009 1011 raise
1010 1012 pushop.pkfailcb[partid](pushop, exc)
1011 1013 for rephand in replyhandlers:
1012 1014 rephand(op)
1013 1015
1014 1016 def _pushchangeset(pushop):
1015 1017 """Make the actual push of changeset bundle to remote repo"""
1016 1018 if 'changesets' in pushop.stepsdone:
1017 1019 return
1018 1020 pushop.stepsdone.add('changesets')
1019 1021 if not _pushcheckoutgoing(pushop):
1020 1022 return
1021 1023
1022 1024 # Should have verified this in push().
1023 1025 assert pushop.remote.capable('unbundle')
1024 1026
1025 1027 pushop.repo.prepushoutgoinghooks(pushop)
1026 1028 outgoing = pushop.outgoing
1027 1029 # TODO: get bundlecaps from remote
1028 1030 bundlecaps = None
1029 1031 # create a changegroup from local
1030 1032 if pushop.revs is None and not (outgoing.excluded
1031 1033 or pushop.repo.changelog.filteredrevs):
1032 1034 # push everything,
1033 1035 # use the fast path, no race possible on push
1034 1036 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1035 1037 fastpath=True, bundlecaps=bundlecaps)
1036 1038 else:
1037 1039 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1038 1040 'push', bundlecaps=bundlecaps)
1039 1041
1040 1042 # apply changegroup to remote
1041 1043 # local repo finds heads on server, finds out what
1042 1044 # revs it must push. once revs transferred, if server
1043 1045 # finds it has different heads (someone else won
1044 1046 # commit/push race), server aborts.
1045 1047 if pushop.force:
1046 1048 remoteheads = ['force']
1047 1049 else:
1048 1050 remoteheads = pushop.remoteheads
1049 1051 # ssh: return remote's addchangegroup()
1050 1052 # http: return remote's addchangegroup() or 0 for error
1051 1053 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1052 1054 pushop.repo.url())
1053 1055
1054 1056 def _pushsyncphase(pushop):
1055 1057 """synchronise phase information locally and remotely"""
1056 1058 cheads = pushop.commonheads
1057 1059 # even when we don't push, exchanging phase data is useful
1058 1060 remotephases = pushop.remote.listkeys('phases')
1059 1061 if (pushop.ui.configbool('ui', '_usedassubrepo')
1060 1062 and remotephases # server supports phases
1061 1063 and pushop.cgresult is None # nothing was pushed
1062 1064 and remotephases.get('publishing', False)):
1063 1065 # When:
1064 1066 # - this is a subrepo push
1065 1067 # - and the remote supports phases
1066 1068 # - and no changeset was pushed
1067 1069 # - and the remote is publishing
1068 1070 # We may be in the issue 3871 case!
1069 1071 # We drop the courtesy phase synchronisation that would
1070 1072 # otherwise publish changesets that are draft locally but
1071 1073 # already public on the remote.
1072 1074 remotephases = {'publishing': 'True'}
1073 1075 if not remotephases: # old server, or public-only reply from non-publishing
1074 1076 _localphasemove(pushop, cheads)
1075 1077 # don't push any phase data as there is nothing to push
1076 1078 else:
1077 1079 ana = phases.analyzeremotephases(pushop.repo, cheads,
1078 1080 remotephases)
1079 1081 pheads, droots = ana
1080 1082 ### Apply remote phase on local
1081 1083 if remotephases.get('publishing', False):
1082 1084 _localphasemove(pushop, cheads)
1083 1085 else: # publish = False
1084 1086 _localphasemove(pushop, pheads)
1085 1087 _localphasemove(pushop, cheads, phases.draft)
1086 1088 ### Apply local phase on remote
1087 1089
1088 1090 if pushop.cgresult:
1089 1091 if 'phases' in pushop.stepsdone:
1090 1092 # phases already pushed through bundle2
1091 1093 return
1092 1094 outdated = pushop.outdatedphases
1093 1095 else:
1094 1096 outdated = pushop.fallbackoutdatedphases
1095 1097
1096 1098 pushop.stepsdone.add('phases')
1097 1099
1098 1100 # filter heads already turned public by the push
1099 1101 outdated = [c for c in outdated if c.node() not in pheads]
1100 1102 # fallback to independent pushkey command
1101 1103 for newremotehead in outdated:
1102 1104 r = pushop.remote.pushkey('phases',
1103 1105 newremotehead.hex(),
1104 1106 str(phases.draft),
1105 1107 str(phases.public))
1106 1108 if not r:
1107 1109 pushop.ui.warn(_('updating %s to public failed!\n')
1108 1110 % newremotehead)
1109 1111
1110 1112 def _localphasemove(pushop, nodes, phase=phases.public):
1111 1113 """move <nodes> to <phase> in the local source repo"""
1112 1114 if pushop.trmanager:
1113 1115 phases.advanceboundary(pushop.repo,
1114 1116 pushop.trmanager.transaction(),
1115 1117 phase,
1116 1118 nodes)
1117 1119 else:
1118 1120 # repo is not locked, do not change any phases!
1119 1121 # Inform the user that phases should have been moved when
1120 1122 # applicable.
1121 1123 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1122 1124 phasestr = phases.phasenames[phase]
1123 1125 if actualmoves:
1124 1126 pushop.ui.status(_('cannot lock source repo, skipping '
1125 1127 'local %s phase update\n') % phasestr)
1126 1128
1127 1129 def _pushobsolete(pushop):
1128 1130 """utility function to push obsolete markers to a remote"""
1129 1131 if 'obsmarkers' in pushop.stepsdone:
1130 1132 return
1131 1133 repo = pushop.repo
1132 1134 remote = pushop.remote
1133 1135 pushop.stepsdone.add('obsmarkers')
1134 1136 if pushop.outobsmarkers:
1135 1137 pushop.ui.debug('try to push obsolete markers to remote\n')
1136 1138 rslts = []
1137 1139 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1138 1140 for key in sorted(remotedata, reverse=True):
1139 1141 # reverse sort to ensure we end with dump0
1140 1142 data = remotedata[key]
1141 1143 rslts.append(remote.pushkey('obsolete', key, '', data))
1142 1144 if [r for r in rslts if not r]:
1143 1145 msg = _('failed to push some obsolete markers!\n')
1144 1146 repo.ui.warn(msg)
1145 1147
1146 1148 def _pushbookmark(pushop):
1147 1149 """Update bookmark position on remote"""
1148 1150 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1149 1151 return
1150 1152 pushop.stepsdone.add('bookmarks')
1151 1153 ui = pushop.ui
1152 1154 remote = pushop.remote
1153 1155
1154 1156 for b, old, new in pushop.outbookmarks:
1155 1157 action = 'update'
1156 1158 if not old:
1157 1159 action = 'export'
1158 1160 elif not new:
1159 1161 action = 'delete'
1160 1162 if remote.pushkey('bookmarks', b, old, new):
1161 1163 ui.status(bookmsgmap[action][0] % b)
1162 1164 else:
1163 1165 ui.warn(bookmsgmap[action][1] % b)
1164 1166 # discovery can have set the value from an invalid entry
1165 1167 if pushop.bkresult is not None:
1166 1168 pushop.bkresult = 1
1167 1169
1168 1170 class pulloperation(object):
1169 1171 """A object that represent a single pull operation
1170 1172
1171 1173 It purpose is to carry pull related state and very common operation.
1172 1174
1173 1175 A new should be created at the beginning of each pull and discarded
1174 1176 afterward.
1175 1177 """
1176 1178
1177 1179 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1178 1180 remotebookmarks=None, streamclonerequested=None):
1179 1181 # repo we pull into
1180 1182 self.repo = repo
1181 1183 # repo we pull from
1182 1184 self.remote = remote
1183 1185 # revision we try to pull (None is "all")
1184 1186 self.heads = heads
1185 1187 # bookmarks pulled explicitly
1186 1188 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1187 1189 for bookmark in bookmarks]
1188 1190 # do we force pull?
1189 1191 self.force = force
1190 1192 # whether a streaming clone was requested
1191 1193 self.streamclonerequested = streamclonerequested
1192 1194 # transaction manager
1193 1195 self.trmanager = None
1194 1196 # set of common changesets between local and remote before pull
1195 1197 self.common = None
1196 1198 # set of pulled heads
1197 1199 self.rheads = None
1198 1200 # list of missing changesets to fetch remotely
1199 1201 self.fetch = None
1200 1202 # remote bookmarks data
1201 1203 self.remotebookmarks = remotebookmarks
1202 1204 # result of changegroup pulling (used as return code by pull)
1203 1205 self.cgresult = None
1204 1206 # list of steps already done
1205 1207 self.stepsdone = set()
1206 1208 # Whether we attempted a clone from pre-generated bundles.
1207 1209 self.clonebundleattempted = False
1208 1210
1209 1211 @util.propertycache
1210 1212 def pulledsubset(self):
1211 1213 """heads of the set of changeset target by the pull"""
1212 1214 # compute target subset
1213 1215 if self.heads is None:
1214 1216 # We pulled everything possible
1215 1217 # sync on everything common
1216 1218 c = set(self.common)
1217 1219 ret = list(self.common)
1218 1220 for n in self.rheads:
1219 1221 if n not in c:
1220 1222 ret.append(n)
1221 1223 return ret
1222 1224 else:
1223 1225 # We pulled a specific subset
1224 1226 # sync on this subset
1225 1227 return self.heads
1226 1228
1227 1229 @util.propertycache
1228 1230 def canusebundle2(self):
1229 1231 return not _forcebundle1(self)
1230 1232
1231 1233 @util.propertycache
1232 1234 def remotebundle2caps(self):
1233 1235 return bundle2.bundle2caps(self.remote)
1234 1236
1235 1237 def gettransaction(self):
1236 1238 # deprecated; talk to trmanager directly
1237 1239 return self.trmanager.transaction()
1238 1240
1239 1241 class transactionmanager(util.transactional):
1240 1242 """An object to manage the life cycle of a transaction
1241 1243
1242 1244 It creates the transaction on demand and calls the appropriate hooks when
1243 1245 closing the transaction."""
1244 1246 def __init__(self, repo, source, url):
1245 1247 self.repo = repo
1246 1248 self.source = source
1247 1249 self.url = url
1248 1250 self._tr = None
1249 1251
1250 1252 def transaction(self):
1251 1253 """Return an open transaction object, constructing if necessary"""
1252 1254 if not self._tr:
1253 1255 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1254 1256 self._tr = self.repo.transaction(trname)
1255 1257 self._tr.hookargs['source'] = self.source
1256 1258 self._tr.hookargs['url'] = self.url
1257 1259 return self._tr
1258 1260
1259 1261 def close(self):
1260 1262 """close transaction if created"""
1261 1263 if self._tr is not None:
1262 1264 self._tr.close()
1263 1265
1264 1266 def release(self):
1265 1267 """release transaction if created"""
1266 1268 if self._tr is not None:
1267 1269 self._tr.release()
1268 1270
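
A sketch of how ``pull()`` below drives this manager (mirroring the real call sites in this file):

    trmanager = transactionmanager(repo, 'pull', remote.url())
    try:
        tr = trmanager.transaction()  # created lazily on first use
        # ... apply incoming data inside tr ...
        trmanager.close()             # commit and fire hooks
    finally:
        trmanager.release()           # abort the transaction if still open

Since the class derives from ``util.transactional``, it can also be used as a context manager.
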
1269 1271 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1270 1272 streamclonerequested=None):
1271 1273 """Fetch repository data from a remote.
1272 1274
1273 1275 This is the main function used to retrieve data from a remote repository.
1274 1276
1275 1277 ``repo`` is the local repository to clone into.
1276 1278 ``remote`` is a peer instance.
1277 1279 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1278 1280 default) means to pull everything from the remote.
1279 1281 ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
1280 1282 default, all remote bookmarks are pulled.
1281 1283 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1282 1284 initialization.
1283 1285 ``streamclonerequested`` is a boolean indicating whether a "streaming
1284 1286 clone" is requested. A "streaming clone" is essentially a raw file copy
1285 1287 of revlogs from the server. This only works when the local repository is
1286 1288 empty. The default value of ``None`` means to respect the server
1287 1289 configuration for preferring stream clones.
1288 1290
1289 1291 Returns the ``pulloperation`` created for this pull.
1290 1292 """
1291 1293 if opargs is None:
1292 1294 opargs = {}
1293 1295 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1294 1296 streamclonerequested=streamclonerequested, **opargs)
1295 1297
1296 1298 peerlocal = pullop.remote.local()
1297 1299 if peerlocal:
1298 1300 missing = set(peerlocal.requirements) - pullop.repo.supported
1299 1301 if missing:
1300 1302 msg = _("required features are not"
1301 1303 " supported in the destination:"
1302 1304 " %s") % (', '.join(sorted(missing)))
1303 1305 raise error.Abort(msg)
1304 1306
1305 1307 wlock = lock = None
1306 1308 try:
1307 1309 wlock = pullop.repo.wlock()
1308 1310 lock = pullop.repo.lock()
1309 1311 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1310 1312 # This should ideally be in _pullbundle2(). However, it needs to run
1311 1313 # before discovery to avoid extra work.
1312 1314 _maybeapplyclonebundle(pullop)
1313 1315 streamclone.maybeperformlegacystreamclone(pullop)
1314 1316 _pulldiscovery(pullop)
1315 1317 if pullop.canusebundle2:
1316 1318 _pullbundle2(pullop)
1317 1319 _pullchangeset(pullop)
1318 1320 _pullphase(pullop)
1319 1321 _pullbookmarks(pullop)
1320 1322 _pullobsolete(pullop)
1321 1323 pullop.trmanager.close()
1322 1324 finally:
1323 1325 lockmod.release(pullop.trmanager, lock, wlock)
1324 1326
1325 1327 # storing remotenames
1326 1328 if repo.ui.configbool('experimental', 'remotenames'):
1327 1329 remotenames.pullremotenames(repo, remote)
1328 1330
1329 1331 return pullop
1330 1332
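
And a matching usage sketch for ``pull()`` (path, URL and bookmark name are illustrative):

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, '/path/to/repo')            # hypothetical
    other = hg.peer(ui, {}, 'https://example.com/repo')  # hypothetical
    pullop = pull(repo, other, bookmarks=['@'])
    ui.status('changegroup result: %r\n' % pullop.cgresult)
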
1331 1333 # list of steps to perform discovery before pull
1332 1334 pulldiscoveryorder = []
1333 1335
1334 1336 # Mapping between step name and function
1335 1337 #
1336 1338 # This exists to help extensions wrap steps if necessary
1337 1339 pulldiscoverymapping = {}
1338 1340
1339 1341 def pulldiscovery(stepname):
1340 1342 """decorator for function performing discovery before pull
1341 1343
1342 1344 The function is added to the step -> function mapping and appended to the
1343 1345 list of steps. Beware that decorated functions will be added in order (this
1344 1346 may matter).
1345 1347
1346 1348 You can only use this decorator for a new step; if you want to wrap a step
1347 1349 from an extension, change the pulldiscoverymapping dictionary directly."""
1348 1350 def dec(func):
1349 1351 assert stepname not in pulldiscoverymapping
1350 1352 pulldiscoverymapping[stepname] = func
1351 1353 pulldiscoveryorder.append(stepname)
1352 1354 return func
1353 1355 return dec
1354 1356
1355 1357 def _pulldiscovery(pullop):
1356 1358 """Run all discovery steps"""
1357 1359 for stepname in pulldiscoveryorder:
1358 1360 step = pulldiscoverymapping[stepname]
1359 1361 step(pullop)
1360 1362
1361 1363 @pulldiscovery('b1:bookmarks')
1362 1364 def _pullbookmarkbundle1(pullop):
1363 1365 """fetch bookmark data in bundle1 case
1364 1366
1365 1367 If not using bundle2, we have to fetch bookmarks before changeset
1366 1368 discovery to reduce the chance and impact of race conditions."""
1367 1369 if pullop.remotebookmarks is not None:
1368 1370 return
1369 1371 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1370 1372 # all known bundle2 servers now support listkeys, but let's be nice to
1371 1373 # new implementations.
1372 1374 return
1373 1375 books = pullop.remote.listkeys('bookmarks')
1374 1376 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1375 1377
1376 1378
1377 1379 @pulldiscovery('changegroup')
1378 1380 def _pulldiscoverychangegroup(pullop):
1379 1381 """discovery phase for the pull
1380 1382
1381 1383 Currently handles changeset discovery only; will change to handle all
1382 1384 discovery at some point."""
1383 1385 tmp = discovery.findcommonincoming(pullop.repo,
1384 1386 pullop.remote,
1385 1387 heads=pullop.heads,
1386 1388 force=pullop.force)
1387 1389 common, fetch, rheads = tmp
1388 1390 nm = pullop.repo.unfiltered().changelog.nodemap
1389 1391 if fetch and rheads:
1390 1392 # If a remote head is filtered locally, put it back in common.
1391 1393 #
1392 1394 # This is a hackish solution to catch most of the "common but locally
1393 1395 # hidden" situations. We do not perform discovery on the unfiltered
1394 1396 # repository because it ends up doing a pathological amount of round
1395 1397 # trips for a huge amount of changesets we do not care about.
1396 1398 #
1397 1399 # If a set of such "common but filtered" changesets exists on the server
1398 1400 # but does not include a remote head, we'll not be able to detect it,
1399 1401 scommon = set(common)
1400 1402 for n in rheads:
1401 1403 if n in nm:
1402 1404 if n not in scommon:
1403 1405 common.append(n)
1404 1406 if set(rheads).issubset(set(common)):
1405 1407 fetch = []
1406 1408 pullop.common = common
1407 1409 pullop.fetch = fetch
1408 1410 pullop.rheads = rheads
1409 1411
1410 1412 def _pullbundle2(pullop):
1411 1413 """pull data using bundle2
1412 1414
1413 1415 For now, the only supported data is the changegroup."""
1414 1416 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1415 1417
1416 1418 # At the moment we don't do stream clones over bundle2. If that is
1417 1419 # implemented then here's where the check for that will go.
1418 1420 streaming = False
1419 1421
1420 1422 # pulling changegroup
1421 1423 pullop.stepsdone.add('changegroup')
1422 1424
1423 1425 kwargs['common'] = pullop.common
1424 1426 kwargs['heads'] = pullop.heads or pullop.rheads
1425 1427 kwargs['cg'] = pullop.fetch
1426 1428
1427 1429 ui = pullop.repo.ui
1428 1430 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1429 1431 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1430 1432 if (not legacyphase and hasbinaryphase):
1431 1433 kwargs['phases'] = True
1432 1434 pullop.stepsdone.add('phases')
1433 1435
1434 1436 if 'listkeys' in pullop.remotebundle2caps:
1435 1437 if 'phases' not in pullop.stepsdone:
1436 1438 kwargs['listkeys'] = ['phases']
1437 1439 if pullop.remotebookmarks is None:
1438 1440 # make sure to always include bookmark data when migrating
1439 1441 # `hg incoming --bundle` to use this function.
1440 1442 kwargs.setdefault('listkeys', []).append('bookmarks')
1441 1443
1442 1444 # If this is a full pull / clone and the server supports the clone bundles
1443 1445 # feature, tell the server whether we attempted a clone bundle. The
1444 1446 # presence of this flag indicates the client supports clone bundles. This
1445 1447 # will enable the server to treat clients that support clone bundles
1446 1448 # differently from those that don't.
1447 1449 if (pullop.remote.capable('clonebundles')
1448 1450 and pullop.heads is None and list(pullop.common) == [nullid]):
1449 1451 kwargs['cbattempted'] = pullop.clonebundleattempted
1450 1452
1451 1453 if streaming:
1452 1454 pullop.repo.ui.status(_('streaming all changes\n'))
1453 1455 elif not pullop.fetch:
1454 1456 pullop.repo.ui.status(_("no changes found\n"))
1455 1457 pullop.cgresult = 0
1456 1458 else:
1457 1459 if pullop.heads is None and list(pullop.common) == [nullid]:
1458 1460 pullop.repo.ui.status(_("requesting all changes\n"))
1459 1461 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1460 1462 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1461 1463 if obsolete.commonversion(remoteversions) is not None:
1462 1464 kwargs['obsmarkers'] = True
1463 1465 pullop.stepsdone.add('obsmarkers')
1464 1466 _pullbundle2extraprepare(pullop, kwargs)
1465 1467 bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
1466 1468 try:
1467 1469 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1468 1470 except bundle2.AbortFromPart as exc:
1469 1471 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1470 1472 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1471 1473 except error.BundleValueError as exc:
1472 1474 raise error.Abort(_('missing support for %s') % exc)
1473 1475
1474 1476 if pullop.fetch:
1475 1477 pullop.cgresult = bundle2.combinechangegroupresults(op)
1476 1478
1477 1479 # processing phases change
1478 1480 for namespace, value in op.records['listkeys']:
1479 1481 if namespace == 'phases':
1480 1482 _pullapplyphases(pullop, value)
1481 1483
1482 1484 # processing bookmark update
1483 1485 for namespace, value in op.records['listkeys']:
1484 1486 if namespace == 'bookmarks':
1485 1487 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1486 1488
1487 1489 # bookmark data were either already there or pulled in the bundle
1488 1490 if pullop.remotebookmarks is not None:
1489 1491 _pullbookmarks(pullop)
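
# Illustration (an editor's sketch, not part of the original module): for a
# typical incremental pull from a bundle2 server that supports binary phase
# exchange and whose bookmarks are not yet known locally, the ``kwargs``
# assembled above would look roughly like:
#
#     kwargs = {'bundlecaps': caps20to10(pullop.repo),
#               'common': pullop.common,
#               'heads': pullop.heads or pullop.rheads,
#               'cg': pullop.fetch,
#               'phases': True,
#               'listkeys': ['bookmarks']}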

def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""

def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so we don't open
    # a transaction for nothing and don't break future useful rollback calls
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)

def _pullphase(pullop):
    # Get remote phases data from remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    `gettransaction` is a function that returns the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code
    that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr
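
# Illustration (an editor's sketch, not part of the original module): the
# 'obsolete' pushkey namespace delivers markers as base85-encoded chunks
# stored under keys 'dump0', 'dump1', ..., which the loop above decodes and
# accumulates:
#
#     remoteobs = {'dump0': util.b85encode(rawmarkerdata)}
#     data = util.b85decode(remoteobs['dump0'])
#     version, markers = obsolete._readmarkers(data)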

def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {'HG20'}
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps
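
# Illustration (an editor's sketch, not part of the original module): the
# returned set carries the HG20 marker plus the local bundle2 capabilities,
# URL-quoted into a single 'bundle2=' entry, roughly:
#
#     {'HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02'}
#
# (the exact capability blob depends on the repository's configuration)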

# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for functions generating a bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, modify the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec
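
# Illustration (an editor's sketch, not part of the original module): a
# hypothetical extension could register an extra part generator, which
# getbundlechunks() below would then invoke along with the built-in steps:
#
#     @getbundle2partsgenerator('myext-extradata')
#     def _getbundlemyextpart(bundler, repo, source, bundlecaps=None,
#                             b2caps=None, **kwargs):
#         bundler.newpart('myext-extradata', data=b'...', mandatory=False)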

def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False

def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
    passed.

    Returns an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        return changegroup.makestream(repo, outgoing, '01', source,
                                      bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    return bundler.getchunks()

@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cgstream = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions: # 3.1 and 3.2 ship with an empty value
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        if outgoing.missing:
            cgstream = changegroup.makestream(repo, outgoing, version, source,
                                              bundlecaps=bundlecaps)

    if cgstream:
        part = bundler.newpart('changegroup', data=cgstream)
        if cgversions:
            part.addparam('version', version)
        part.addparam('nbchanges', '%d' % len(outgoing.missing),
                      mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)
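
# Illustration (an editor's sketch, not part of the original module): for the
# 'bookmarks' namespace, repo.listkeys('bookmarks') yields a mapping of
# bookmark names to hex changeset ids, for example:
#
#     {'@': '0123456789abcdef0123456789abcdef01234567'}
#
# pushkey.encodekeys() then serializes those pairs as the part payload.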

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        bundle2.buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle"""
    if kwargs.get('phases', False):
        if 'heads' not in b2caps.get('phases'):
            raise ValueError(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phases for now.
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)
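
# Illustration (an editor's sketch, not part of the original module):
# ``phasemapping`` is indexed by phase number, so a repository with two
# public heads and one draft head would encode:
#
#     phasemapping = [sorted([pub1, pub2]),  # phases.public (0)
#                     [draft1],              # phases.draft (1)
#                     []]                    # phases.secret (2)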

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
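
# Illustration (an editor's sketch, not part of the original module): a
# client guards against push races by sending either the exact heads it saw
# during discovery or a sha1 digest of them:
#
#     expected = ['hashed',
#                 hashlib.sha1(''.join(sorted(observed_heads))).digest()]
#     check_heads(repo, expected, 'uploading changes')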

def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. Each dict has a ``URL`` key corresponding
    to the URL, and the other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m
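
# Illustration (an editor's sketch, not part of the original module): a
# manifest line such as
#
#     https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# parses into
#
#     {'URL': 'https://example.com/full.hg',
#      'BUNDLESPEC': 'gzip-v2', 'COMPRESSION': 'gzip', 'VERSION': 'v2',
#      'REQUIRESNI': 'true'}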

def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                comp, version, params = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and (comp != 'UN' or version != 's1'):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]
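
# Illustration (an editor's sketch, not part of the original module): with
#
#     [ui]
#     clonebundleprefers = VERSION=v2, COMPRESSION=gzip
#
# entries whose VERSION is 'v2' sort first, ties broken by
# COMPRESSION='gzip'; entries the preferences cannot distinguish keep the
# server's manifest order.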

def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') % e.reason)

        return False