phase: isolate logic to update remote phase through bundle2 pushkey...
Boris Feld
r34823:c1e7ce11 default
@@ -1,2106 +1,2109 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 hex,
17 17 nullid,
18 18 )
19 19 from . import (
20 20 bookmarks as bookmod,
21 21 bundle2,
22 22 changegroup,
23 23 discovery,
24 24 error,
25 25 lock as lockmod,
26 26 obsolete,
27 27 phases,
28 28 pushkey,
29 29 pycompat,
30 30 scmutil,
31 31 sslutil,
32 32 streamclone,
33 33 url as urlmod,
34 34 util,
35 35 )
36 36
37 37 urlerr = util.urlerr
38 38 urlreq = util.urlreq
39 39
40 40 # Maps bundle version human names to changegroup versions.
41 41 _bundlespeccgversions = {'v1': '01',
42 42 'v2': '02',
43 43 'packed1': 's1',
44 44 'bundle2': '02', #legacy
45 45 }
46 46
47 47 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
48 48 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
49 49
50 50 def parsebundlespec(repo, spec, strict=True, externalnames=False):
51 51 """Parse a bundle string specification into parts.
52 52
53 53 Bundle specifications denote a well-defined bundle/exchange format.
54 54 The content of a given specification should not change over time in
55 55 order to ensure that bundles produced by a newer version of Mercurial are
56 56 readable from an older version.
57 57
58 58 The string currently has the form:
59 59
60 60 <compression>-<type>[;<parameter0>[;<parameter1>]]
61 61
62 62 Where <compression> is one of the supported compression formats
63 63 and <type> is (currently) a version string. A ";" can follow the type and
64 64 all text afterwards is interpreted as URI encoded, ";" delimited key=value
65 65 pairs.
66 66
67 67 If ``strict`` is True (the default) <compression> is required. Otherwise,
68 68 it is optional.
69 69
70 70 If ``externalnames`` is False (the default), the human-centric names will
71 71 be converted to their internal representation.
72 72
73 73 Returns a 3-tuple of (compression, version, parameters). Compression will
74 74 be ``None`` if not in strict mode and a compression isn't defined.
75 75
76 76 An ``InvalidBundleSpecification`` is raised when the specification is
77 77 not syntactically well formed.
78 78
79 79 An ``UnsupportedBundleSpecification`` is raised when the compression or
80 80 bundle type/version is not recognized.
81 81
82 82 Note: this function will likely eventually return a more complex data
83 83 structure, including bundle2 part information.
84 84 """
85 85 def parseparams(s):
86 86 if ';' not in s:
87 87 return s, {}
88 88
89 89 params = {}
90 90 version, paramstr = s.split(';', 1)
91 91
92 92 for p in paramstr.split(';'):
93 93 if '=' not in p:
94 94 raise error.InvalidBundleSpecification(
95 95 _('invalid bundle specification: '
96 96 'missing "=" in parameter: %s') % p)
97 97
98 98 key, value = p.split('=', 1)
99 99 key = urlreq.unquote(key)
100 100 value = urlreq.unquote(value)
101 101 params[key] = value
102 102
103 103 return version, params
104 104
105 105
106 106 if strict and '-' not in spec:
107 107 raise error.InvalidBundleSpecification(
108 108 _('invalid bundle specification; '
109 109 'must be prefixed with compression: %s') % spec)
110 110
111 111 if '-' in spec:
112 112 compression, version = spec.split('-', 1)
113 113
114 114 if compression not in util.compengines.supportedbundlenames:
115 115 raise error.UnsupportedBundleSpecification(
116 116 _('%s compression is not supported') % compression)
117 117
118 118 version, params = parseparams(version)
119 119
120 120 if version not in _bundlespeccgversions:
121 121 raise error.UnsupportedBundleSpecification(
122 122 _('%s is not a recognized bundle version') % version)
123 123 else:
124 124 # Value could be just the compression or just the version, in which
125 125 # case some defaults are assumed (but only when not in strict mode).
126 126 assert not strict
127 127
128 128 spec, params = parseparams(spec)
129 129
130 130 if spec in util.compengines.supportedbundlenames:
131 131 compression = spec
132 132 version = 'v1'
133 133 # Generaldelta repos require v2.
134 134 if 'generaldelta' in repo.requirements:
135 135 version = 'v2'
136 136 # Modern compression engines require v2.
137 137 if compression not in _bundlespecv1compengines:
138 138 version = 'v2'
139 139 elif spec in _bundlespeccgversions:
140 140 if spec == 'packed1':
141 141 compression = 'none'
142 142 else:
143 143 compression = 'bzip2'
144 144 version = spec
145 145 else:
146 146 raise error.UnsupportedBundleSpecification(
147 147 _('%s is not a recognized bundle specification') % spec)
148 148
149 149 # Bundle version 1 only supports a known set of compression engines.
150 150 if version == 'v1' and compression not in _bundlespecv1compengines:
151 151 raise error.UnsupportedBundleSpecification(
152 152 _('compression engine %s is not supported on v1 bundles') %
153 153 compression)
154 154
155 155 # The specification for packed1 can optionally declare the data formats
156 156 # required to apply it. If we see this metadata, compare against what the
157 157 # repo supports and error if the bundle isn't compatible.
158 158 if version == 'packed1' and 'requirements' in params:
159 159 requirements = set(params['requirements'].split(','))
160 160 missingreqs = requirements - repo.supportedformats
161 161 if missingreqs:
162 162 raise error.UnsupportedBundleSpecification(
163 163 _('missing support for repository features: %s') %
164 164 ', '.join(sorted(missingreqs)))
165 165
166 166 if not externalnames:
167 167 engine = util.compengines.forbundlename(compression)
168 168 compression = engine.bundletype()[1]
169 169 version = _bundlespeccgversions[version]
170 170 return compression, version, params
171 171
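# Editor's sketch (not part of the original file): how parsebundlespec
# decomposes spec strings, following the tables and docstring above. The
# internal names come from the compression engines' bundletype() pairs.
#
#   parsebundlespec(repo, 'gzip-v2')
#       -> ('GZ', '02', {})
#   parsebundlespec(repo, 'none-packed1;requirements=generaldelta')
#       -> ('UN', 's1', {'requirements': 'generaldelta'})
#   parsebundlespec(repo, 'v2', strict=False)
#       -> ('BZ', '02', {})   # compression defaults to bzip2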
172 172 def readbundle(ui, fh, fname, vfs=None):
173 173 header = changegroup.readexactly(fh, 4)
174 174
175 175 alg = None
176 176 if not fname:
177 177 fname = "stream"
178 178 if not header.startswith('HG') and header.startswith('\0'):
179 179 fh = changegroup.headerlessfixup(fh, header)
180 180 header = "HG10"
181 181 alg = 'UN'
182 182 elif vfs:
183 183 fname = vfs.join(fname)
184 184
185 185 magic, version = header[0:2], header[2:4]
186 186
187 187 if magic != 'HG':
188 188 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
189 189 if version == '10':
190 190 if alg is None:
191 191 alg = changegroup.readexactly(fh, 2)
192 192 return changegroup.cg1unpacker(fh, alg)
193 193 elif version.startswith('2'):
194 194 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
195 195 elif version == 'S1':
196 196 return streamclone.streamcloneapplier(fh)
197 197 else:
198 198 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
199 199
200 200 def getbundlespec(ui, fh):
201 201 """Infer the bundlespec from a bundle file handle.
202 202
203 203 The input file handle is seeked and the original seek position is not
204 204 restored.
205 205 """
206 206 def speccompression(alg):
207 207 try:
208 208 return util.compengines.forbundletype(alg).bundletype()[0]
209 209 except KeyError:
210 210 return None
211 211
212 212 b = readbundle(ui, fh, None)
213 213 if isinstance(b, changegroup.cg1unpacker):
214 214 alg = b._type
215 215 if alg == '_truncatedBZ':
216 216 alg = 'BZ'
217 217 comp = speccompression(alg)
218 218 if not comp:
219 219 raise error.Abort(_('unknown compression algorithm: %s') % alg)
220 220 return '%s-v1' % comp
221 221 elif isinstance(b, bundle2.unbundle20):
222 222 if 'Compression' in b.params:
223 223 comp = speccompression(b.params['Compression'])
224 224 if not comp:
225 225 raise error.Abort(_('unknown compression algorithm: %s') % comp)
226 226 else:
227 227 comp = 'none'
228 228
229 229 version = None
230 230 for part in b.iterparts():
231 231 if part.type == 'changegroup':
232 232 version = part.params['version']
233 233 if version in ('01', '02'):
234 234 version = 'v2'
235 235 else:
236 236 raise error.Abort(_('changegroup version %s does not have '
237 237 'a known bundlespec') % version,
238 238 hint=_('try upgrading your Mercurial '
239 239 'client'))
240 240
241 241 if not version:
242 242 raise error.Abort(_('could not identify changegroup version in '
243 243 'bundle'))
244 244
245 245 return '%s-%s' % (comp, version)
246 246 elif isinstance(b, streamclone.streamcloneapplier):
247 247 requirements = streamclone.readbundle1header(fh)[2]
248 248 params = 'requirements=%s' % ','.join(sorted(requirements))
249 249 return 'none-packed1;%s' % urlreq.quote(params)
250 250 else:
251 251 raise error.Abort(_('unknown bundle type: %s') % b)
252 252
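# Editor's sketch of typical use (file name is hypothetical): infer the
# spec of a bundle previously written by `hg bundle`. readbundle() above
# dispatches on the 4-byte header: 'HG10' plus a 2-byte compression code
# for bundle1, 'HG20' for bundle2, 'HGS1' for stream clone bundles.
#
#   with open('changes.hg', 'rb') as fh:
#       spec = getbundlespec(ui, fh)   # e.g. 'bzip2-v1' or 'gzip-v2'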
253 253 def _computeoutgoing(repo, heads, common):
254 254 """Computes which revs are outgoing given a set of common
255 255 and a set of heads.
256 256
257 257 This is a separate function so extensions can have access to
258 258 the logic.
259 259
260 260 Returns a discovery.outgoing object.
261 261 """
262 262 cl = repo.changelog
263 263 if common:
264 264 hasnode = cl.hasnode
265 265 common = [n for n in common if hasnode(n)]
266 266 else:
267 267 common = [nullid]
268 268 if not heads:
269 269 heads = cl.heads()
270 270 return discovery.outgoing(repo, common, heads)
271 271
272 272 def _forcebundle1(op):
273 273 """return true if a pull/push must use bundle1
274 274
275 275 This function is used to allow testing of the older bundle version"""
276 276 ui = op.repo.ui
277 277 forcebundle1 = False
278 278 # The goal of this config is to allow developers to choose the bundle
279 279 # version used during exchange. This is especially handy during tests.
280 280 # Value is a list of bundle versions to pick from; the highest version
281 281 # should be used.
282 282 #
283 283 # developer config: devel.legacy.exchange
284 284 exchange = ui.configlist('devel', 'legacy.exchange')
285 285 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
286 286 return forcebundle1 or not op.remote.capable('bundle2')
287 287
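# Editor's note: the developer config mentioned above is set from an
# hgrc, e.g. to force the legacy exchange format in tests (a sketch,
# only meaningful when the peer also supports bundle1):
#
#   [devel]
#   legacy.exchange = bundle1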
288 288 class pushoperation(object):
289 289 """An object that represents a single push operation
290 290
291 291 Its purpose is to carry push related state and very common operations.
292 292
293 293 A new pushoperation should be created at the beginning of each push and
294 294 discarded afterward.
295 295 """
296 296
297 297 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
298 298 bookmarks=(), pushvars=None):
299 299 # repo we push from
300 300 self.repo = repo
301 301 self.ui = repo.ui
302 302 # repo we push to
303 303 self.remote = remote
304 304 # force option provided
305 305 self.force = force
306 306 # revs to be pushed (None is "all")
307 307 self.revs = revs
308 308 # bookmarks explicitly pushed
309 309 self.bookmarks = bookmarks
310 310 # allow push of new branch
311 311 self.newbranch = newbranch
312 312 # steps already performed
313 313 # (used to check which steps have already been performed through bundle2)
314 314 self.stepsdone = set()
315 315 # Integer version of the changegroup push result
316 316 # - None means nothing to push
317 317 # - 0 means HTTP error
318 318 # - 1 means we pushed and remote head count is unchanged *or*
319 319 # we have outgoing changesets but refused to push
320 320 # - other values as described by addchangegroup()
321 321 self.cgresult = None
322 322 # Boolean value for the bookmark push
323 323 self.bkresult = None
324 324 # discover.outgoing object (contains common and outgoing data)
325 325 self.outgoing = None
326 326 # all remote topological heads before the push
327 327 self.remoteheads = None
328 328 # Details of the remote branch pre and post push
329 329 #
330 330 # mapping: {'branch': ([remoteheads],
331 331 # [newheads],
332 332 # [unsyncedheads],
333 333 # [discardedheads])}
334 334 # - branch: the branch name
335 335 # - remoteheads: the list of remote heads known locally
336 336 # None if the branch is new
337 337 # - newheads: the new remote heads (known locally) with outgoing pushed
338 338 # - unsyncedheads: the list of remote heads unknown locally.
339 339 # - discardedheads: the list of remote heads made obsolete by the push
340 340 self.pushbranchmap = None
341 341 # testable as a boolean indicating if any nodes are missing locally.
342 342 self.incoming = None
343 343 # summary of the remote phase situation
344 344 self.remotephases = None
345 345 # phase changes that must be pushed alongside the changesets
346 346 self.outdatedphases = None
347 347 # phase changes that must be pushed if the changeset push fails
348 348 self.fallbackoutdatedphases = None
349 349 # outgoing obsmarkers
350 350 self.outobsmarkers = set()
351 351 # outgoing bookmarks
352 352 self.outbookmarks = []
353 353 # transaction manager
354 354 self.trmanager = None
355 355 # map { pushkey partid -> callback handling failure}
356 356 # used to handle exception from mandatory pushkey part failure
357 357 self.pkfailcb = {}
358 358 # an iterable of pushvars or None
359 359 self.pushvars = pushvars
360 360
361 361 @util.propertycache
362 362 def futureheads(self):
363 363 """future remote heads if the changeset push succeeds"""
364 364 return self.outgoing.missingheads
365 365
366 366 @util.propertycache
367 367 def fallbackheads(self):
368 368 """future remote heads if the changeset push fails"""
369 369 if self.revs is None:
370 370 # no target to push, all common heads are relevant
371 371 return self.outgoing.commonheads
372 372 unfi = self.repo.unfiltered()
373 373 # I want cheads = heads(::missingheads and ::commonheads)
374 374 # (missingheads is revs with secret changeset filtered out)
375 375 #
376 376 # This can be expressed as:
377 377 # cheads = ( (missingheads and ::commonheads)
378 378 # + (commonheads and ::missingheads))
380 380 #
381 381 # while trying to push we already computed the following:
382 382 # common = (::commonheads)
383 383 # missing = ((commonheads::missingheads) - commonheads)
384 384 #
385 385 # We can pick:
386 386 # * missingheads part of common (::commonheads)
387 387 common = self.outgoing.common
388 388 nm = self.repo.changelog.nodemap
389 389 cheads = [node for node in self.revs if nm[node] in common]
390 390 # and
391 391 # * commonheads parents on missing
392 392 revset = unfi.set('%ln and parents(roots(%ln))',
393 393 self.outgoing.commonheads,
394 394 self.outgoing.missing)
395 395 cheads.extend(c.node() for c in revset)
396 396 return cheads
397 397
398 398 @property
399 399 def commonheads(self):
400 400 """set of all common heads after changeset bundle push"""
401 401 if self.cgresult:
402 402 return self.futureheads
403 403 else:
404 404 return self.fallbackheads
405 405
406 406 # mapping of message used when pushing bookmark
407 407 bookmsgmap = {'update': (_("updating bookmark %s\n"),
408 408 _('updating bookmark %s failed!\n')),
409 409 'export': (_("exporting bookmark %s\n"),
410 410 _('exporting bookmark %s failed!\n')),
411 411 'delete': (_("deleting remote bookmark %s\n"),
412 412 _('deleting remote bookmark %s failed!\n')),
413 413 }
414 414
415 415
416 416 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
417 417 opargs=None):
418 418 '''Push outgoing changesets (limited by revs) from a local
419 419 repository to remote. Return an integer:
420 420 - None means nothing to push
421 421 - 0 means HTTP error
422 422 - 1 means we pushed and remote head count is unchanged *or*
423 423 we have outgoing changesets but refused to push
424 424 - other values as described by addchangegroup()
425 425 '''
426 426 if opargs is None:
427 427 opargs = {}
428 428 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
429 429 **pycompat.strkwargs(opargs))
430 430 if pushop.remote.local():
431 431 missing = (set(pushop.repo.requirements)
432 432 - pushop.remote.local().supported)
433 433 if missing:
434 434 msg = _("required features are not"
435 435 " supported in the destination:"
436 436 " %s") % (', '.join(sorted(missing)))
437 437 raise error.Abort(msg)
438 438
439 439 if not pushop.remote.canpush():
440 440 raise error.Abort(_("destination does not support push"))
441 441
442 442 if not pushop.remote.capable('unbundle'):
443 443 raise error.Abort(_('cannot push: destination does not support the '
444 444 'unbundle wire protocol command'))
445 445
446 446 # get lock as we might write phase data
447 447 wlock = lock = None
448 448 try:
449 449 # bundle2 push may receive a reply bundle touching bookmarks or other
450 450 # things requiring the wlock. Take it now to ensure proper ordering.
451 451 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
452 452 if (not _forcebundle1(pushop)) and maypushback:
453 453 wlock = pushop.repo.wlock()
454 454 lock = pushop.repo.lock()
455 455 pushop.trmanager = transactionmanager(pushop.repo,
456 456 'push-response',
457 457 pushop.remote.url())
458 458 except IOError as err:
459 459 if err.errno != errno.EACCES:
460 460 raise
461 461 # source repo cannot be locked.
462 462 # We do not abort the push, but just disable the local phase
463 463 # synchronisation.
464 464 msg = 'cannot lock source repository: %s\n' % err
465 465 pushop.ui.debug(msg)
466 466
467 467 with wlock or util.nullcontextmanager(), \
468 468 lock or util.nullcontextmanager(), \
469 469 pushop.trmanager or util.nullcontextmanager():
470 470 pushop.repo.checkpush(pushop)
471 471 _pushdiscovery(pushop)
472 472 if not _forcebundle1(pushop):
473 473 _pushbundle2(pushop)
474 474 _pushchangeset(pushop)
475 475 _pushsyncphase(pushop)
476 476 _pushobsolete(pushop)
477 477 _pushbookmark(pushop)
478 478
479 479 return pushop
480 480
481 481 # list of steps to perform discovery before push
482 482 pushdiscoveryorder = []
483 483
484 484 # Mapping between step name and function
485 485 #
486 486 # This exists to help extensions wrap steps if necessary
487 487 pushdiscoverymapping = {}
488 488
489 489 def pushdiscovery(stepname):
490 490 """decorator for function performing discovery before push
491 491
492 492 The function is added to the step -> function mapping and appended to the
493 493 list of steps. Beware that decorated functions will be added in order (this
494 494 may matter).
495 495
496 496 You can only use this decorator for a new step; if you want to wrap a step
497 497 from an extension, change the pushdiscoverymapping dictionary directly."""
498 498 def dec(func):
499 499 assert stepname not in pushdiscoverymapping
500 500 pushdiscoverymapping[stepname] = func
501 501 pushdiscoveryorder.append(stepname)
502 502 return func
503 503 return dec
504 504
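# Editor's sketch of the decorator in use (the step name 'mydata' and
# its body are hypothetical): a new discovery step is registered once
# and then runs as part of _pushdiscovery() below.
#
#   @pushdiscovery('mydata')
#   def _pushdiscoverymydata(pushop):
#       pushop.ui.debug('mydata discovery\n')
#
# Wrapping an existing step instead goes through the mapping:
#
#   origphase = pushdiscoverymapping['phase']
#   def wrappedphase(pushop):
#       origphase(pushop)   # extension logic goes before/after the call
#   pushdiscoverymapping['phase'] = wrappedphase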
505 505 def _pushdiscovery(pushop):
506 506 """Run all discovery steps"""
507 507 for stepname in pushdiscoveryorder:
508 508 step = pushdiscoverymapping[stepname]
509 509 step(pushop)
510 510
511 511 @pushdiscovery('changeset')
512 512 def _pushdiscoverychangeset(pushop):
513 513 """discover the changesets that need to be pushed"""
514 514 fci = discovery.findcommonincoming
515 515 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
516 516 common, inc, remoteheads = commoninc
517 517 fco = discovery.findcommonoutgoing
518 518 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
519 519 commoninc=commoninc, force=pushop.force)
520 520 pushop.outgoing = outgoing
521 521 pushop.remoteheads = remoteheads
522 522 pushop.incoming = inc
523 523
524 524 @pushdiscovery('phase')
525 525 def _pushdiscoveryphase(pushop):
526 526 """discover the phase that needs to be pushed
527 527
528 528 (computed for both the success and failure cases of the changeset push)"""
529 529 outgoing = pushop.outgoing
530 530 unfi = pushop.repo.unfiltered()
531 531 remotephases = pushop.remote.listkeys('phases')
532 532 if (pushop.ui.configbool('ui', '_usedassubrepo')
533 533 and remotephases # server supports phases
534 534 and not pushop.outgoing.missing # no changesets to be pushed
535 535 and remotephases.get('publishing', False)):
536 536 # When:
537 537 # - this is a subrepo push
538 538 # - and the remote supports phases
539 539 # - and no changesets are to be pushed
540 540 # - and the remote is publishing
541 541 # we may be in the issue 3871 case!
542 542 # We drop the phase synchronisation that would otherwise be done as
543 543 # a courtesy to publish changesets that are possibly still draft on
544 544 # the remote.
545 545 pushop.outdatedphases = []
546 546 pushop.fallbackoutdatedphases = []
547 547 return
548 548
549 549 pushop.remotephases = phases.remotephasessummary(pushop.repo,
550 550 pushop.fallbackheads,
551 551 remotephases)
552 552 droots = pushop.remotephases.draftroots
553 553
554 554 extracond = ''
555 555 if not pushop.remotephases.publishing:
556 556 extracond = ' and public()'
557 557 revset = 'heads((%%ln::%%ln) %s)' % extracond
558 558 # Get the list of all revs that are draft on the remote but public here.
559 559 # XXX Beware that this revset breaks if droots is not strictly
560 560 # XXX roots; we may want to ensure it is, but that is costly
561 561 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
562 562 if not outgoing.missing:
563 563 future = fallback
564 564 else:
565 565 # add the changesets we are going to push as draft
566 566 #
567 567 # This should not be necessary for a publishing server, but because
568 568 # of an issue fixed in xxxxx we have to do it anyway.
569 569 fdroots = list(unfi.set('roots(%ln + %ln::)',
570 570 outgoing.missing, droots))
571 571 fdroots = [f.node() for f in fdroots]
572 572 future = list(unfi.set(revset, fdroots, pushop.futureheads))
573 573 pushop.outdatedphases = future
574 574 pushop.fallbackoutdatedphases = fallback
575 575
576 576 @pushdiscovery('obsmarker')
577 577 def _pushdiscoveryobsmarkers(pushop):
578 578 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
579 579 and pushop.repo.obsstore
580 580 and 'obsolete' in pushop.remote.listkeys('namespaces')):
581 581 repo = pushop.repo
582 582 # a very naive computation that can be quite expensive on a big repo.
583 583 # However, evolution is currently slow on them anyway.
584 584 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
585 585 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
586 586
587 587 @pushdiscovery('bookmarks')
588 588 def _pushdiscoverybookmarks(pushop):
589 589 ui = pushop.ui
590 590 repo = pushop.repo.unfiltered()
591 591 remote = pushop.remote
592 592 ui.debug("checking for updated bookmarks\n")
593 593 ancestors = ()
594 594 if pushop.revs:
595 595 revnums = map(repo.changelog.rev, pushop.revs)
596 596 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
597 597 remotebookmark = remote.listkeys('bookmarks')
598 598
599 599 explicit = set([repo._bookmarks.expandname(bookmark)
600 600 for bookmark in pushop.bookmarks])
601 601
602 602 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
603 603 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
604 604
605 605 def safehex(x):
606 606 if x is None:
607 607 return x
608 608 return hex(x)
609 609
610 610 def hexifycompbookmarks(bookmarks):
611 611 for b, scid, dcid in bookmarks:
612 612 yield b, safehex(scid), safehex(dcid)
613 613
614 614 comp = [hexifycompbookmarks(marks) for marks in comp]
615 615 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
616 616
617 617 for b, scid, dcid in advsrc:
618 618 if b in explicit:
619 619 explicit.remove(b)
620 620 if not ancestors or repo[scid].rev() in ancestors:
621 621 pushop.outbookmarks.append((b, dcid, scid))
622 622 # search added bookmark
623 623 for b, scid, dcid in addsrc:
624 624 if b in explicit:
625 625 explicit.remove(b)
626 626 pushop.outbookmarks.append((b, '', scid))
627 627 # search for overwritten bookmark
628 628 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
629 629 if b in explicit:
630 630 explicit.remove(b)
631 631 pushop.outbookmarks.append((b, dcid, scid))
632 632 # search for bookmark to delete
633 633 for b, scid, dcid in adddst:
634 634 if b in explicit:
635 635 explicit.remove(b)
636 636 # treat as "deleted locally"
637 637 pushop.outbookmarks.append((b, dcid, ''))
638 638 # identical bookmarks shouldn't get reported
639 639 for b, scid, dcid in same:
640 640 if b in explicit:
641 641 explicit.remove(b)
642 642
643 643 if explicit:
644 644 explicit = sorted(explicit)
645 645 # we should probably list all of them
646 646 ui.warn(_('bookmark %s does not exist on the local '
647 647 'or remote repository!\n') % explicit[0])
648 648 pushop.bkresult = 2
649 649
650 650 pushop.outbookmarks.sort()
651 651
652 652 def _pushcheckoutgoing(pushop):
653 653 outgoing = pushop.outgoing
654 654 unfi = pushop.repo.unfiltered()
655 655 if not outgoing.missing:
656 656 # nothing to push
657 657 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
658 658 return False
659 659 # something to push
660 660 if not pushop.force:
661 661 # if repo.obsstore == False --> no obsolete
662 662 # then, save the iteration
663 663 if unfi.obsstore:
664 664 # these messages are defined here to respect the 80-char limit
665 665 mso = _("push includes obsolete changeset: %s!")
666 666 mspd = _("push includes phase-divergent changeset: %s!")
667 667 mscd = _("push includes content-divergent changeset: %s!")
668 668 mst = {"orphan": _("push includes orphan changeset: %s!"),
669 669 "phase-divergent": mspd,
670 670 "content-divergent": mscd}
671 671 # If we are pushing and there is at least one
672 672 # obsolete or unstable changeset in missing, at
673 673 # least one of the missing heads will be obsolete
674 674 # or unstable. So checking only the heads is ok.
675 675 for node in outgoing.missingheads:
676 676 ctx = unfi[node]
677 677 if ctx.obsolete():
678 678 raise error.Abort(mso % ctx)
679 679 elif ctx.isunstable():
680 680 # TODO print more than one instability in the abort
681 681 # message
682 682 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
683 683
684 684 discovery.checkheads(pushop)
685 685 return True
686 686
687 687 # List of names of steps to perform for an outgoing bundle2, order matters.
688 688 b2partsgenorder = []
689 689
690 690 # Mapping between step name and function
691 691 #
692 692 # This exists to help extensions wrap steps if necessary
693 693 b2partsgenmapping = {}
694 694
695 695 def b2partsgenerator(stepname, idx=None):
696 696 """decorator for function generating bundle2 part
697 697
698 698 The function is added to the step -> function mapping and appended to the
699 699 list of steps. Beware that decorated functions will be added in order
700 700 (this may matter).
701 701
702 702 You can only use this decorator for new steps; if you want to wrap a step
703 703 from an extension, change the b2partsgenmapping dictionary directly."""
704 704 def dec(func):
705 705 assert stepname not in b2partsgenmapping
706 706 b2partsgenmapping[stepname] = func
707 707 if idx is None:
708 708 b2partsgenorder.append(stepname)
709 709 else:
710 710 b2partsgenorder.insert(idx, stepname)
711 711 return func
712 712 return dec
713 713
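# Editor's sketch (part name is hypothetical): the optional ``idx``
# argument inserts the step at a given position instead of appending,
# exactly as the real 'pushvars' generator below does with idx=0.
#
#   @b2partsgenerator('mypart', idx=0)
#   def _pushb2mypart(pushop, bundler):
#       if 'mypart' in pushop.stepsdone:
#           return
#       pushop.stepsdone.add('mypart')
#       # ... add parts to ``bundler`` here ...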
714 714 def _pushb2ctxcheckheads(pushop, bundler):
715 715 """Generate race condition checking parts
716 716
717 717 Exists as an independent function to aid extensions
718 718 """
719 719 # * 'force' does not check for push races,
720 720 # * if we don't push anything, there is nothing to check.
721 721 if not pushop.force and pushop.outgoing.missingheads:
722 722 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
723 723 emptyremote = pushop.pushbranchmap is None
724 724 if not allowunrelated or emptyremote:
725 725 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
726 726 else:
727 727 affected = set()
728 728 for branch, heads in pushop.pushbranchmap.iteritems():
729 729 remoteheads, newheads, unsyncedheads, discardedheads = heads
730 730 if remoteheads is not None:
731 731 remote = set(remoteheads)
732 732 affected |= set(discardedheads) & remote
733 733 affected |= remote - set(newheads)
734 734 if affected:
735 735 data = iter(sorted(affected))
736 736 bundler.newpart('check:updated-heads', data=data)
737 737
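# Editor's note: two part types can come out of the generator above.
# 'check:heads' ships the raw nodes of all remote heads known locally
# and makes the server abort if its heads changed at all, while the
# newer 'check:updated-heads' only lists the heads this push actually
# affects, so a racing push on an unrelated branch no longer aborts us.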
738 738 def _pushing(pushop):
739 739 """return True if we are pushing anything"""
740 740 return bool(pushop.outgoing.missing
741 741 or pushop.outdatedphases
742 742 or pushop.outobsmarkers
743 743 or pushop.outbookmarks)
744 744
745 745 @b2partsgenerator('check-phases')
746 746 def _pushb2checkphases(pushop, bundler):
747 747 """insert phase move checking"""
748 748 if not _pushing(pushop) or pushop.force:
749 749 return
750 750 b2caps = bundle2.bundle2caps(pushop.remote)
751 751 hasphaseheads = 'heads' in b2caps.get('phases', ())
752 752 if pushop.remotephases is not None and hasphaseheads:
753 753 # check that the remote phase has not changed
754 754 checks = [[] for p in phases.allphases]
755 755 checks[phases.public].extend(pushop.remotephases.publicheads)
756 756 checks[phases.draft].extend(pushop.remotephases.draftroots)
757 757 if any(checks):
758 758 for nodes in checks:
759 759 nodes.sort()
760 760 checkdata = phases.binaryencode(checks)
761 761 bundler.newpart('check:phases', data=checkdata)
762 762
763 763 @b2partsgenerator('changeset')
764 764 def _pushb2ctx(pushop, bundler):
765 765 """handle changegroup push through bundle2
766 766
767 767 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
768 768 """
769 769 if 'changesets' in pushop.stepsdone:
770 770 return
771 771 pushop.stepsdone.add('changesets')
772 772 # Send known heads to the server for race detection.
773 773 if not _pushcheckoutgoing(pushop):
774 774 return
775 775 pushop.repo.prepushoutgoinghooks(pushop)
776 776
777 777 _pushb2ctxcheckheads(pushop, bundler)
778 778
779 779 b2caps = bundle2.bundle2caps(pushop.remote)
780 780 version = '01'
781 781 cgversions = b2caps.get('changegroup')
782 782 if cgversions: # 3.1 and 3.2 ship with an empty value
783 783 cgversions = [v for v in cgversions
784 784 if v in changegroup.supportedoutgoingversions(
785 785 pushop.repo)]
786 786 if not cgversions:
787 787 raise ValueError(_('no common changegroup version'))
788 788 version = max(cgversions)
789 789 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
790 790 'push')
791 791 cgpart = bundler.newpart('changegroup', data=cgstream)
792 792 if cgversions:
793 793 cgpart.addparam('version', version)
794 794 if 'treemanifest' in pushop.repo.requirements:
795 795 cgpart.addparam('treemanifest', '1')
796 796 def handlereply(op):
797 797 """extract addchangegroup returns from server reply"""
798 798 cgreplies = op.records.getreplies(cgpart.id)
799 799 assert len(cgreplies['changegroup']) == 1
800 800 pushop.cgresult = cgreplies['changegroup'][0]['return']
801 801 return handlereply
802 802
803 803 @b2partsgenerator('phase')
804 804 def _pushb2phases(pushop, bundler):
805 805 """handle phase push through bundle2"""
806 806 if 'phases' in pushop.stepsdone:
807 807 return
808 808 b2caps = bundle2.bundle2caps(pushop.remote)
809 if not 'pushkey' in b2caps:
810 return
809 if 'pushkey' in b2caps:
810 _pushb2phasespushkey(pushop, bundler)
811
812 def _pushb2phasespushkey(pushop, bundler):
813 """push phase information through a bundle2 'pushkey' part"""
811 814 pushop.stepsdone.add('phases')
812 815 part2node = []
813 816
814 817 def handlefailure(pushop, exc):
815 818 targetid = int(exc.partid)
816 819 for partid, node in part2node:
817 820 if partid == targetid:
818 821 raise error.Abort(_('updating %s to public failed') % node)
819 822
820 823 enc = pushkey.encode
821 824 for newremotehead in pushop.outdatedphases:
822 825 part = bundler.newpart('pushkey')
823 826 part.addparam('namespace', enc('phases'))
824 827 part.addparam('key', enc(newremotehead.hex()))
825 828 part.addparam('old', enc('%d' % phases.draft))
826 829 part.addparam('new', enc('%d' % phases.public))
827 830 part2node.append((part.id, newremotehead))
828 831 pushop.pkfailcb[part.id] = handlefailure
829 832
830 833 def handlereply(op):
831 834 for partid, node in part2node:
832 835 partrep = op.records.getreplies(partid)
833 836 results = partrep['pushkey']
834 837 assert len(results) <= 1
835 838 msg = None
836 839 if not results:
837 840 msg = _('server ignored update of %s to public!\n') % node
838 841 elif not int(results[0]['return']):
839 842 msg = _('updating %s to public failed!\n') % node
840 843 if msg is not None:
841 844 pushop.ui.warn(msg)
842 845 return handlereply
843 846
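# Editor's note on the parts built above (values follow the pushkey
# protocol; phases.public == 0 and phases.draft == 1): each outdated
# remote head becomes one 'pushkey' part shaped like
#
#   namespace = 'phases'
#   key       = '<40-hex node>'
#   old       = '1'   # draft
#   new       = '0'   # public
#
# with a failure callback registered so that an aborted part can be
# traced back to the node whose phase move was refused.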
844 847 @b2partsgenerator('obsmarkers')
845 848 def _pushb2obsmarkers(pushop, bundler):
846 849 if 'obsmarkers' in pushop.stepsdone:
847 850 return
848 851 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
849 852 if obsolete.commonversion(remoteversions) is None:
850 853 return
851 854 pushop.stepsdone.add('obsmarkers')
852 855 if pushop.outobsmarkers:
853 856 markers = sorted(pushop.outobsmarkers)
854 857 bundle2.buildobsmarkerspart(bundler, markers)
855 858
856 859 @b2partsgenerator('bookmarks')
857 860 def _pushb2bookmarks(pushop, bundler):
858 861 """handle bookmark push through bundle2"""
859 862 if 'bookmarks' in pushop.stepsdone:
860 863 return
861 864 b2caps = bundle2.bundle2caps(pushop.remote)
862 865 if 'pushkey' not in b2caps:
863 866 return
864 867 pushop.stepsdone.add('bookmarks')
865 868 part2book = []
866 869 enc = pushkey.encode
867 870
868 871 def handlefailure(pushop, exc):
869 872 targetid = int(exc.partid)
870 873 for partid, book, action in part2book:
871 874 if partid == targetid:
872 875 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
873 876 # we should not be called for parts we did not generate
874 877 assert False
875 878
876 879 for book, old, new in pushop.outbookmarks:
877 880 part = bundler.newpart('pushkey')
878 881 part.addparam('namespace', enc('bookmarks'))
879 882 part.addparam('key', enc(book))
880 883 part.addparam('old', enc(old))
881 884 part.addparam('new', enc(new))
882 885 action = 'update'
883 886 if not old:
884 887 action = 'export'
885 888 elif not new:
886 889 action = 'delete'
887 890 part2book.append((part.id, book, action))
888 891 pushop.pkfailcb[part.id] = handlefailure
889 892
890 893 def handlereply(op):
891 894 ui = pushop.ui
892 895 for partid, book, action in part2book:
893 896 partrep = op.records.getreplies(partid)
894 897 results = partrep['pushkey']
895 898 assert len(results) <= 1
896 899 if not results:
897 900 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
898 901 else:
899 902 ret = int(results[0]['return'])
900 903 if ret:
901 904 ui.status(bookmsgmap[action][0] % book)
902 905 else:
903 906 ui.warn(bookmsgmap[action][1] % book)
904 907 if pushop.bkresult is not None:
905 908 pushop.bkresult = 1
906 909 return handlereply
907 910
908 911 @b2partsgenerator('pushvars', idx=0)
909 912 def _getbundlesendvars(pushop, bundler):
910 913 '''send shellvars via bundle2'''
911 914 pushvars = pushop.pushvars
912 915 if pushvars:
913 916 shellvars = {}
914 917 for raw in pushvars:
915 918 if '=' not in raw:
916 919 msg = ("unable to parse variable '%s', should follow "
917 920 "'KEY=VALUE' or 'KEY=' format")
918 921 raise error.Abort(msg % raw)
919 922 k, v = raw.split('=', 1)
920 923 shellvars[k] = v
921 924
922 925 part = bundler.newpart('pushvars')
923 926
924 927 for key, value in shellvars.iteritems():
925 928 part.addparam(key, value, mandatory=False)
926 929
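# Editor's note: these variables come from the experimental --pushvars
# push option; server-side hooks see them as HG_USERVAR_* environment
# variables. Example invocation (values are hypothetical):
#
#   hg push --pushvars "DEBUG=1" --pushvars "REASON=hotfix"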
927 930 def _pushbundle2(pushop):
928 931 """push data to the remote using bundle2
929 932
930 933 The only currently supported type of data is changegroup but this will
931 934 evolve in the future."""
932 935 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
933 936 pushback = (pushop.trmanager
934 937 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
935 938
936 939 # create reply capability
937 940 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
938 941 allowpushback=pushback))
939 942 bundler.newpart('replycaps', data=capsblob)
940 943 replyhandlers = []
941 944 for partgenname in b2partsgenorder:
942 945 partgen = b2partsgenmapping[partgenname]
943 946 ret = partgen(pushop, bundler)
944 947 if callable(ret):
945 948 replyhandlers.append(ret)
946 949 # do not push if nothing to push
947 950 if bundler.nbparts <= 1:
948 951 return
949 952 stream = util.chunkbuffer(bundler.getchunks())
950 953 try:
951 954 try:
952 955 reply = pushop.remote.unbundle(
953 956 stream, ['force'], pushop.remote.url())
954 957 except error.BundleValueError as exc:
955 958 raise error.Abort(_('missing support for %s') % exc)
956 959 try:
957 960 trgetter = None
958 961 if pushback:
959 962 trgetter = pushop.trmanager.transaction
960 963 op = bundle2.processbundle(pushop.repo, reply, trgetter)
961 964 except error.BundleValueError as exc:
962 965 raise error.Abort(_('missing support for %s') % exc)
963 966 except bundle2.AbortFromPart as exc:
964 967 pushop.ui.status(_('remote: %s\n') % exc)
965 968 if exc.hint is not None:
966 969 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
967 970 raise error.Abort(_('push failed on remote'))
968 971 except error.PushkeyFailed as exc:
969 972 partid = int(exc.partid)
970 973 if partid not in pushop.pkfailcb:
971 974 raise
972 975 pushop.pkfailcb[partid](pushop, exc)
973 976 for rephand in replyhandlers:
974 977 rephand(op)
975 978
976 979 def _pushchangeset(pushop):
977 980 """Make the actual push of changeset bundle to remote repo"""
978 981 if 'changesets' in pushop.stepsdone:
979 982 return
980 983 pushop.stepsdone.add('changesets')
981 984 if not _pushcheckoutgoing(pushop):
982 985 return
983 986
984 987 # Should have verified this in push().
985 988 assert pushop.remote.capable('unbundle')
986 989
987 990 pushop.repo.prepushoutgoinghooks(pushop)
988 991 outgoing = pushop.outgoing
989 992 # TODO: get bundlecaps from remote
990 993 bundlecaps = None
991 994 # create a changegroup from local
992 995 if pushop.revs is None and not (outgoing.excluded
993 996 or pushop.repo.changelog.filteredrevs):
994 997 # push everything,
995 998 # use the fast path, no race possible on push
996 999 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
997 1000 fastpath=True, bundlecaps=bundlecaps)
998 1001 else:
999 1002 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1000 1003 'push', bundlecaps=bundlecaps)
1001 1004
1002 1005 # apply changegroup to remote
1003 1006 # local repo finds heads on server, finds out what
1004 1007 # revs it must push. once revs transferred, if server
1005 1008 # finds it has different heads (someone else won
1006 1009 # commit/push race), server aborts.
1007 1010 if pushop.force:
1008 1011 remoteheads = ['force']
1009 1012 else:
1010 1013 remoteheads = pushop.remoteheads
1011 1014 # ssh: return remote's addchangegroup()
1012 1015 # http: return remote's addchangegroup() or 0 for error
1013 1016 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1014 1017 pushop.repo.url())
1015 1018
1016 1019 def _pushsyncphase(pushop):
1017 1020 """synchronise phase information locally and remotely"""
1018 1021 cheads = pushop.commonheads
1019 1022 # even when we don't push, exchanging phase data is useful
1020 1023 remotephases = pushop.remote.listkeys('phases')
1021 1024 if (pushop.ui.configbool('ui', '_usedassubrepo')
1022 1025 and remotephases # server supports phases
1023 1026 and pushop.cgresult is None # nothing was pushed
1024 1027 and remotephases.get('publishing', False)):
1025 1028 # When:
1026 1029 # - this is a subrepo push
1027 1030 # - and the remote supports phases
1028 1031 # - and no changesets were pushed
1029 1032 # - and the remote is publishing
1030 1033 # we may be in the issue 3871 case!
1031 1034 # We drop the phase synchronisation that would otherwise be done as
1032 1035 # a courtesy to publish changesets that are possibly still draft on
1033 1036 # the remote.
1034 1037 remotephases = {'publishing': 'True'}
1035 1038 if not remotephases: # old server, or public-only reply from non-publishing
1036 1039 _localphasemove(pushop, cheads)
1037 1040 # don't push any phase data as there is nothing to push
1038 1041 else:
1039 1042 ana = phases.analyzeremotephases(pushop.repo, cheads,
1040 1043 remotephases)
1041 1044 pheads, droots = ana
1042 1045 ### Apply remote phase on local
1043 1046 if remotephases.get('publishing', False):
1044 1047 _localphasemove(pushop, cheads)
1045 1048 else: # publish = False
1046 1049 _localphasemove(pushop, pheads)
1047 1050 _localphasemove(pushop, cheads, phases.draft)
1048 1051 ### Apply local phase on remote
1049 1052
1050 1053 if pushop.cgresult:
1051 1054 if 'phases' in pushop.stepsdone:
1052 1055 # phases already pushed through bundle2
1053 1056 return
1054 1057 outdated = pushop.outdatedphases
1055 1058 else:
1056 1059 outdated = pushop.fallbackoutdatedphases
1057 1060
1058 1061 pushop.stepsdone.add('phases')
1059 1062
1060 1063 # filter heads already turned public by the push
1061 1064 outdated = [c for c in outdated if c.node() not in pheads]
1062 1065 # fallback to independent pushkey command
1063 1066 for newremotehead in outdated:
1064 1067 r = pushop.remote.pushkey('phases',
1065 1068 newremotehead.hex(),
1066 1069 str(phases.draft),
1067 1070 str(phases.public))
1068 1071 if not r:
1069 1072 pushop.ui.warn(_('updating %s to public failed!\n')
1070 1073 % newremotehead)
1071 1074
1072 1075 def _localphasemove(pushop, nodes, phase=phases.public):
1073 1076 """move <nodes> to <phase> in the local source repo"""
1074 1077 if pushop.trmanager:
1075 1078 phases.advanceboundary(pushop.repo,
1076 1079 pushop.trmanager.transaction(),
1077 1080 phase,
1078 1081 nodes)
1079 1082 else:
1080 1083 # repo is not locked, do not change any phases!
1081 1084 # Inform the user that phases should have been moved when
1082 1085 # applicable.
1083 1086 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1084 1087 phasestr = phases.phasenames[phase]
1085 1088 if actualmoves:
1086 1089 pushop.ui.status(_('cannot lock source repo, skipping '
1087 1090 'local %s phase update\n') % phasestr)
1088 1091
1089 1092 def _pushobsolete(pushop):
1090 1093 """utility function to push obsolete markers to a remote"""
1091 1094 if 'obsmarkers' in pushop.stepsdone:
1092 1095 return
1093 1096 repo = pushop.repo
1094 1097 remote = pushop.remote
1095 1098 pushop.stepsdone.add('obsmarkers')
1096 1099 if pushop.outobsmarkers:
1097 1100 pushop.ui.debug('try to push obsolete markers to remote\n')
1098 1101 rslts = []
1099 1102 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1100 1103 for key in sorted(remotedata, reverse=True):
1101 1104 # reverse sort to ensure we end with dump0
1102 1105 data = remotedata[key]
1103 1106 rslts.append(remote.pushkey('obsolete', key, '', data))
1104 1107 if [r for r in rslts if not r]:
1105 1108 msg = _('failed to push some obsolete markers!\n')
1106 1109 repo.ui.warn(msg)
1107 1110
1108 1111 def _pushbookmark(pushop):
1109 1112 """Update bookmark position on remote"""
1110 1113 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1111 1114 return
1112 1115 pushop.stepsdone.add('bookmarks')
1113 1116 ui = pushop.ui
1114 1117 remote = pushop.remote
1115 1118
1116 1119 for b, old, new in pushop.outbookmarks:
1117 1120 action = 'update'
1118 1121 if not old:
1119 1122 action = 'export'
1120 1123 elif not new:
1121 1124 action = 'delete'
1122 1125 if remote.pushkey('bookmarks', b, old, new):
1123 1126 ui.status(bookmsgmap[action][0] % b)
1124 1127 else:
1125 1128 ui.warn(bookmsgmap[action][1] % b)
1126 1129 # discovery can have set the value from an invalid entry
1127 1130 if pushop.bkresult is not None:
1128 1131 pushop.bkresult = 1
1129 1132
1130 1133 class pulloperation(object):
1131 1134 """An object that represents a single pull operation
1132 1135
1133 1136 Its purpose is to carry pull-related state and very common operations.
1134 1137
1135 1138 A new one should be created at the beginning of each pull and discarded
1136 1139 afterward.
1137 1140 """
1138 1141
1139 1142 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1140 1143 remotebookmarks=None, streamclonerequested=None):
1141 1144 # repo we pull into
1142 1145 self.repo = repo
1143 1146 # repo we pull from
1144 1147 self.remote = remote
1145 1148 # revision we try to pull (None is "all")
1146 1149 self.heads = heads
1147 1150 # bookmark pulled explicitly
1148 1151 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1149 1152 for bookmark in bookmarks]
1150 1153 # do we force pull?
1151 1154 self.force = force
1152 1155 # whether a streaming clone was requested
1153 1156 self.streamclonerequested = streamclonerequested
1154 1157 # transaction manager
1155 1158 self.trmanager = None
1156 1159 # set of common changesets between local and remote before pull
1157 1160 self.common = None
1158 1161 # set of pulled heads
1159 1162 self.rheads = None
1160 1163 # list of missing changesets to fetch remotely
1161 1164 self.fetch = None
1162 1165 # remote bookmarks data
1163 1166 self.remotebookmarks = remotebookmarks
1164 1167 # result of changegroup pulling (used as return code by pull)
1165 1168 self.cgresult = None
1166 1169 # list of steps already done
1167 1170 self.stepsdone = set()
1168 1171 # Whether we attempted a clone from pre-generated bundles.
1169 1172 self.clonebundleattempted = False
1170 1173
1171 1174 @util.propertycache
1172 1175 def pulledsubset(self):
1173 1176 """heads of the set of changesets targeted by the pull"""
1174 1177 # compute target subset
1175 1178 if self.heads is None:
1176 1179 # We pulled everything possible
1177 1180 # sync on everything common
1178 1181 c = set(self.common)
1179 1182 ret = list(self.common)
1180 1183 for n in self.rheads:
1181 1184 if n not in c:
1182 1185 ret.append(n)
1183 1186 return ret
1184 1187 else:
1185 1188 # We pulled a specific subset
1186 1189 # sync on this subset
1187 1190 return self.heads
1188 1191
1189 1192 @util.propertycache
1190 1193 def canusebundle2(self):
1191 1194 return not _forcebundle1(self)
1192 1195
1193 1196 @util.propertycache
1194 1197 def remotebundle2caps(self):
1195 1198 return bundle2.bundle2caps(self.remote)
1196 1199
1197 1200 def gettransaction(self):
1198 1201 # deprecated; talk to trmanager directly
1199 1202 return self.trmanager.transaction()
1200 1203
1201 1204 class transactionmanager(util.transactional):
1202 1205 """An object to manage the life cycle of a transaction
1203 1206
1204 1207 It creates the transaction on demand and calls the appropriate hooks when
1205 1208 closing the transaction."""
1206 1209 def __init__(self, repo, source, url):
1207 1210 self.repo = repo
1208 1211 self.source = source
1209 1212 self.url = url
1210 1213 self._tr = None
1211 1214
1212 1215 def transaction(self):
1213 1216 """Return an open transaction object, constructing if necessary"""
1214 1217 if not self._tr:
1215 1218 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1216 1219 self._tr = self.repo.transaction(trname)
1217 1220 self._tr.hookargs['source'] = self.source
1218 1221 self._tr.hookargs['url'] = self.url
1219 1222 return self._tr
1220 1223
1221 1224 def close(self):
1222 1225 """close transaction if created"""
1223 1226 if self._tr is not None:
1224 1227 self._tr.close()
1225 1228
1226 1229 def release(self):
1227 1230 """release transaction if created"""
1228 1231 if self._tr is not None:
1229 1232 self._tr.release()
1230 1233
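# Editor's sketch of the intended life cycle (mirroring how push() and
# pull() use the class above): the transaction is only opened on first
# demand, closed on success, and released unconditionally.
#
#   trmanager = transactionmanager(repo, 'pull', remote.url())
#   try:
#       tr = trmanager.transaction()   # created lazily
#       # ... apply incoming data under ``tr`` ...
#       trmanager.close()
#   finally:
#       trmanager.release()   # harmless if already closed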
1231 1234 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1232 1235 streamclonerequested=None):
1233 1236 """Fetch repository data from a remote.
1234 1237
1235 1238 This is the main function used to retrieve data from a remote repository.
1236 1239
1237 1240 ``repo`` is the local repository to clone into.
1238 1241 ``remote`` is a peer instance.
1239 1242 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1240 1243 default) means to pull everything from the remote.
1241 1244 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1242 1245 default, all remote bookmarks are pulled.
1243 1246 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1244 1247 initialization.
1245 1248 ``streamclonerequested`` is a boolean indicating whether a "streaming
1246 1249 clone" is requested. A "streaming clone" is essentially a raw file copy
1247 1250 of revlogs from the server. This only works when the local repository is
1248 1251 empty. The default value of ``None`` means to respect the server
1249 1252 configuration for preferring stream clones.
1250 1253
1251 1254 Returns the ``pulloperation`` created for this pull.
1252 1255 """
1253 1256 if opargs is None:
1254 1257 opargs = {}
1255 1258 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1256 1259 streamclonerequested=streamclonerequested, **opargs)
1257 1260
1258 1261 peerlocal = pullop.remote.local()
1259 1262 if peerlocal:
1260 1263 missing = set(peerlocal.requirements) - pullop.repo.supported
1261 1264 if missing:
1262 1265 msg = _("required features are not"
1263 1266 " supported in the destination:"
1264 1267 " %s") % (', '.join(sorted(missing)))
1265 1268 raise error.Abort(msg)
1266 1269
1267 1270 wlock = lock = None
1268 1271 try:
1269 1272 wlock = pullop.repo.wlock()
1270 1273 lock = pullop.repo.lock()
1271 1274 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1272 1275 # This should ideally be in _pullbundle2(). However, it needs to run
1273 1276 # before discovery to avoid extra work.
1274 1277 _maybeapplyclonebundle(pullop)
1275 1278 streamclone.maybeperformlegacystreamclone(pullop)
1276 1279 _pulldiscovery(pullop)
1277 1280 if pullop.canusebundle2:
1278 1281 _pullbundle2(pullop)
1279 1282 _pullchangeset(pullop)
1280 1283 _pullphase(pullop)
1281 1284 _pullbookmarks(pullop)
1282 1285 _pullobsolete(pullop)
1283 1286 pullop.trmanager.close()
1284 1287 finally:
1285 1288 lockmod.release(pullop.trmanager, lock, wlock)
1286 1289
1287 1290 return pullop
1288 1291
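# Editor's sketch of a minimal caller (URL is hypothetical): obtain a
# peer and pull everything from it, then inspect the result on the
# returned pulloperation.
#
#   from mercurial import hg
#   other = hg.peer(repo, {}, 'https://example.com/repo')
#   pullop = pull(repo, other, heads=None)
#   if pullop.cgresult:
#       repo.ui.status('changegroup applied\n')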
1289 1292 # list of steps to perform discovery before pull
1290 1293 pulldiscoveryorder = []
1291 1294
1292 1295 # Mapping between step name and function
1293 1296 #
1294 1297 # This exists to help extensions wrap steps if necessary
1295 1298 pulldiscoverymapping = {}
1296 1299
1297 1300 def pulldiscovery(stepname):
1298 1301 """decorator for function performing discovery before pull
1299 1302
1300 1303 The function is added to the step -> function mapping and appended to the
1301 1304 list of steps. Beware that decorated functions will be added in order (this
1302 1305 may matter).
1303 1306
1304 1307 You can only use this decorator for a new step; if you want to wrap a step
1305 1308 from an extension, change the pulldiscoverymapping dictionary directly."""
1306 1309 def dec(func):
1307 1310 assert stepname not in pulldiscoverymapping
1308 1311 pulldiscoverymapping[stepname] = func
1309 1312 pulldiscoveryorder.append(stepname)
1310 1313 return func
1311 1314 return dec
1312 1315
1313 1316 def _pulldiscovery(pullop):
1314 1317 """Run all discovery steps"""
1315 1318 for stepname in pulldiscoveryorder:
1316 1319 step = pulldiscoverymapping[stepname]
1317 1320 step(pullop)
1318 1321
1319 1322 @pulldiscovery('b1:bookmarks')
1320 1323 def _pullbookmarkbundle1(pullop):
1321 1324 """fetch bookmark data in bundle1 case
1322 1325
1323 1326 If not using bundle2, we have to fetch bookmarks before changeset
1324 1327 discovery to reduce the chance and impact of race conditions."""
1325 1328 if pullop.remotebookmarks is not None:
1326 1329 return
1327 1330 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1328 1331 # all known bundle2 servers now support listkeys, but let's be nice
1329 1332 # with new implementations.
1330 1333 return
1331 1334 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1332 1335
1333 1336
1334 1337 @pulldiscovery('changegroup')
1335 1338 def _pulldiscoverychangegroup(pullop):
1336 1339 """discovery phase for the pull
1337 1340
1338 1341 Currently handles changeset discovery only; will change to handle all
1339 1342 discovery at some point."""
1340 1343 tmp = discovery.findcommonincoming(pullop.repo,
1341 1344 pullop.remote,
1342 1345 heads=pullop.heads,
1343 1346 force=pullop.force)
1344 1347 common, fetch, rheads = tmp
1345 1348 nm = pullop.repo.unfiltered().changelog.nodemap
1346 1349 if fetch and rheads:
1347 1350 # If a remote head is filtered locally, put it back in common.
1348 1351 #
1349 1352 # This is a hackish solution to catch most of the "common but locally
1350 1353 # hidden" situations. We do not perform discovery on the unfiltered
1351 1354 # repository because it ends up doing a pathological number of round
1352 1355 # trips for a huge amount of changesets we do not care about.
1353 1356 #
1354 1357 # If a set of such "common but filtered" changesets exists on the server
1355 1358 # but does not include a remote head, we will not be able to detect it.
1356 1359 scommon = set(common)
1357 1360 for n in rheads:
1358 1361 if n in nm:
1359 1362 if n not in scommon:
1360 1363 common.append(n)
1361 1364 if set(rheads).issubset(set(common)):
1362 1365 fetch = []
1363 1366 pullop.common = common
1364 1367 pullop.fetch = fetch
1365 1368 pullop.rheads = rheads
1366 1369
1367 1370 def _pullbundle2(pullop):
1368 1371 """pull data using bundle2
1369 1372
1370 1373 For now, the only supported data is the changegroup."""
1371 1374 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1372 1375
1373 1376 # At the moment we don't do stream clones over bundle2. If that is
1374 1377 # implemented then here's where the check for that will go.
1375 1378 streaming = False
1376 1379
1377 1380 # pulling changegroup
1378 1381 pullop.stepsdone.add('changegroup')
1379 1382
1380 1383 kwargs['common'] = pullop.common
1381 1384 kwargs['heads'] = pullop.heads or pullop.rheads
1382 1385 kwargs['cg'] = pullop.fetch
1383 1386
1384 1387 ui = pullop.repo.ui
1385 1388 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1386 1389 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1387 1390 if (not legacyphase and hasbinaryphase):
1388 1391 kwargs['phases'] = True
1389 1392 pullop.stepsdone.add('phases')
1390 1393
1391 1394 if 'listkeys' in pullop.remotebundle2caps:
1392 1395 if 'phases' not in pullop.stepsdone:
1393 1396 kwargs['listkeys'] = ['phases']
1394 1397 if pullop.remotebookmarks is None:
1395 1398 # make sure to always include bookmark data when migrating
1396 1399 # `hg incoming --bundle` to using this function.
1397 1400 kwargs.setdefault('listkeys', []).append('bookmarks')
1398 1401
1399 1402 # If this is a full pull / clone and the server supports the clone bundles
1400 1403 # feature, tell the server whether we attempted a clone bundle. The
1401 1404 # presence of this flag indicates the client supports clone bundles. This
1402 1405 # will enable the server to treat clients that support clone bundles
1403 1406 # differently from those that don't.
1404 1407 if (pullop.remote.capable('clonebundles')
1405 1408 and pullop.heads is None and list(pullop.common) == [nullid]):
1406 1409 kwargs['cbattempted'] = pullop.clonebundleattempted
1407 1410
1408 1411 if streaming:
1409 1412 pullop.repo.ui.status(_('streaming all changes\n'))
1410 1413 elif not pullop.fetch:
1411 1414 pullop.repo.ui.status(_("no changes found\n"))
1412 1415 pullop.cgresult = 0
1413 1416 else:
1414 1417 if pullop.heads is None and list(pullop.common) == [nullid]:
1415 1418 pullop.repo.ui.status(_("requesting all changes\n"))
1416 1419 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1417 1420 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1418 1421 if obsolete.commonversion(remoteversions) is not None:
1419 1422 kwargs['obsmarkers'] = True
1420 1423 pullop.stepsdone.add('obsmarkers')
1421 1424 _pullbundle2extraprepare(pullop, kwargs)
1422 1425 bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
1423 1426 try:
1424 1427 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1425 1428 except bundle2.AbortFromPart as exc:
1426 1429 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1427 1430 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1428 1431 except error.BundleValueError as exc:
1429 1432 raise error.Abort(_('missing support for %s') % exc)
1430 1433
1431 1434 if pullop.fetch:
1432 1435 pullop.cgresult = bundle2.combinechangegroupresults(op)
1433 1436
1434 1437 # processing phases change
1435 1438 for namespace, value in op.records['listkeys']:
1436 1439 if namespace == 'phases':
1437 1440 _pullapplyphases(pullop, value)
1438 1441
1439 1442 # processing bookmark update
1440 1443 for namespace, value in op.records['listkeys']:
1441 1444 if namespace == 'bookmarks':
1442 1445 pullop.remotebookmarks = value
1443 1446
1444 1447 # bookmark data were either already there or pulled in the bundle
1445 1448 if pullop.remotebookmarks is not None:
1446 1449 _pullbookmarks(pullop)
1447 1450
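# Illustrative sketch (not part of the original code): for a plain `hg pull`
# from a bundle2 server with binary phase support, the getbundle call above
# ends up roughly equivalent to:
#
#   pullop.remote.getbundle('pull',
#                           bundlecaps={'HG20', 'bundle2=<encoded caps>'},
#                           common=pullop.common,
#                           heads=pullop.heads or pullop.rheads,
#                           cg=pullop.fetch, phases=True)
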
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""

def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so that we
    # don't open one for nothing and break a future, useful rollback
    # call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because the "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)

def _pullphase(pullop):
    # Get phases data from the remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # remote is old or publishing; all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

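# For reference, the `remotephases` dict consumed above is the pushkey
# 'phases' namespace, a str -> str mapping; sketches of the two shapes seen
# here (node hashes abbreviated and hypothetical):
#
#   {'publishing': 'True'}        # publishing server
#   {'6c7c32cde9b0...': '1'}      # draft roots on a non-publishing server
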
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `pullop.gettransaction` callable returns the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr

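# The 'obsolete' pushkey namespace read above splits the marker data across
# numbered keys to stay within pushkey value-size limits; a sketch of a reply
# (values are base85-encoded binary markers, abbreviated and made up here):
#
#   {'dump0': 'Vr6HT1f...', 'dump1': 'Xk2pQ9a...'}
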
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {'HG20'}
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps

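# A sketch of the resulting value (the quoted blob varies with the
# repository's bundle2 capabilities):
#
#   {'HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02%0A...'}
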
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, modify the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec

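# Illustrative use only (hypothetical step name and functions): an extension
# defining a new part generator would register it like so:
#
#   @getbundle2partsgenerator('mypart')
#   def _getbundlemypart(bundler, repo, source, bundlecaps=None,
#                        b2caps=None, **kwargs):
#       bundler.newpart('mypart', data=b'...')
#
# whereas wrapping an existing step replaces its mapping entry directly:
#
#   getbundle2partsmapping['changegroup'] = mywrapper
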
def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False

def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
    passed.

    Returns an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        return changegroup.makestream(repo, outgoing, '01', source,
                                      bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    return bundler.getchunks()

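# A minimal consumption sketch (not from the original module; the wire
# protocol server is the real caller): the result is streamed chunk by chunk
# rather than joined in memory:
#
#   for chunk in getbundlechunks(repo, 'serve', heads=heads, common=common,
#                                bundlecaps=bundlecaps):
#       sendtoclient(chunk)
#
# (sendtoclient, heads, common and bundlecaps are hypothetical stand-ins.)
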
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cgstream = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        if outgoing.missing:
            cgstream = changegroup.makestream(repo, outgoing, version, source,
                                              bundlecaps=bundlecaps)

    if cgstream:
        part = bundler.newpart('changegroup', data=cgstream)
        if cgversions:
            part.addparam('version', version)
        part.addparam('nbchanges', '%d' % len(outgoing.missing),
                      mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        bundle2.buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle"""
    if kwargs.get('phases', False):
        if 'heads' not in b2caps.get('phases', ()):
            raise ValueError(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phases for now
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)

def _getbookmarks(repo, **kwargs):
    """Returns bookmark to node mapping.

    This function is primarily used to generate the `bookmarks` bundle2 part.
    It is a separate function in order to make it easy to wrap it
    in extensions. Passing `kwargs` to the function makes it easy to
    add new parameters in extensions.
    """

    return dict(bookmod.listbinbookmarks(repo))

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

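# For reference, the 'hashed' form checked above is built by the client from
# the heads it observed before creating the bundle, e.g.:
#
#   their_heads = ['hashed',
#                  hashlib.sha1(''.join(sorted(observed_heads))).digest()]
#
# (observed_heads is a hypothetical name for the node list from discovery.)
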
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and has
    a mechanism to check that no push race occurred between the creation of
    the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. Each dict has a ``URL`` key corresponding to
    the entry's URL; any other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

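# A sketch with a made-up manifest line; the entry:
#
#   https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# parses to:
#
#   {'URL': 'https://example.com/full.hg', 'BUNDLESPEC': 'gzip-v2',
#    'REQUIRESNI': 'true', 'COMPRESSION': 'gzip', 'VERSION': 'v2'}
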
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                comp, version, params = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and (comp != 'UN' or version != 's1'):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because we cannot determine if it is '
                          'a stream clone bundle\n' % entry['URL'])
            continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]

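# Sorting is driven by the `ui.clonebundleprefers` config; a hypothetical
# setting such as:
#
#   [ui]
#   clonebundleprefers = COMPRESSION=zstd, COMPRESSION=gzip
#
# becomes [('COMPRESSION', 'zstd'), ('COMPRESSION', 'gzip')] above, ranking
# zstd bundles first, then gzip, and leaving other entries in manifest order.
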
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') % e.reason)

        return False