exchange: drop unused '_getbookmarks' function...
Boris Feld
r35032:6370ed65 default
@@ -1,2126 +1,2115 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 hex,
17 17 nullid,
18 18 )
19 19 from . import (
20 20 bookmarks as bookmod,
21 21 bundle2,
22 22 changegroup,
23 23 discovery,
24 24 error,
25 25 lock as lockmod,
26 26 obsolete,
27 27 phases,
28 28 pushkey,
29 29 pycompat,
30 30 scmutil,
31 31 sslutil,
32 32 streamclone,
33 33 url as urlmod,
34 34 util,
35 35 )
36 36
37 37 urlerr = util.urlerr
38 38 urlreq = util.urlreq
39 39
40 40 # Maps bundle version human names to changegroup versions.
41 41 _bundlespeccgversions = {'v1': '01',
42 42 'v2': '02',
43 43 'packed1': 's1',
44 44 'bundle2': '02', #legacy
45 45 }
46 46
47 47 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
48 48 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
49 49
50 50 def parsebundlespec(repo, spec, strict=True, externalnames=False):
51 51 """Parse a bundle string specification into parts.
52 52
53 53 Bundle specifications denote a well-defined bundle/exchange format.
54 54 The content of a given specification should not change over time in
55 55 order to ensure that bundles produced by a newer version of Mercurial are
56 56 readable by an older version.
57 57
58 58 The string currently has the form:
59 59
60 60 <compression>-<type>[;<parameter0>[;<parameter1>]]
61 61
62 62 Where <compression> is one of the supported compression formats
63 63 and <type> is (currently) a version string. A ";" can follow the type and
64 64 all text afterwards is interpreted as URI encoded, ";" delimited key=value
65 65 pairs.
66 66
67 67 If ``strict`` is True (the default) <compression> is required. Otherwise,
68 68 it is optional.
69 69
70 70 If ``externalnames`` is False (the default), the human-centric names will
71 71 be converted to their internal representation.
72 72
73 73 Returns a 3-tuple of (compression, version, parameters). Compression will
74 74 be ``None`` if not in strict mode and a compression isn't defined.
75 75
76 76 An ``InvalidBundleSpecification`` is raised when the specification is
77 77 not syntactically well formed.
78 78
79 79 An ``UnsupportedBundleSpecification`` is raised when the compression or
80 80 bundle type/version is not recognized.
81 81
82 82 Note: this function will likely eventually return a more complex data
83 83 structure, including bundle2 part information.
84 84 """
85 85 def parseparams(s):
86 86 if ';' not in s:
87 87 return s, {}
88 88
89 89 params = {}
90 90 version, paramstr = s.split(';', 1)
91 91
92 92 for p in paramstr.split(';'):
93 93 if '=' not in p:
94 94 raise error.InvalidBundleSpecification(
95 95 _('invalid bundle specification: '
96 96 'missing "=" in parameter: %s') % p)
97 97
98 98 key, value = p.split('=', 1)
99 99 key = urlreq.unquote(key)
100 100 value = urlreq.unquote(value)
101 101 params[key] = value
102 102
103 103 return version, params
104 104
105 105
106 106 if strict and '-' not in spec:
107 107 raise error.InvalidBundleSpecification(
108 108 _('invalid bundle specification; '
109 109 'must be prefixed with compression: %s') % spec)
110 110
111 111 if '-' in spec:
112 112 compression, version = spec.split('-', 1)
113 113
114 114 if compression not in util.compengines.supportedbundlenames:
115 115 raise error.UnsupportedBundleSpecification(
116 116 _('%s compression is not supported') % compression)
117 117
118 118 version, params = parseparams(version)
119 119
120 120 if version not in _bundlespeccgversions:
121 121 raise error.UnsupportedBundleSpecification(
122 122 _('%s is not a recognized bundle version') % version)
123 123 else:
124 124 # Value could be just the compression or just the version, in which
125 125 # case some defaults are assumed (but only when not in strict mode).
126 126 assert not strict
127 127
128 128 spec, params = parseparams(spec)
129 129
130 130 if spec in util.compengines.supportedbundlenames:
131 131 compression = spec
132 132 version = 'v1'
133 133 # Generaldelta repos require v2.
134 134 if 'generaldelta' in repo.requirements:
135 135 version = 'v2'
136 136 # Modern compression engines require v2.
137 137 if compression not in _bundlespecv1compengines:
138 138 version = 'v2'
139 139 elif spec in _bundlespeccgversions:
140 140 if spec == 'packed1':
141 141 compression = 'none'
142 142 else:
143 143 compression = 'bzip2'
144 144 version = spec
145 145 else:
146 146 raise error.UnsupportedBundleSpecification(
147 147 _('%s is not a recognized bundle specification') % spec)
148 148
149 149 # Bundle version 1 only supports a known set of compression engines.
150 150 if version == 'v1' and compression not in _bundlespecv1compengines:
151 151 raise error.UnsupportedBundleSpecification(
152 152 _('compression engine %s is not supported on v1 bundles') %
153 153 compression)
154 154
155 155 # The specification for packed1 can optionally declare the data formats
156 156 # required to apply it. If we see this metadata, compare against what the
157 157 # repo supports and error if the bundle isn't compatible.
158 158 if version == 'packed1' and 'requirements' in params:
159 159 requirements = set(params['requirements'].split(','))
160 160 missingreqs = requirements - repo.supportedformats
161 161 if missingreqs:
162 162 raise error.UnsupportedBundleSpecification(
163 163 _('missing support for repository features: %s') %
164 164 ', '.join(sorted(missingreqs)))
165 165
166 166 if not externalnames:
167 167 engine = util.compengines.forbundlename(compression)
168 168 compression = engine.bundletype()[1]
169 169 version = _bundlespeccgversions[version]
170 170 return compression, version, params
171 171
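# Illustrative sketch (not part of the original file): with the mappings
# above, parsing resolves human names to internal identifiers, e.g.
#
#   parsebundlespec(repo, 'bzip2-v2')
#   # -> ('BZ', '02', {})
#   parsebundlespec(repo, 'gzip-v2;exp=quux', externalnames=True)
#   # -> ('gzip', 'v2', {'exp': 'quux'})
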
172 172 def readbundle(ui, fh, fname, vfs=None):
173 173 header = changegroup.readexactly(fh, 4)
174 174
175 175 alg = None
176 176 if not fname:
177 177 fname = "stream"
178 178 if not header.startswith('HG') and header.startswith('\0'):
179 179 fh = changegroup.headerlessfixup(fh, header)
180 180 header = "HG10"
181 181 alg = 'UN'
182 182 elif vfs:
183 183 fname = vfs.join(fname)
184 184
185 185 magic, version = header[0:2], header[2:4]
186 186
187 187 if magic != 'HG':
188 188 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
189 189 if version == '10':
190 190 if alg is None:
191 191 alg = changegroup.readexactly(fh, 2)
192 192 return changegroup.cg1unpacker(fh, alg)
193 193 elif version.startswith('2'):
194 194 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
195 195 elif version == 'S1':
196 196 return streamclone.streamcloneapplier(fh)
197 197 else:
198 198 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
199 199
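# Illustrative sketch (not part of the original file): the 4-byte header
# sniffed above carries the magic and version, e.g.
#
#   'HG10BZ...' -> bundle1, bzip2-compressed (cg1unpacker)
#   'HG20...'   -> bundle2 (compression declared in its own params)
#   'HGS1UN...' -> packed1 stream clone bundle
#
# A headerless stream starting with '\0' is fixed up and handled as an
# uncompressed bundle1 ('HG10' + 'UN').
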
200 200 def getbundlespec(ui, fh):
201 201 """Infer the bundlespec from a bundle file handle.
202 202
203 203 This function seeks the input file handle; the original seek position is
204 204 not restored.
205 205 """
206 206 def speccompression(alg):
207 207 try:
208 208 return util.compengines.forbundletype(alg).bundletype()[0]
209 209 except KeyError:
210 210 return None
211 211
212 212 b = readbundle(ui, fh, None)
213 213 if isinstance(b, changegroup.cg1unpacker):
214 214 alg = b._type
215 215 if alg == '_truncatedBZ':
216 216 alg = 'BZ'
217 217 comp = speccompression(alg)
218 218 if not comp:
219 219 raise error.Abort(_('unknown compression algorithm: %s') % alg)
220 220 return '%s-v1' % comp
221 221 elif isinstance(b, bundle2.unbundle20):
222 222 if 'Compression' in b.params:
223 223 comp = speccompression(b.params['Compression'])
224 224 if not comp:
225 225 raise error.Abort(_('unknown compression algorithm: %s') % comp)
226 226 else:
227 227 comp = 'none'
228 228
229 229 version = None
230 230 for part in b.iterparts():
231 231 if part.type == 'changegroup':
232 232 version = part.params['version']
233 233 if version in ('01', '02'):
234 234 version = 'v2'
235 235 else:
236 236 raise error.Abort(_('changegroup version %s does not have '
237 237 'a known bundlespec') % version,
238 238 hint=_('try upgrading your Mercurial '
239 239 'client'))
240 240
241 241 if not version:
242 242 raise error.Abort(_('could not identify changegroup version in '
243 243 'bundle'))
244 244
245 245 return '%s-%s' % (comp, version)
246 246 elif isinstance(b, streamclone.streamcloneapplier):
247 247 requirements = streamclone.readbundle1header(fh)[2]
248 248 params = 'requirements=%s' % ','.join(sorted(requirements))
249 249 return 'none-packed1;%s' % urlreq.quote(params)
250 250 else:
251 251 raise error.Abort(_('unknown bundle type: %s') % b)
252 252
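# Illustrative usage sketch (assuming 'ui' and a bundle file on disk):
#
#   fh = open('dump.hg', 'rb')
#   spec = getbundlespec(ui, fh)  # e.g. 'bzip2-v1' or 'none-packed1;...'
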
253 253 def _computeoutgoing(repo, heads, common):
254 254 """Computes which revs are outgoing given a set of common
255 255 and a set of heads.
256 256
257 257 This is a separate function so extensions can have access to
258 258 the logic.
259 259
260 260 Returns a discovery.outgoing object.
261 261 """
262 262 cl = repo.changelog
263 263 if common:
264 264 hasnode = cl.hasnode
265 265 common = [n for n in common if hasnode(n)]
266 266 else:
267 267 common = [nullid]
268 268 if not heads:
269 269 heads = cl.heads()
270 270 return discovery.outgoing(repo, common, heads)
271 271
272 272 def _forcebundle1(op):
273 273 """return true if a pull/push must use bundle1
274 274
275 275 This function is used to allow testing of the older bundle version"""
276 276 ui = op.repo.ui
277 277 forcebundle1 = False
278 278 # The goal of this config is to allow developers to choose the bundle
279 279 # version used during exchange. This is especially handy during tests.
280 280 # Value is a list of bundle versions to pick from; the highest version
281 281 # should be used.
282 282 #
283 283 # developer config: devel.legacy.exchange
284 284 exchange = ui.configlist('devel', 'legacy.exchange')
285 285 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
286 286 return forcebundle1 or not op.remote.capable('bundle2')
287 287
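# Illustrative sketch (not part of the original file): the developer config
# read above can be set in an hgrc to force the legacy exchange format:
#
#   [devel]
#   legacy.exchange = bundle1
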
288 288 class pushoperation(object):
289 289 """A object that represent a single push operation
290 290
291 291 Its purpose is to carry push related state and very common operations.
292 292
293 293 A new pushoperation should be created at the beginning of each push and
294 294 discarded afterward.
295 295 """
296 296
297 297 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
298 298 bookmarks=(), pushvars=None):
299 299 # repo we push from
300 300 self.repo = repo
301 301 self.ui = repo.ui
302 302 # repo we push to
303 303 self.remote = remote
304 304 # force option provided
305 305 self.force = force
306 306 # revs to be pushed (None is "all")
307 307 self.revs = revs
308 308 # bookmark explicitly pushed
309 309 self.bookmarks = bookmarks
310 310 # allow push of new branch
311 311 self.newbranch = newbranch
312 312 # steps already performed
313 313 # (used to check what steps have already been performed through bundle2)
314 314 self.stepsdone = set()
315 315 # Integer version of the changegroup push result
316 316 # - None means nothing to push
317 317 # - 0 means HTTP error
318 318 # - 1 means we pushed and remote head count is unchanged *or*
319 319 # we have outgoing changesets but refused to push
320 320 # - other values as described by addchangegroup()
321 321 self.cgresult = None
322 322 # Boolean value for the bookmark push
323 323 self.bkresult = None
324 324 # discover.outgoing object (contains common and outgoing data)
325 325 self.outgoing = None
326 326 # all remote topological heads before the push
327 327 self.remoteheads = None
328 328 # Details of the remote branch pre and post push
329 329 #
330 330 # mapping: {'branch': ([remoteheads],
331 331 # [newheads],
332 332 # [unsyncedheads],
333 333 # [discardedheads])}
334 334 # - branch: the branch name
335 335 # - remoteheads: the list of remote heads known locally
336 336 # None if the branch is new
337 337 # - newheads: the new remote heads (known locally) with outgoing pushed
338 338 # - unsyncedheads: the list of remote heads unknown locally.
339 339 # - discardedheads: the list of remote heads made obsolete by the push
340 340 self.pushbranchmap = None
341 341 # testable as a boolean indicating if any nodes are missing locally.
342 342 self.incoming = None
343 343 # summary of the remote phase situation
344 344 self.remotephases = None
345 345 # phase changes that must be pushed alongside the changesets
346 346 self.outdatedphases = None
347 347 # phase changes that must be pushed if the changeset push fails
348 348 self.fallbackoutdatedphases = None
349 349 # outgoing obsmarkers
350 350 self.outobsmarkers = set()
351 351 # outgoing bookmarks
352 352 self.outbookmarks = []
353 353 # transaction manager
354 354 self.trmanager = None
355 355 # map { pushkey partid -> callback handling failure}
356 356 # used to handle exception from mandatory pushkey part failure
357 357 self.pkfailcb = {}
358 358 # an iterable of pushvars or None
359 359 self.pushvars = pushvars
360 360
361 361 @util.propertycache
362 362 def futureheads(self):
363 363 """future remote heads if the changeset push succeeds"""
364 364 return self.outgoing.missingheads
365 365
366 366 @util.propertycache
367 367 def fallbackheads(self):
368 368 """future remote heads if the changeset push fails"""
369 369 if self.revs is None:
370 370 # no target to push, all common heads are relevant
371 371 return self.outgoing.commonheads
372 372 unfi = self.repo.unfiltered()
373 373 # I want cheads = heads(::missingheads and ::commonheads)
374 374 # (missingheads is revs with secret changeset filtered out)
375 375 #
376 376 # This can be expressed as:
377 377 # cheads = ( (missingheads and ::commonheads)
378 378 # + (commonheads and ::missingheads)
379 379 # )
380 380 #
381 381 # while trying to push we already computed the following:
382 382 # common = (::commonheads)
383 383 # missing = ((commonheads::missingheads) - commonheads)
384 384 #
385 385 # We can pick:
386 386 # * missingheads part of common (::commonheads)
387 387 common = self.outgoing.common
388 388 nm = self.repo.changelog.nodemap
389 389 cheads = [node for node in self.revs if nm[node] in common]
390 390 # and
391 391 # * commonheads parents on missing
392 392 revset = unfi.set('%ln and parents(roots(%ln))',
393 393 self.outgoing.commonheads,
394 394 self.outgoing.missing)
395 395 cheads.extend(c.node() for c in revset)
396 396 return cheads
397 397
398 398 @property
399 399 def commonheads(self):
400 400 """set of all common heads after changeset bundle push"""
401 401 if self.cgresult:
402 402 return self.futureheads
403 403 else:
404 404 return self.fallbackheads
405 405
406 406 # mapping of messages used when pushing bookmarks
407 407 bookmsgmap = {'update': (_("updating bookmark %s\n"),
408 408 _('updating bookmark %s failed!\n')),
409 409 'export': (_("exporting bookmark %s\n"),
410 410 _('exporting bookmark %s failed!\n')),
411 411 'delete': (_("deleting remote bookmark %s\n"),
412 412 _('deleting remote bookmark %s failed!\n')),
413 413 }
414 414
415 415
416 416 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
417 417 opargs=None):
418 418 '''Push outgoing changesets (limited by revs) from a local
419 419 repository to remote. Return an integer:
420 420 - None means nothing to push
421 421 - 0 means HTTP error
422 422 - 1 means we pushed and remote head count is unchanged *or*
423 423 we have outgoing changesets but refused to push
424 424 - other values as described by addchangegroup()
425 425 '''
426 426 if opargs is None:
427 427 opargs = {}
428 428 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
429 429 **pycompat.strkwargs(opargs))
430 430 if pushop.remote.local():
431 431 missing = (set(pushop.repo.requirements)
432 432 - pushop.remote.local().supported)
433 433 if missing:
434 434 msg = _("required features are not"
435 435 " supported in the destination:"
436 436 " %s") % (', '.join(sorted(missing)))
437 437 raise error.Abort(msg)
438 438
439 439 if not pushop.remote.canpush():
440 440 raise error.Abort(_("destination does not support push"))
441 441
442 442 if not pushop.remote.capable('unbundle'):
443 443 raise error.Abort(_('cannot push: destination does not support the '
444 444 'unbundle wire protocol command'))
445 445
446 446 # get lock as we might write phase data
447 447 wlock = lock = None
448 448 try:
449 449 # bundle2 push may receive a reply bundle touching bookmarks or other
450 450 # things requiring the wlock. Take it now to ensure proper ordering.
451 451 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
452 452 if (not _forcebundle1(pushop)) and maypushback:
453 453 wlock = pushop.repo.wlock()
454 454 lock = pushop.repo.lock()
455 455 pushop.trmanager = transactionmanager(pushop.repo,
456 456 'push-response',
457 457 pushop.remote.url())
458 458 except IOError as err:
459 459 if err.errno != errno.EACCES:
460 460 raise
461 461 # source repo cannot be locked.
462 462 # We do not abort the push, but just disable the local phase
463 463 # synchronisation.
464 464 msg = 'cannot lock source repository: %s\n' % err
465 465 pushop.ui.debug(msg)
466 466
467 467 with wlock or util.nullcontextmanager(), \
468 468 lock or util.nullcontextmanager(), \
469 469 pushop.trmanager or util.nullcontextmanager():
470 470 pushop.repo.checkpush(pushop)
471 471 _pushdiscovery(pushop)
472 472 if not _forcebundle1(pushop):
473 473 _pushbundle2(pushop)
474 474 _pushchangeset(pushop)
475 475 _pushsyncphase(pushop)
476 476 _pushobsolete(pushop)
477 477 _pushbookmark(pushop)
478 478
479 479 return pushop
480 480
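# Illustrative usage sketch (assuming 'repo' and a peer 'remote'):
#
#   pushop = push(repo, remote, bookmarks=['@'])
#   if pushop.cgresult == 0:
#       repo.ui.warn('push failed with an HTTP error\n')
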
481 481 # list of steps to perform discovery before push
482 482 pushdiscoveryorder = []
483 483
484 484 # Mapping between step name and function
485 485 #
486 486 # This exists to help extensions wrap steps if necessary
487 487 pushdiscoverymapping = {}
488 488
489 489 def pushdiscovery(stepname):
490 490 """decorator for function performing discovery before push
491 491
492 492 The function is added to the step -> function mapping and appended to the
493 493 list of steps. Beware that decorated functions will be added in order (this
494 494 may matter).
495 495
496 496 You can only use this decorator for a new step; if you want to wrap a step
497 497 from an extension, change the pushdiscovery dictionary directly."""
498 498 def dec(func):
499 499 assert stepname not in pushdiscoverymapping
500 500 pushdiscoverymapping[stepname] = func
501 501 pushdiscoveryorder.append(stepname)
502 502 return func
503 503 return dec
504 504
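# Illustrative sketch (hypothetical extension code): registering an extra
# discovery step through the decorator above.
#
#   @pushdiscovery('mydata')
#   def _pushdiscoverymydata(pushop):
#       pushop.ui.debug('discovering mydata to push\n')
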
505 505 def _pushdiscovery(pushop):
506 506 """Run all discovery steps"""
507 507 for stepname in pushdiscoveryorder:
508 508 step = pushdiscoverymapping[stepname]
509 509 step(pushop)
510 510
511 511 @pushdiscovery('changeset')
512 512 def _pushdiscoverychangeset(pushop):
513 513 """discover the changeset that need to be pushed"""
514 514 fci = discovery.findcommonincoming
515 515 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
516 516 common, inc, remoteheads = commoninc
517 517 fco = discovery.findcommonoutgoing
518 518 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
519 519 commoninc=commoninc, force=pushop.force)
520 520 pushop.outgoing = outgoing
521 521 pushop.remoteheads = remoteheads
522 522 pushop.incoming = inc
523 523
524 524 @pushdiscovery('phase')
525 525 def _pushdiscoveryphase(pushop):
526 526 """discover the phase that needs to be pushed
527 527
528 528 (computed for both success and failure case for changesets push)"""
529 529 outgoing = pushop.outgoing
530 530 unfi = pushop.repo.unfiltered()
531 531 remotephases = pushop.remote.listkeys('phases')
532 532 if (pushop.ui.configbool('ui', '_usedassubrepo')
533 533 and remotephases # server supports phases
534 534 and not pushop.outgoing.missing # no changesets to be pushed
535 535 and remotephases.get('publishing', False)):
536 536 # When:
537 537 # - this is a subrepo push
538 538 # - and the remote supports phases
539 539 # - and no changesets are to be pushed
540 540 # - and the remote is publishing
541 541 # We may be in the issue 3871 case!
542 542 # We drop the phase synchronisation usually done as a courtesy,
543 543 # since it could publish changesets that are still draft
544 544 # on the remote.
545 545 pushop.outdatedphases = []
546 546 pushop.fallbackoutdatedphases = []
547 547 return
548 548
549 549 pushop.remotephases = phases.remotephasessummary(pushop.repo,
550 550 pushop.fallbackheads,
551 551 remotephases)
552 552 droots = pushop.remotephases.draftroots
553 553
554 554 extracond = ''
555 555 if not pushop.remotephases.publishing:
556 556 extracond = ' and public()'
557 557 revset = 'heads((%%ln::%%ln) %s)' % extracond
558 558 # Get the list of all revs draft on remote but public here.
559 559 # XXX Beware that the revset breaks if droots is not strictly
560 560 # XXX roots; we may want to ensure it is, but that is costly
561 561 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
562 562 if not outgoing.missing:
563 563 future = fallback
564 564 else:
565 565 # add the changesets we are going to push as draft
566 566 #
567 567 # should not be necessary for a publishing server, but because of an
568 568 # issue fixed in xxxxx we have to do it anyway.
569 569 fdroots = list(unfi.set('roots(%ln + %ln::)',
570 570 outgoing.missing, droots))
571 571 fdroots = [f.node() for f in fdroots]
572 572 future = list(unfi.set(revset, fdroots, pushop.futureheads))
573 573 pushop.outdatedphases = future
574 574 pushop.fallbackoutdatedphases = fallback
575 575
576 576 @pushdiscovery('obsmarker')
577 577 def _pushdiscoveryobsmarkers(pushop):
578 578 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
579 579 and pushop.repo.obsstore
580 580 and 'obsolete' in pushop.remote.listkeys('namespaces')):
581 581 repo = pushop.repo
582 582 # very naive computation, which can be quite expensive on a big repo.
583 583 # However: evolution is currently slow on them anyway.
584 584 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
585 585 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
586 586
587 587 @pushdiscovery('bookmarks')
588 588 def _pushdiscoverybookmarks(pushop):
589 589 ui = pushop.ui
590 590 repo = pushop.repo.unfiltered()
591 591 remote = pushop.remote
592 592 ui.debug("checking for updated bookmarks\n")
593 593 ancestors = ()
594 594 if pushop.revs:
595 595 revnums = map(repo.changelog.rev, pushop.revs)
596 596 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
597 597 remotebookmark = remote.listkeys('bookmarks')
598 598
599 599 explicit = set([repo._bookmarks.expandname(bookmark)
600 600 for bookmark in pushop.bookmarks])
601 601
602 602 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
603 603 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
604 604
605 605 def safehex(x):
606 606 if x is None:
607 607 return x
608 608 return hex(x)
609 609
610 610 def hexifycompbookmarks(bookmarks):
611 611 for b, scid, dcid in bookmarks:
612 612 yield b, safehex(scid), safehex(dcid)
613 613
614 614 comp = [hexifycompbookmarks(marks) for marks in comp]
615 615 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
616 616
617 617 for b, scid, dcid in advsrc:
618 618 if b in explicit:
619 619 explicit.remove(b)
620 620 if not ancestors or repo[scid].rev() in ancestors:
621 621 pushop.outbookmarks.append((b, dcid, scid))
622 622 # search for added bookmarks
623 623 for b, scid, dcid in addsrc:
624 624 if b in explicit:
625 625 explicit.remove(b)
626 626 pushop.outbookmarks.append((b, '', scid))
627 627 # search for overwritten bookmarks
628 628 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
629 629 if b in explicit:
630 630 explicit.remove(b)
631 631 pushop.outbookmarks.append((b, dcid, scid))
632 632 # search for bookmarks to delete
633 633 for b, scid, dcid in adddst:
634 634 if b in explicit:
635 635 explicit.remove(b)
636 636 # treat as "deleted locally"
637 637 pushop.outbookmarks.append((b, dcid, ''))
638 638 # identical bookmarks shouldn't get reported
639 639 for b, scid, dcid in same:
640 640 if b in explicit:
641 641 explicit.remove(b)
642 642
643 643 if explicit:
644 644 explicit = sorted(explicit)
645 645 # we should probably list all of them
646 646 ui.warn(_('bookmark %s does not exist on the local '
647 647 'or remote repository!\n') % explicit[0])
648 648 pushop.bkresult = 2
649 649
650 650 pushop.outbookmarks.sort()
651 651
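# Illustrative sketch (not part of the original file): the 8-tuple returned
# by comparebookmarks() drives the queueing above. For example, a bookmark
# present only locally lands in 'addsrc' and is queued for export as
#
#   ('feature', '', '0123abcd...')  # (name, old remote node, new node)
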
652 652 def _pushcheckoutgoing(pushop):
653 653 outgoing = pushop.outgoing
654 654 unfi = pushop.repo.unfiltered()
655 655 if not outgoing.missing:
656 656 # nothing to push
657 657 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
658 658 return False
659 659 # something to push
660 660 if not pushop.force:
661 661 # if repo.obsstore is empty --> no obsolete changesets,
662 662 # so we can skip the iteration
663 663 if unfi.obsstore:
664 664 # these messages are defined here to satisfy the 80-char line limit
665 665 mso = _("push includes obsolete changeset: %s!")
666 666 mspd = _("push includes phase-divergent changeset: %s!")
667 667 mscd = _("push includes content-divergent changeset: %s!")
668 668 mst = {"orphan": _("push includes orphan changeset: %s!"),
669 669 "phase-divergent": mspd,
670 670 "content-divergent": mscd}
671 671 # If we are about to push and there is at least one
672 672 # obsolete or unstable changeset in missing, then at
673 673 # least one of the missing heads will be obsolete or
674 674 # unstable. So checking heads only is ok.
675 675 for node in outgoing.missingheads:
676 676 ctx = unfi[node]
677 677 if ctx.obsolete():
678 678 raise error.Abort(mso % ctx)
679 679 elif ctx.isunstable():
680 680 # TODO print more than one instability in the abort
681 681 # message
682 682 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
683 683
684 684 discovery.checkheads(pushop)
685 685 return True
686 686
687 687 # List of names of steps to perform for an outgoing bundle2, order matters.
688 688 b2partsgenorder = []
689 689
690 690 # Mapping between step name and function
691 691 #
692 692 # This exists to help extensions wrap steps if necessary
693 693 b2partsgenmapping = {}
694 694
695 695 def b2partsgenerator(stepname, idx=None):
696 696 """decorator for function generating bundle2 part
697 697
698 698 The function is added to the step -> function mapping and appended to the
699 699 list of steps. Beware that decorated functions will be added in order
700 700 (this may matter).
701 701
702 702 You can only use this decorator for new steps; if you want to wrap a step
703 703 from an extension, change the b2partsgenmapping dictionary directly."""
704 704 def dec(func):
705 705 assert stepname not in b2partsgenmapping
706 706 b2partsgenmapping[stepname] = func
707 707 if idx is None:
708 708 b2partsgenorder.append(stepname)
709 709 else:
710 710 b2partsgenorder.insert(idx, stepname)
711 711 return func
712 712 return dec
713 713
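# Illustrative sketch (hypothetical extension code): generating an extra
# bundle2 part during push; returning a callable registers a reply handler.
#
#   @b2partsgenerator('mypart')
#   def _pushb2mypart(pushop, bundler):
#       if 'mypart' in pushop.stepsdone:
#           return
#       pushop.stepsdone.add('mypart')
#       bundler.newpart('mypart', data='payload', mandatory=False)
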
714 714 def _pushb2ctxcheckheads(pushop, bundler):
715 715 """Generate race condition checking parts
716 716
717 717 Exists as an independent function to aid extensions
718 718 """
719 719 # * 'force' does not check for push races,
720 720 # * if we don't push anything, there is nothing to check.
721 721 if not pushop.force and pushop.outgoing.missingheads:
722 722 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
723 723 emptyremote = pushop.pushbranchmap is None
724 724 if not allowunrelated or emptyremote:
725 725 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
726 726 else:
727 727 affected = set()
728 728 for branch, heads in pushop.pushbranchmap.iteritems():
729 729 remoteheads, newheads, unsyncedheads, discardedheads = heads
730 730 if remoteheads is not None:
731 731 remote = set(remoteheads)
732 732 affected |= set(discardedheads) & remote
733 733 affected |= remote - set(newheads)
734 734 if affected:
735 735 data = iter(sorted(affected))
736 736 bundler.newpart('check:updated-heads', data=data)
737 737
738 738 def _pushing(pushop):
739 739 """return True if we are pushing anything"""
740 740 return bool(pushop.outgoing.missing
741 741 or pushop.outdatedphases
742 742 or pushop.outobsmarkers
743 743 or pushop.outbookmarks)
744 744
745 745 @b2partsgenerator('check-phases')
746 746 def _pushb2checkphases(pushop, bundler):
747 747 """insert phase move checking"""
748 748 if not _pushing(pushop) or pushop.force:
749 749 return
750 750 b2caps = bundle2.bundle2caps(pushop.remote)
751 751 hasphaseheads = 'heads' in b2caps.get('phases', ())
752 752 if pushop.remotephases is not None and hasphaseheads:
753 753 # check that the remote phase has not changed
754 754 checks = [[] for p in phases.allphases]
755 755 checks[phases.public].extend(pushop.remotephases.publicheads)
756 756 checks[phases.draft].extend(pushop.remotephases.draftroots)
757 757 if any(checks):
758 758 for nodes in checks:
759 759 nodes.sort()
760 760 checkdata = phases.binaryencode(checks)
761 761 bundler.newpart('check:phases', data=checkdata)
762 762
763 763 @b2partsgenerator('changeset')
764 764 def _pushb2ctx(pushop, bundler):
765 765 """handle changegroup push through bundle2
766 766
767 767 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
768 768 """
769 769 if 'changesets' in pushop.stepsdone:
770 770 return
771 771 pushop.stepsdone.add('changesets')
772 772 # Send known heads to the server for race detection.
773 773 if not _pushcheckoutgoing(pushop):
774 774 return
775 775 pushop.repo.prepushoutgoinghooks(pushop)
776 776
777 777 _pushb2ctxcheckheads(pushop, bundler)
778 778
779 779 b2caps = bundle2.bundle2caps(pushop.remote)
780 780 version = '01'
781 781 cgversions = b2caps.get('changegroup')
782 782 if cgversions: # 3.1 and 3.2 ship with an empty value
783 783 cgversions = [v for v in cgversions
784 784 if v in changegroup.supportedoutgoingversions(
785 785 pushop.repo)]
786 786 if not cgversions:
787 787 raise ValueError(_('no common changegroup version'))
788 788 version = max(cgversions)
789 789 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
790 790 'push')
791 791 cgpart = bundler.newpart('changegroup', data=cgstream)
792 792 if cgversions:
793 793 cgpart.addparam('version', version)
794 794 if 'treemanifest' in pushop.repo.requirements:
795 795 cgpart.addparam('treemanifest', '1')
796 796 def handlereply(op):
797 797 """extract addchangegroup returns from server reply"""
798 798 cgreplies = op.records.getreplies(cgpart.id)
799 799 assert len(cgreplies['changegroup']) == 1
800 800 pushop.cgresult = cgreplies['changegroup'][0]['return']
801 801 return handlereply
802 802
803 803 @b2partsgenerator('phase')
804 804 def _pushb2phases(pushop, bundler):
805 805 """handle phase push through bundle2"""
806 806 if 'phases' in pushop.stepsdone:
807 807 return
808 808 b2caps = bundle2.bundle2caps(pushop.remote)
809 809 ui = pushop.repo.ui
810 810
811 811 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
812 812 haspushkey = 'pushkey' in b2caps
813 813 hasphaseheads = 'heads' in b2caps.get('phases', ())
814 814
815 815 if hasphaseheads and not legacyphase:
816 816 return _pushb2phaseheads(pushop, bundler)
817 817 elif haspushkey:
818 818 return _pushb2phasespushkey(pushop, bundler)
819 819
820 820 def _pushb2phaseheads(pushop, bundler):
821 821 """push phase information through a bundle2 - binary part"""
822 822 pushop.stepsdone.add('phases')
823 823 if pushop.outdatedphases:
824 824 updates = [[] for p in phases.allphases]
825 825 updates[0].extend(h.node() for h in pushop.outdatedphases)
826 826 phasedata = phases.binaryencode(updates)
827 827 bundler.newpart('phase-heads', data=phasedata)
828 828
829 829 def _pushb2phasespushkey(pushop, bundler):
830 830 """push phase information through a bundle2 - pushkey part"""
831 831 pushop.stepsdone.add('phases')
832 832 part2node = []
833 833
834 834 def handlefailure(pushop, exc):
835 835 targetid = int(exc.partid)
836 836 for partid, node in part2node:
837 837 if partid == targetid:
838 838 raise error.Abort(_('updating %s to public failed') % node)
839 839
840 840 enc = pushkey.encode
841 841 for newremotehead in pushop.outdatedphases:
842 842 part = bundler.newpart('pushkey')
843 843 part.addparam('namespace', enc('phases'))
844 844 part.addparam('key', enc(newremotehead.hex()))
845 845 part.addparam('old', enc('%d' % phases.draft))
846 846 part.addparam('new', enc('%d' % phases.public))
847 847 part2node.append((part.id, newremotehead))
848 848 pushop.pkfailcb[part.id] = handlefailure
849 849
850 850 def handlereply(op):
851 851 for partid, node in part2node:
852 852 partrep = op.records.getreplies(partid)
853 853 results = partrep['pushkey']
854 854 assert len(results) <= 1
855 855 msg = None
856 856 if not results:
857 857 msg = _('server ignored update of %s to public!\n') % node
858 858 elif not int(results[0]['return']):
859 859 msg = _('updating %s to public failed!\n') % node
860 860 if msg is not None:
861 861 pushop.ui.warn(msg)
862 862 return handlereply
863 863
864 864 @b2partsgenerator('obsmarkers')
865 865 def _pushb2obsmarkers(pushop, bundler):
866 866 if 'obsmarkers' in pushop.stepsdone:
867 867 return
868 868 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
869 869 if obsolete.commonversion(remoteversions) is None:
870 870 return
871 871 pushop.stepsdone.add('obsmarkers')
872 872 if pushop.outobsmarkers:
873 873 markers = sorted(pushop.outobsmarkers)
874 874 bundle2.buildobsmarkerspart(bundler, markers)
875 875
876 876 @b2partsgenerator('bookmarks')
877 877 def _pushb2bookmarks(pushop, bundler):
878 878 """handle bookmark push through bundle2"""
879 879 if 'bookmarks' in pushop.stepsdone:
880 880 return
881 881 b2caps = bundle2.bundle2caps(pushop.remote)
882 882 if 'pushkey' not in b2caps:
883 883 return
884 884 pushop.stepsdone.add('bookmarks')
885 885 part2book = []
886 886 enc = pushkey.encode
887 887
888 888 def handlefailure(pushop, exc):
889 889 targetid = int(exc.partid)
890 890 for partid, book, action in part2book:
891 891 if partid == targetid:
892 892 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
893 893 # we should not be called for a part we did not generate
894 894 assert False
895 895
896 896 for book, old, new in pushop.outbookmarks:
897 897 part = bundler.newpart('pushkey')
898 898 part.addparam('namespace', enc('bookmarks'))
899 899 part.addparam('key', enc(book))
900 900 part.addparam('old', enc(old))
901 901 part.addparam('new', enc(new))
902 902 action = 'update'
903 903 if not old:
904 904 action = 'export'
905 905 elif not new:
906 906 action = 'delete'
907 907 part2book.append((part.id, book, action))
908 908 pushop.pkfailcb[part.id] = handlefailure
909 909
910 910 def handlereply(op):
911 911 ui = pushop.ui
912 912 for partid, book, action in part2book:
913 913 partrep = op.records.getreplies(partid)
914 914 results = partrep['pushkey']
915 915 assert len(results) <= 1
916 916 if not results:
917 917 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
918 918 else:
919 919 ret = int(results[0]['return'])
920 920 if ret:
921 921 ui.status(bookmsgmap[action][0] % book)
922 922 else:
923 923 ui.warn(bookmsgmap[action][1] % book)
924 924 if pushop.bkresult is not None:
925 925 pushop.bkresult = 1
926 926 return handlereply
927 927
928 928 @b2partsgenerator('pushvars', idx=0)
929 929 def _getbundlesendvars(pushop, bundler):
930 930 '''send shellvars via bundle2'''
931 931 pushvars = pushop.pushvars
932 932 if pushvars:
933 933 shellvars = {}
934 934 for raw in pushvars:
935 935 if '=' not in raw:
936 936 msg = ("unable to parse variable '%s', should follow "
937 937 "'KEY=VALUE' or 'KEY=' format")
938 938 raise error.Abort(msg % raw)
939 939 k, v = raw.split('=', 1)
940 940 shellvars[k] = v
941 941
942 942 part = bundler.newpart('pushvars')
943 943
944 944 for key, value in shellvars.iteritems():
945 945 part.addparam(key, value, mandatory=False)
946 946
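# Illustrative sketch (not part of the original file): the part built above
# backs the experimental `hg push --pushvars "KEY=VALUE"` option; receiving
# hooks see each variable as HG_USERVAR_<KEY> in their environment.
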
947 947 def _pushbundle2(pushop):
948 948 """push data to the remote using bundle2
949 949
950 950 The only currently supported type of data is the changegroup, but this
951 951 will evolve in the future."""
952 952 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
953 953 pushback = (pushop.trmanager
954 954 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
955 955
956 956 # create reply capability
957 957 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
958 958 allowpushback=pushback))
959 959 bundler.newpart('replycaps', data=capsblob)
960 960 replyhandlers = []
961 961 for partgenname in b2partsgenorder:
962 962 partgen = b2partsgenmapping[partgenname]
963 963 ret = partgen(pushop, bundler)
964 964 if callable(ret):
965 965 replyhandlers.append(ret)
966 966 # do not push if nothing to push
967 967 if bundler.nbparts <= 1:
968 968 return
969 969 stream = util.chunkbuffer(bundler.getchunks())
970 970 try:
971 971 try:
972 972 reply = pushop.remote.unbundle(
973 973 stream, ['force'], pushop.remote.url())
974 974 except error.BundleValueError as exc:
975 975 raise error.Abort(_('missing support for %s') % exc)
976 976 try:
977 977 trgetter = None
978 978 if pushback:
979 979 trgetter = pushop.trmanager.transaction
980 980 op = bundle2.processbundle(pushop.repo, reply, trgetter)
981 981 except error.BundleValueError as exc:
982 982 raise error.Abort(_('missing support for %s') % exc)
983 983 except bundle2.AbortFromPart as exc:
984 984 pushop.ui.status(_('remote: %s\n') % exc)
985 985 if exc.hint is not None:
986 986 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
987 987 raise error.Abort(_('push failed on remote'))
988 988 except error.PushkeyFailed as exc:
989 989 partid = int(exc.partid)
990 990 if partid not in pushop.pkfailcb:
991 991 raise
992 992 pushop.pkfailcb[partid](pushop, exc)
993 993 for rephand in replyhandlers:
994 994 rephand(op)
995 995
996 996 def _pushchangeset(pushop):
997 997 """Make the actual push of changeset bundle to remote repo"""
998 998 if 'changesets' in pushop.stepsdone:
999 999 return
1000 1000 pushop.stepsdone.add('changesets')
1001 1001 if not _pushcheckoutgoing(pushop):
1002 1002 return
1003 1003
1004 1004 # Should have verified this in push().
1005 1005 assert pushop.remote.capable('unbundle')
1006 1006
1007 1007 pushop.repo.prepushoutgoinghooks(pushop)
1008 1008 outgoing = pushop.outgoing
1009 1009 # TODO: get bundlecaps from remote
1010 1010 bundlecaps = None
1011 1011 # create a changegroup from local
1012 1012 if pushop.revs is None and not (outgoing.excluded
1013 1013 or pushop.repo.changelog.filteredrevs):
1014 1014 # push everything,
1015 1015 # use the fast path, no race possible on push
1016 1016 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1017 1017 fastpath=True, bundlecaps=bundlecaps)
1018 1018 else:
1019 1019 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1020 1020 'push', bundlecaps=bundlecaps)
1021 1021
1022 1022 # apply changegroup to remote
1023 1023 # local repo finds heads on server, finds out what
1024 1024 # revs it must push. once revs transferred, if server
1025 1025 # finds it has different heads (someone else won
1026 1026 # commit/push race), server aborts.
1027 1027 if pushop.force:
1028 1028 remoteheads = ['force']
1029 1029 else:
1030 1030 remoteheads = pushop.remoteheads
1031 1031 # ssh: return remote's addchangegroup()
1032 1032 # http: return remote's addchangegroup() or 0 for error
1033 1033 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1034 1034 pushop.repo.url())
1035 1035
1036 1036 def _pushsyncphase(pushop):
1037 1037 """synchronise phase information locally and remotely"""
1038 1038 cheads = pushop.commonheads
1039 1039 # even when we don't push, exchanging phase data is useful
1040 1040 remotephases = pushop.remote.listkeys('phases')
1041 1041 if (pushop.ui.configbool('ui', '_usedassubrepo')
1042 1042 and remotephases # server supports phases
1043 1043 and pushop.cgresult is None # nothing was pushed
1044 1044 and remotephases.get('publishing', False)):
1045 1045 # When:
1046 1046 # - this is a subrepo push
1047 1047 # - and the remote supports phases
1048 1048 # - and no changeset was pushed
1049 1049 # - and the remote is publishing
1050 1050 # We may be in the issue 3871 case!
1051 1051 # We drop the phase synchronisation usually done as a courtesy,
1052 1052 # since it could publish changesets that are still draft
1053 1053 # on the remote.
1054 1054 remotephases = {'publishing': 'True'}
1055 1055 if not remotephases: # old server, or public-only reply from non-publishing
1056 1056 _localphasemove(pushop, cheads)
1057 1057 # don't push any phase data as there is nothing to push
1058 1058 else:
1059 1059 ana = phases.analyzeremotephases(pushop.repo, cheads,
1060 1060 remotephases)
1061 1061 pheads, droots = ana
1062 1062 ### Apply remote phase on local
1063 1063 if remotephases.get('publishing', False):
1064 1064 _localphasemove(pushop, cheads)
1065 1065 else: # publish = False
1066 1066 _localphasemove(pushop, pheads)
1067 1067 _localphasemove(pushop, cheads, phases.draft)
1068 1068 ### Apply local phase on remote
1069 1069
1070 1070 if pushop.cgresult:
1071 1071 if 'phases' in pushop.stepsdone:
1072 1072 # phases already pushed through bundle2
1073 1073 return
1074 1074 outdated = pushop.outdatedphases
1075 1075 else:
1076 1076 outdated = pushop.fallbackoutdatedphases
1077 1077
1078 1078 pushop.stepsdone.add('phases')
1079 1079
1080 1080 # filter heads already turned public by the push
1081 1081 outdated = [c for c in outdated if c.node() not in pheads]
1082 1082 # fallback to independent pushkey command
1083 1083 for newremotehead in outdated:
1084 1084 r = pushop.remote.pushkey('phases',
1085 1085 newremotehead.hex(),
1086 1086 str(phases.draft),
1087 1087 str(phases.public))
1088 1088 if not r:
1089 1089 pushop.ui.warn(_('updating %s to public failed!\n')
1090 1090 % newremotehead)
1091 1091
1092 1092 def _localphasemove(pushop, nodes, phase=phases.public):
1093 1093 """move <nodes> to <phase> in the local source repo"""
1094 1094 if pushop.trmanager:
1095 1095 phases.advanceboundary(pushop.repo,
1096 1096 pushop.trmanager.transaction(),
1097 1097 phase,
1098 1098 nodes)
1099 1099 else:
1100 1100 # repo is not locked, do not change any phases!
1101 1101 # Inform the user that phases should have been moved when
1102 1102 # applicable.
1103 1103 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1104 1104 phasestr = phases.phasenames[phase]
1105 1105 if actualmoves:
1106 1106 pushop.ui.status(_('cannot lock source repo, skipping '
1107 1107 'local %s phase update\n') % phasestr)
1108 1108
1109 1109 def _pushobsolete(pushop):
1110 1110 """utility function to push obsolete markers to a remote"""
1111 1111 if 'obsmarkers' in pushop.stepsdone:
1112 1112 return
1113 1113 repo = pushop.repo
1114 1114 remote = pushop.remote
1115 1115 pushop.stepsdone.add('obsmarkers')
1116 1116 if pushop.outobsmarkers:
1117 1117 pushop.ui.debug('try to push obsolete markers to remote\n')
1118 1118 rslts = []
1119 1119 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1120 1120 for key in sorted(remotedata, reverse=True):
1121 1121 # reverse sort to ensure we end with dump0
1122 1122 data = remotedata[key]
1123 1123 rslts.append(remote.pushkey('obsolete', key, '', data))
1124 1124 if [r for r in rslts if not r]:
1125 1125 msg = _('failed to push some obsolete markers!\n')
1126 1126 repo.ui.warn(msg)
1127 1127
1128 1128 def _pushbookmark(pushop):
1129 1129 """Update bookmark position on remote"""
1130 1130 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1131 1131 return
1132 1132 pushop.stepsdone.add('bookmarks')
1133 1133 ui = pushop.ui
1134 1134 remote = pushop.remote
1135 1135
1136 1136 for b, old, new in pushop.outbookmarks:
1137 1137 action = 'update'
1138 1138 if not old:
1139 1139 action = 'export'
1140 1140 elif not new:
1141 1141 action = 'delete'
1142 1142 if remote.pushkey('bookmarks', b, old, new):
1143 1143 ui.status(bookmsgmap[action][0] % b)
1144 1144 else:
1145 1145 ui.warn(bookmsgmap[action][1] % b)
1146 1146 # discovery can have set the value from an invalid entry
1147 1147 if pushop.bkresult is not None:
1148 1148 pushop.bkresult = 1
1149 1149
1150 1150 class pulloperation(object):
1151 1151 """A object that represent a single pull operation
1152 1152
1153 1153 It purpose is to carry pull related state and very common operation.
1154 1154
1155 1155 A new should be created at the beginning of each pull and discarded
1156 1156 afterward.
1157 1157 """
1158 1158
1159 1159 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1160 1160 remotebookmarks=None, streamclonerequested=None):
1161 1161 # repo we pull into
1162 1162 self.repo = repo
1163 1163 # repo we pull from
1164 1164 self.remote = remote
1165 1165 # revisions we try to pull (None is "all")
1166 1166 self.heads = heads
1167 1167 # bookmarks pulled explicitly
1168 1168 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1169 1169 for bookmark in bookmarks]
1170 1170 # do we force pull?
1171 1171 self.force = force
1172 1172 # whether a streaming clone was requested
1173 1173 self.streamclonerequested = streamclonerequested
1174 1174 # transaction manager
1175 1175 self.trmanager = None
1176 1176 # set of common changesets between local and remote before pull
1177 1177 self.common = None
1178 1178 # set of pulled heads
1179 1179 self.rheads = None
1180 1180 # list of missing changesets to fetch remotely
1181 1181 self.fetch = None
1182 1182 # remote bookmarks data
1183 1183 self.remotebookmarks = remotebookmarks
1184 1184 # result of changegroup pulling (used as return code by pull)
1185 1185 self.cgresult = None
1186 1186 # list of steps already done
1187 1187 self.stepsdone = set()
1188 1188 # Whether we attempted a clone from pre-generated bundles.
1189 1189 self.clonebundleattempted = False
1190 1190
1191 1191 @util.propertycache
1192 1192 def pulledsubset(self):
1193 1193 """heads of the set of changeset target by the pull"""
1194 1194 # compute target subset
1195 1195 if self.heads is None:
1196 1196 # We pulled everything possible
1197 1197 # sync on everything common
1198 1198 c = set(self.common)
1199 1199 ret = list(self.common)
1200 1200 for n in self.rheads:
1201 1201 if n not in c:
1202 1202 ret.append(n)
1203 1203 return ret
1204 1204 else:
1205 1205 # We pulled a specific subset
1206 1206 # sync on this subset
1207 1207 return self.heads
1208 1208
1209 1209 @util.propertycache
1210 1210 def canusebundle2(self):
1211 1211 return not _forcebundle1(self)
1212 1212
1213 1213 @util.propertycache
1214 1214 def remotebundle2caps(self):
1215 1215 return bundle2.bundle2caps(self.remote)
1216 1216
1217 1217 def gettransaction(self):
1218 1218 # deprecated; talk to trmanager directly
1219 1219 return self.trmanager.transaction()
1220 1220
1221 1221 class transactionmanager(util.transactional):
1222 1222 """An object to manage the life cycle of a transaction
1223 1223
1224 1224 It creates the transaction on demand and calls the appropriate hooks when
1225 1225 closing the transaction."""
1226 1226 def __init__(self, repo, source, url):
1227 1227 self.repo = repo
1228 1228 self.source = source
1229 1229 self.url = url
1230 1230 self._tr = None
1231 1231
1232 1232 def transaction(self):
1233 1233 """Return an open transaction object, constructing if necessary"""
1234 1234 if not self._tr:
1235 1235 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1236 1236 self._tr = self.repo.transaction(trname)
1237 1237 self._tr.hookargs['source'] = self.source
1238 1238 self._tr.hookargs['url'] = self.url
1239 1239 return self._tr
1240 1240
1241 1241 def close(self):
1242 1242 """close transaction if created"""
1243 1243 if self._tr is not None:
1244 1244 self._tr.close()
1245 1245
1246 1246 def release(self):
1247 1247 """release transaction if created"""
1248 1248 if self._tr is not None:
1249 1249 self._tr.release()
1250 1250
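# Illustrative usage sketch (not part of the original file): as a
# util.transactional subclass, the manager closes on success and releases
# on error when used as a context manager:
#
#   trmanager = transactionmanager(repo, 'pull', remote.url())
#   with trmanager:
#       tr = trmanager.transaction()  # created lazily on first call
#       # ... apply incoming data under 'tr' ...
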
1251 1251 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1252 1252 streamclonerequested=None):
1253 1253 """Fetch repository data from a remote.
1254 1254
1255 1255 This is the main function used to retrieve data from a remote repository.
1256 1256
1257 1257 ``repo`` is the local repository to clone into.
1258 1258 ``remote`` is a peer instance.
1259 1259 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1260 1260 default) means to pull everything from the remote.
1261 1261 ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
1262 1262 default, all remote bookmarks are pulled.
1263 1263 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1264 1264 initialization.
1265 1265 ``streamclonerequested`` is a boolean indicating whether a "streaming
1266 1266 clone" is requested. A "streaming clone" is essentially a raw file copy
1267 1267 of revlogs from the server. This only works when the local repository is
1268 1268 empty. The default value of ``None`` means to respect the server
1269 1269 configuration for preferring stream clones.
1270 1270
1271 1271 Returns the ``pulloperation`` created for this pull.
1272 1272 """
1273 1273 if opargs is None:
1274 1274 opargs = {}
1275 1275 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1276 1276 streamclonerequested=streamclonerequested, **opargs)
1277 1277
1278 1278 peerlocal = pullop.remote.local()
1279 1279 if peerlocal:
1280 1280 missing = set(peerlocal.requirements) - pullop.repo.supported
1281 1281 if missing:
1282 1282 msg = _("required features are not"
1283 1283 " supported in the destination:"
1284 1284 " %s") % (', '.join(sorted(missing)))
1285 1285 raise error.Abort(msg)
1286 1286
1287 1287 wlock = lock = None
1288 1288 try:
1289 1289 wlock = pullop.repo.wlock()
1290 1290 lock = pullop.repo.lock()
1291 1291 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1292 1292 # This should ideally be in _pullbundle2(). However, it needs to run
1293 1293 # before discovery to avoid extra work.
1294 1294 _maybeapplyclonebundle(pullop)
1295 1295 streamclone.maybeperformlegacystreamclone(pullop)
1296 1296 _pulldiscovery(pullop)
1297 1297 if pullop.canusebundle2:
1298 1298 _pullbundle2(pullop)
1299 1299 _pullchangeset(pullop)
1300 1300 _pullphase(pullop)
1301 1301 _pullbookmarks(pullop)
1302 1302 _pullobsolete(pullop)
1303 1303 pullop.trmanager.close()
1304 1304 finally:
1305 1305 lockmod.release(pullop.trmanager, lock, wlock)
1306 1306
1307 1307 return pullop
1308 1308
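# Illustrative usage sketch (assuming 'repo' and a peer 'remote'):
#
#   pullop = pull(repo, remote)  # heads=None pulls everything
#   if pullop.cgresult == 0:
#       repo.ui.status('no changes found\n')
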
1309 1309 # list of steps to perform discovery before pull
1310 1310 pulldiscoveryorder = []
1311 1311
1312 1312 # Mapping between step name and function
1313 1313 #
1314 1314 # This exists to help extensions wrap steps if necessary
1315 1315 pulldiscoverymapping = {}
1316 1316
1317 1317 def pulldiscovery(stepname):
1318 1318 """decorator for function performing discovery before pull
1319 1319
1320 1320 The function is added to the step -> function mapping and appended to the
1321 1321 list of steps. Beware that decorated function will be added in order (this
1322 1322 may matter).
1323 1323
1324 1324 You can only use this decorator for a new step, if you want to wrap a step
1325 1325 from an extension, change the pulldiscovery dictionary directly."""
1326 1326 def dec(func):
1327 1327 assert stepname not in pulldiscoverymapping
1328 1328 pulldiscoverymapping[stepname] = func
1329 1329 pulldiscoveryorder.append(stepname)
1330 1330 return func
1331 1331 return dec
1332 1332
1333 1333 def _pulldiscovery(pullop):
1334 1334 """Run all discovery steps"""
1335 1335 for stepname in pulldiscoveryorder:
1336 1336 step = pulldiscoverymapping[stepname]
1337 1337 step(pullop)
1338 1338
1339 1339 @pulldiscovery('b1:bookmarks')
1340 1340 def _pullbookmarkbundle1(pullop):
1341 1341 """fetch bookmark data in bundle1 case
1342 1342
1343 1343 If not using bundle2, we have to fetch bookmarks before changeset
1344 1344 discovery to reduce the chance and impact of race conditions."""
1345 1345 if pullop.remotebookmarks is not None:
1346 1346 return
1347 1347 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1348 1348 # all known bundle2 servers now support listkeys, but let's be nice with
1349 1349 # new implementations.
1350 1350 return
1351 1351 books = pullop.remote.listkeys('bookmarks')
1352 1352 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1353 1353
1354 1354
1355 1355 @pulldiscovery('changegroup')
1356 1356 def _pulldiscoverychangegroup(pullop):
1357 1357 """discovery phase for the pull
1358 1358
1359 1359 Currently handles changeset discovery only; will change to handle all
1360 1360 discovery at some point."""
1361 1361 tmp = discovery.findcommonincoming(pullop.repo,
1362 1362 pullop.remote,
1363 1363 heads=pullop.heads,
1364 1364 force=pullop.force)
1365 1365 common, fetch, rheads = tmp
1366 1366 nm = pullop.repo.unfiltered().changelog.nodemap
1367 1367 if fetch and rheads:
1368 1368 # If a remote head is filtered locally, put it back in common.
1369 1369 #
1370 1370 # This is a hackish solution to catch most of the "common but locally
1371 1371 # hidden" situations. We do not perform discovery on the unfiltered
1372 1372 # repository because it ends up doing a pathological amount of round
1373 1373 # trips for a huge amount of changesets we do not care about.
1374 1374 #
1375 1375 # If a set of such "common but filtered" changesets exists on the server
1376 1376 # but does not include a remote head, we'll not be able to detect it,
1377 1377 scommon = set(common)
1378 1378 for n in rheads:
1379 1379 if n in nm:
1380 1380 if n not in scommon:
1381 1381 common.append(n)
1382 1382 if set(rheads).issubset(set(common)):
1383 1383 fetch = []
1384 1384 pullop.common = common
1385 1385 pullop.fetch = fetch
1386 1386 pullop.rheads = rheads
1387 1387
1388 1388 def _pullbundle2(pullop):
1389 1389 """pull data using bundle2
1390 1390
1391 1391 For now, the only supported data is the changegroup."""
1392 1392 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1393 1393
1394 1394 # At the moment we don't do stream clones over bundle2. If that is
1395 1395 # implemented then here's where the check for that will go.
1396 1396 streaming = False
1397 1397
1398 1398 # pulling changegroup
1399 1399 pullop.stepsdone.add('changegroup')
1400 1400
1401 1401 kwargs['common'] = pullop.common
1402 1402 kwargs['heads'] = pullop.heads or pullop.rheads
1403 1403 kwargs['cg'] = pullop.fetch
1404 1404
1405 1405 ui = pullop.repo.ui
1406 1406 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1407 1407 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1408 1408 if (not legacyphase and hasbinaryphase):
1409 1409 kwargs['phases'] = True
1410 1410 pullop.stepsdone.add('phases')
1411 1411
1412 1412 if 'listkeys' in pullop.remotebundle2caps:
1413 1413 if 'phases' not in pullop.stepsdone:
1414 1414 kwargs['listkeys'] = ['phases']
1415 1415 if pullop.remotebookmarks is None:
1416 1416 # make sure to always include bookmark data when migrating
1417 1417 # `hg incoming --bundle` to using this function.
1418 1418 kwargs.setdefault('listkeys', []).append('bookmarks')
1419 1419
1420 1420 # If this is a full pull / clone and the server supports the clone bundles
1421 1421 # feature, tell the server whether we attempted a clone bundle. The
1422 1422 # presence of this flag indicates the client supports clone bundles. This
1423 1423 # will enable the server to treat clients that support clone bundles
1424 1424 # differently from those that don't.
1425 1425 if (pullop.remote.capable('clonebundles')
1426 1426 and pullop.heads is None and list(pullop.common) == [nullid]):
1427 1427 kwargs['cbattempted'] = pullop.clonebundleattempted
1428 1428
1429 1429 if streaming:
1430 1430 pullop.repo.ui.status(_('streaming all changes\n'))
1431 1431 elif not pullop.fetch:
1432 1432 pullop.repo.ui.status(_("no changes found\n"))
1433 1433 pullop.cgresult = 0
1434 1434 else:
1435 1435 if pullop.heads is None and list(pullop.common) == [nullid]:
1436 1436 pullop.repo.ui.status(_("requesting all changes\n"))
1437 1437 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1438 1438 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1439 1439 if obsolete.commonversion(remoteversions) is not None:
1440 1440 kwargs['obsmarkers'] = True
1441 1441 pullop.stepsdone.add('obsmarkers')
1442 1442 _pullbundle2extraprepare(pullop, kwargs)
1443 1443 bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
1444 1444 try:
1445 1445 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1446 1446 except bundle2.AbortFromPart as exc:
1447 1447 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1448 1448 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1449 1449 except error.BundleValueError as exc:
1450 1450 raise error.Abort(_('missing support for %s') % exc)
1451 1451
1452 1452 if pullop.fetch:
1453 1453 pullop.cgresult = bundle2.combinechangegroupresults(op)
1454 1454
1455 1455 # process phase changes
1456 1456 for namespace, value in op.records['listkeys']:
1457 1457 if namespace == 'phases':
1458 1458 _pullapplyphases(pullop, value)
1459 1459
1460 1460 # process bookmark updates
1461 1461 for namespace, value in op.records['listkeys']:
1462 1462 if namespace == 'bookmarks':
1463 1463 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1464 1464
1465 1465 # bookmark data were either already there or pulled in the bundle
1466 1466 if pullop.remotebookmarks is not None:
1467 1467 _pullbookmarks(pullop)
1468 1468
1469 1469 def _pullbundle2extraprepare(pullop, kwargs):
1470 1470 """hook function so that extensions can extend the getbundle call"""
1471 1471
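# A hedged sketch (not part of this module) of how an extension might use
# the hook above together with extensions.wrapfunction to pass an extra,
# purely hypothetical argument to every getbundle call:
#
#   from mercurial import exchange, extensions
#
#   def _extraprepare(orig, pullop, kwargs):
#       kwargs['myextradata'] = True  # hypothetical bundle2 argument
#       return orig(pullop, kwargs)
#
#   def extsetup(ui):
#       extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
#                               _extraprepare)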
1472 1472 def _pullchangeset(pullop):
1473 1473 """pull changeset from unbundle into the local repo"""
1474 1474 # We delay the open of the transaction as late as possible so we
1475 1475 # don't open transaction for nothing or you break future useful
1476 1476 # rollback call
1477 1477 if 'changegroup' in pullop.stepsdone:
1478 1478 return
1479 1479 pullop.stepsdone.add('changegroup')
1480 1480 if not pullop.fetch:
1481 1481 pullop.repo.ui.status(_("no changes found\n"))
1482 1482 pullop.cgresult = 0
1483 1483 return
1484 1484 tr = pullop.gettransaction()
1485 1485 if pullop.heads is None and list(pullop.common) == [nullid]:
1486 1486 pullop.repo.ui.status(_("requesting all changes\n"))
1487 1487 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1488 1488 # issue1320, avoid a race if remote changed after discovery
1489 1489 pullop.heads = pullop.rheads
1490 1490
1491 1491 if pullop.remote.capable('getbundle'):
1492 1492 # TODO: get bundlecaps from remote
1493 1493 cg = pullop.remote.getbundle('pull', common=pullop.common,
1494 1494 heads=pullop.heads or pullop.rheads)
1495 1495 elif pullop.heads is None:
1496 1496 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1497 1497 elif not pullop.remote.capable('changegroupsubset'):
1498 1498 raise error.Abort(_("partial pull cannot be done because "
1499 1499 "other repository doesn't support "
1500 1500 "changegroupsubset."))
1501 1501 else:
1502 1502 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1503 1503 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1504 1504 pullop.remote.url())
1505 1505 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1506 1506
1507 1507 def _pullphase(pullop):
1508 1508 # Get phases data from the remote
1509 1509 if 'phases' in pullop.stepsdone:
1510 1510 return
1511 1511 remotephases = pullop.remote.listkeys('phases')
1512 1512 _pullapplyphases(pullop, remotephases)
1513 1513
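# For illustration: a 'phases' listkeys reply is a str -> str map. A
# publishing server answers {'publishing': 'True'}, so every common
# changeset is treated as public locally; a non-publishing server instead
# maps the hex node of each draft root to its phase number, e.g.
# (node shortened, value hypothetical):
#
#   {'cf7f1e6c3b31...': '1'}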
1514 1514 def _pullapplyphases(pullop, remotephases):
1515 1515 """apply phase movement from observed remote state"""
1516 1516 if 'phases' in pullop.stepsdone:
1517 1517 return
1518 1518 pullop.stepsdone.add('phases')
1519 1519 publishing = bool(remotephases.get('publishing', False))
1520 1520 if remotephases and not publishing:
1521 1521 # remote is new and non-publishing
1522 1522 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1523 1523 pullop.pulledsubset,
1524 1524 remotephases)
1525 1525 dheads = pullop.pulledsubset
1526 1526 else:
1527 1527 # Remote is old or publishing; all common changesets
1528 1528 # should be seen as public
1529 1529 pheads = pullop.pulledsubset
1530 1530 dheads = []
1531 1531 unfi = pullop.repo.unfiltered()
1532 1532 phase = unfi._phasecache.phase
1533 1533 rev = unfi.changelog.nodemap.get
1534 1534 public = phases.public
1535 1535 draft = phases.draft
1536 1536
1537 1537 # exclude changesets already public locally and update the others
1538 1538 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1539 1539 if pheads:
1540 1540 tr = pullop.gettransaction()
1541 1541 phases.advanceboundary(pullop.repo, tr, public, pheads)
1542 1542
1543 1543 # exclude changesets already draft locally and update the others
1544 1544 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1545 1545 if dheads:
1546 1546 tr = pullop.gettransaction()
1547 1547 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1548 1548
1549 1549 def _pullbookmarks(pullop):
1550 1550 """process the remote bookmark information to update the local one"""
1551 1551 if 'bookmarks' in pullop.stepsdone:
1552 1552 return
1553 1553 pullop.stepsdone.add('bookmarks')
1554 1554 repo = pullop.repo
1555 1555 remotebookmarks = pullop.remotebookmarks
1556 1556 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1557 1557 pullop.remote.url(),
1558 1558 pullop.gettransaction,
1559 1559 explicit=pullop.explicitbookmarks)
1560 1560
1561 1561 def _pullobsolete(pullop):
1562 1562 """utility function to pull obsolete markers from a remote
1563 1563
1564 1564 The `gettransaction` argument is a function that returns the pull
1565 1565 transaction, creating one if necessary. We return the transaction to inform
1566 1566 the calling code that a new transaction has been created (when applicable).
1567 1567
1568 1568 Exists mostly to allow overriding for experimentation purposes"""
1569 1569 if 'obsmarkers' in pullop.stepsdone:
1570 1570 return
1571 1571 pullop.stepsdone.add('obsmarkers')
1572 1572 tr = None
1573 1573 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1574 1574 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1575 1575 remoteobs = pullop.remote.listkeys('obsolete')
1576 1576 if 'dump0' in remoteobs:
1577 1577 tr = pullop.gettransaction()
1578 1578 markers = []
1579 1579 for key in sorted(remoteobs, reverse=True):
1580 1580 if key.startswith('dump'):
1581 1581 data = util.b85decode(remoteobs[key])
1582 1582 version, newmarks = obsolete._readmarkers(data)
1583 1583 markers += newmarks
1584 1584 if markers:
1585 1585 pullop.repo.obsstore.add(tr, markers)
1586 1586 pullop.repo.invalidatevolatilesets()
1587 1587 return tr
1588 1588
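# For illustration: the 'obsolete' listkeys namespace returned by the
# remote maps keys 'dump0', 'dump1', ... to base85-encoded obsolescence
# marker data, which is why the loop above scans for the 'dump' prefix
# and decodes each value with util.b85decode.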
1589 1589 def caps20to10(repo):
1590 1590 """return a set with appropriate options to use bundle20 during getbundle"""
1591 1591 caps = {'HG20'}
1592 1592 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1593 1593 caps.add('bundle2=' + urlreq.quote(capsblob))
1594 1594 return caps
1595 1595
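# For illustration, the returned set looks roughly like this (the
# URL-quoted capabilities blob is abridged):
#
#   {'HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02...'}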
1596 1596 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1597 1597 getbundle2partsorder = []
1598 1598
1599 1599 # Mapping between step name and function
1600 1600 #
1601 1601 # This exists to help extensions wrap steps if necessary
1602 1602 getbundle2partsmapping = {}
1603 1603
1604 1604 def getbundle2partsgenerator(stepname, idx=None):
1605 1605 """decorator for function generating bundle2 part for getbundle
1606 1606
1607 1607 The function is added to the step -> function mapping and appended to the
1608 1608 list of steps. Beware that decorated functions will be added in order
1609 1609 (this may matter).
1610 1610
1611 1611 You can only use this decorator for new steps; if you want to wrap a step
1612 1612 from an extension, modify the getbundle2partsmapping dictionary directly."""
1613 1613 def dec(func):
1614 1614 assert stepname not in getbundle2partsmapping
1615 1615 getbundle2partsmapping[stepname] = func
1616 1616 if idx is None:
1617 1617 getbundle2partsorder.append(stepname)
1618 1618 else:
1619 1619 getbundle2partsorder.insert(idx, stepname)
1620 1620 return func
1621 1621 return dec
1622 1622
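# A hedged sketch of registering a new step from an extension; the step
# name and part payload below are hypothetical:
#
#   @exchange.getbundle2partsgenerator('myextradata')
#   def _getbundlemyextrapart(bundler, repo, source, bundlecaps=None,
#                             b2caps=None, **kwargs):
#       if kwargs.get('myextradata', False):
#           bundler.newpart('myextradata', data=b'payload', mandatory=False)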
1623 1623 def bundle2requested(bundlecaps):
1624 1624 if bundlecaps is not None:
1625 1625 return any(cap.startswith('HG2') for cap in bundlecaps)
1626 1626 return False
1627 1627
1628 1628 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1629 1629 **kwargs):
1630 1630 """Return chunks constituting a bundle's raw data.
1631 1631
1632 1632 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1633 1633 passed.
1634 1634
1635 1635 Returns an iterator over raw chunks (of varying sizes).
1636 1636 """
1637 1637 kwargs = pycompat.byteskwargs(kwargs)
1638 1638 usebundle2 = bundle2requested(bundlecaps)
1639 1639 # bundle10 case
1640 1640 if not usebundle2:
1641 1641 if bundlecaps and not kwargs.get('cg', True):
1642 1642 raise ValueError(_('request for bundle10 must include changegroup'))
1643 1643
1644 1644 if kwargs:
1645 1645 raise ValueError(_('unsupported getbundle arguments: %s')
1646 1646 % ', '.join(sorted(kwargs.keys())))
1647 1647 outgoing = _computeoutgoing(repo, heads, common)
1648 1648 return changegroup.makestream(repo, outgoing, '01', source,
1649 1649 bundlecaps=bundlecaps)
1650 1650
1651 1651 # bundle20 case
1652 1652 b2caps = {}
1653 1653 for bcaps in bundlecaps:
1654 1654 if bcaps.startswith('bundle2='):
1655 1655 blob = urlreq.unquote(bcaps[len('bundle2='):])
1656 1656 b2caps.update(bundle2.decodecaps(blob))
1657 1657 bundler = bundle2.bundle20(repo.ui, b2caps)
1658 1658
1659 1659 kwargs['heads'] = heads
1660 1660 kwargs['common'] = common
1661 1661
1662 1662 for name in getbundle2partsorder:
1663 1663 func = getbundle2partsmapping[name]
1664 1664 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1665 1665 **pycompat.strkwargs(kwargs))
1666 1666
1667 1667 return bundler.getchunks()
1668 1668
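# A minimal usage sketch (client code, not part of this module): stream
# the chunks straight to a file to materialize a bundle:
#
#   with open('all.hg', 'wb') as fh:
#       for chunk in exchange.getbundlechunks(repo, 'bundle',
#                                             heads=repo.heads()):
#           fh.write(chunk)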
1669 1669 @getbundle2partsgenerator('changegroup')
1670 1670 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1671 1671 b2caps=None, heads=None, common=None, **kwargs):
1672 1672 """add a changegroup part to the requested bundle"""
1673 1673 cgstream = None
1674 1674 if kwargs.get('cg', True):
1675 1675 # build changegroup bundle here.
1676 1676 version = '01'
1677 1677 cgversions = b2caps.get('changegroup')
1678 1678 if cgversions: # 3.1 and 3.2 ship with an empty value
1679 1679 cgversions = [v for v in cgversions
1680 1680 if v in changegroup.supportedoutgoingversions(repo)]
1681 1681 if not cgversions:
1682 1682 raise ValueError(_('no common changegroup version'))
1683 1683 version = max(cgversions)
1684 1684 outgoing = _computeoutgoing(repo, heads, common)
1685 1685 if outgoing.missing:
1686 1686 cgstream = changegroup.makestream(repo, outgoing, version, source,
1687 1687 bundlecaps=bundlecaps)
1688 1688
1689 1689 if cgstream:
1690 1690 part = bundler.newpart('changegroup', data=cgstream)
1691 1691 if cgversions:
1692 1692 part.addparam('version', version)
1693 1693 part.addparam('nbchanges', '%d' % len(outgoing.missing),
1694 1694 mandatory=False)
1695 1695 if 'treemanifest' in repo.requirements:
1696 1696 part.addparam('treemanifest', '1')
1697 1697
1698 1698 @getbundle2partsgenerator('listkeys')
1699 1699 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1700 1700 b2caps=None, **kwargs):
1701 1701 """add parts containing listkeys namespaces to the requested bundle"""
1702 1702 listkeys = kwargs.get('listkeys', ())
1703 1703 for namespace in listkeys:
1704 1704 part = bundler.newpart('listkeys')
1705 1705 part.addparam('namespace', namespace)
1706 1706 keys = repo.listkeys(namespace).items()
1707 1707 part.data = pushkey.encodekeys(keys)
1708 1708
1709 1709 @getbundle2partsgenerator('obsmarkers')
1710 1710 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1711 1711 b2caps=None, heads=None, **kwargs):
1712 1712 """add an obsolescence markers part to the requested bundle"""
1713 1713 if kwargs.get('obsmarkers', False):
1714 1714 if heads is None:
1715 1715 heads = repo.heads()
1716 1716 subset = [c.node() for c in repo.set('::%ln', heads)]
1717 1717 markers = repo.obsstore.relevantmarkers(subset)
1718 1718 markers = sorted(markers)
1719 1719 bundle2.buildobsmarkerspart(bundler, markers)
1720 1720
1721 1721 @getbundle2partsgenerator('phases')
1722 1722 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
1723 1723 b2caps=None, heads=None, **kwargs):
1724 1724 """add phase heads part to the requested bundle"""
1725 1725 if kwargs.get('phases', False):
1726 1726 if 'heads' not in b2caps.get('phases'):
1727 1727 raise ValueError(_('no common phases exchange method'))
1728 1728 if heads is None:
1729 1729 heads = repo.heads()
1730 1730
1731 1731 headsbyphase = collections.defaultdict(set)
1732 1732 if repo.publishing():
1733 1733 headsbyphase[phases.public] = heads
1734 1734 else:
1735 1735 # find the appropriate heads to move
1736 1736
1737 1737 phase = repo._phasecache.phase
1738 1738 node = repo.changelog.node
1739 1739 rev = repo.changelog.rev
1740 1740 for h in heads:
1741 1741 headsbyphase[phase(repo, rev(h))].add(h)
1742 1742 seenphases = list(headsbyphase.keys())
1743 1743
1744 1744 # We do not handle anything but public and draft phases for now.
1745 1745 if seenphases:
1746 1746 assert max(seenphases) <= phases.draft
1747 1747
1748 1748 # if client is pulling non-public changesets, we need to find
1749 1749 # intermediate public heads.
1750 1750 draftheads = headsbyphase.get(phases.draft, set())
1751 1751 if draftheads:
1752 1752 publicheads = headsbyphase.get(phases.public, set())
1753 1753
1754 1754 revset = 'heads(only(%ln, %ln) and public())'
1755 1755 extraheads = repo.revs(revset, draftheads, publicheads)
1756 1756 for r in extraheads:
1757 1757 headsbyphase[phases.public].add(node(r))
1758 1758
1759 1759 # transform data into the format used by the encoding function
1760 1760 phasemapping = []
1761 1761 for phase in phases.allphases:
1762 1762 phasemapping.append(sorted(headsbyphase[phase]))
1763 1763
1764 1764 # generate the actual part
1765 1765 phasedata = phases.binaryencode(phasemapping)
1766 1766 bundler.newpart('phase-heads', data=phasedata)
1767 1767
1768 1768 @getbundle2partsgenerator('hgtagsfnodes')
1769 1769 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1770 1770 b2caps=None, heads=None, common=None,
1771 1771 **kwargs):
1772 1772 """Transfer the .hgtags filenodes mapping.
1773 1773
1774 1774 Only values for heads in this bundle will be transferred.
1775 1775
1776 1776 The part data consists of pairs of 20-byte changeset nodes and raw
1777 1777 .hgtags filenode values.
1778 1778 """
1779 1779 # Don't send unless:
1780 1780 # - changesets are being exchanged,
1781 1781 # - the client supports it.
1782 1782 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1783 1783 return
1784 1784
1785 1785 outgoing = _computeoutgoing(repo, heads, common)
1786 1786 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1787 1787
1788 def _getbookmarks(repo, **kwargs):
1789 """Returns bookmark to node mapping.
1790
1791 This function is primarily used to generate `bookmarks` bundle2 part.
1792 It is a separate function in order to make it easy to wrap it
1793 in extensions. Passing `kwargs` to the function makes it easy to
1794 add new parameters in extensions.
1795 """
1796
1797 return dict(bookmod.listbinbookmarks(repo))
1798
1799 1788 def check_heads(repo, their_heads, context):
1800 1789 """check if the heads of a repo have been modified
1801 1790
1802 1791 Used by peer for unbundling.
1803 1792 """
1804 1793 heads = repo.heads()
1805 1794 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
1806 1795 if not (their_heads == ['force'] or their_heads == heads or
1807 1796 their_heads == ['hashed', heads_hash]):
1808 1797 # someone else committed/pushed/unbundled while we
1809 1798 # were transferring data
1810 1799 raise error.PushRaced('repository changed while %s - '
1811 1800 'please try again' % context)
1812 1801
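# For illustration, a client using the same racing-protection scheme as
# the check above would send either the exact heads list or its hash:
#
#   heads_arg = ['hashed',
#                hashlib.sha1(''.join(sorted(repo.heads()))).digest()]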
1813 1802 def unbundle(repo, cg, heads, source, url):
1814 1803 """Apply a bundle to a repo.
1815 1804
1816 1805 this function makes sure the repo is locked during the application and has
1817 1806 a mechanism to check that no push race occurred between the creation of the
1818 1807 bundle and its application.
1819 1808
1820 1809 If the push was raced, a PushRaced exception is raised."""
1821 1810 r = 0
1822 1811 # need a transaction when processing a bundle2 stream
1823 1812 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1824 1813 lockandtr = [None, None, None]
1825 1814 recordout = None
1826 1815 # quick fix for output mismatch with bundle2 in 3.4
1827 1816 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
1828 1817 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1829 1818 captureoutput = True
1830 1819 try:
1831 1820 # note: outside bundle1, 'heads' is expected to be empty and this
1832 1821 # 'check_heads' call will be a no-op
1833 1822 check_heads(repo, heads, 'uploading changes')
1834 1823 # push can proceed
1835 1824 if not isinstance(cg, bundle2.unbundle20):
1836 1825 # legacy case: bundle1 (changegroup 01)
1837 1826 txnname = "\n".join([source, util.hidepassword(url)])
1838 1827 with repo.lock(), repo.transaction(txnname) as tr:
1839 1828 op = bundle2.applybundle(repo, cg, tr, source, url)
1840 1829 r = bundle2.combinechangegroupresults(op)
1841 1830 else:
1842 1831 r = None
1843 1832 try:
1844 1833 def gettransaction():
1845 1834 if not lockandtr[2]:
1846 1835 lockandtr[0] = repo.wlock()
1847 1836 lockandtr[1] = repo.lock()
1848 1837 lockandtr[2] = repo.transaction(source)
1849 1838 lockandtr[2].hookargs['source'] = source
1850 1839 lockandtr[2].hookargs['url'] = url
1851 1840 lockandtr[2].hookargs['bundle2'] = '1'
1852 1841 return lockandtr[2]
1853 1842
1854 1843 # Do greedy locking by default until we're satisfied with lazy
1855 1844 # locking.
1856 1845 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1857 1846 gettransaction()
1858 1847
1859 1848 op = bundle2.bundleoperation(repo, gettransaction,
1860 1849 captureoutput=captureoutput)
1861 1850 try:
1862 1851 op = bundle2.processbundle(repo, cg, op=op)
1863 1852 finally:
1864 1853 r = op.reply
1865 1854 if captureoutput and r is not None:
1866 1855 repo.ui.pushbuffer(error=True, subproc=True)
1867 1856 def recordout(output):
1868 1857 r.newpart('output', data=output, mandatory=False)
1869 1858 if lockandtr[2] is not None:
1870 1859 lockandtr[2].close()
1871 1860 except BaseException as exc:
1872 1861 exc.duringunbundle2 = True
1873 1862 if captureoutput and r is not None:
1874 1863 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1875 1864 def recordout(output):
1876 1865 part = bundle2.bundlepart('output', data=output,
1877 1866 mandatory=False)
1878 1867 parts.append(part)
1879 1868 raise
1880 1869 finally:
1881 1870 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1882 1871 if recordout is not None:
1883 1872 recordout(repo.ui.popbuffer())
1884 1873 return r
1885 1874
1886 1875 def _maybeapplyclonebundle(pullop):
1887 1876 """Apply a clone bundle from a remote, if possible."""
1888 1877
1889 1878 repo = pullop.repo
1890 1879 remote = pullop.remote
1891 1880
1892 1881 if not repo.ui.configbool('ui', 'clonebundles'):
1893 1882 return
1894 1883
1895 1884 # Only run if local repo is empty.
1896 1885 if len(repo):
1897 1886 return
1898 1887
1899 1888 if pullop.heads:
1900 1889 return
1901 1890
1902 1891 if not remote.capable('clonebundles'):
1903 1892 return
1904 1893
1905 1894 res = remote._call('clonebundles')
1906 1895
1907 1896 # If we call the wire protocol command, that's good enough to record the
1908 1897 # attempt.
1909 1898 pullop.clonebundleattempted = True
1910 1899
1911 1900 entries = parseclonebundlesmanifest(repo, res)
1912 1901 if not entries:
1913 1902 repo.ui.note(_('no clone bundles available on remote; '
1914 1903 'falling back to regular clone\n'))
1915 1904 return
1916 1905
1917 1906 entries = filterclonebundleentries(
1918 1907 repo, entries, streamclonerequested=pullop.streamclonerequested)
1919 1908
1920 1909 if not entries:
1921 1910 # There is a thundering herd concern here. However, if a server
1922 1911 # operator doesn't advertise bundles appropriate for its clients,
1923 1912 # they deserve what's coming. Furthermore, from a client's
1924 1913 # perspective, no automatic fallback would mean not being able to
1925 1914 # clone!
1926 1915 repo.ui.warn(_('no compatible clone bundles available on server; '
1927 1916 'falling back to regular clone\n'))
1928 1917 repo.ui.warn(_('(you may want to report this to the server '
1929 1918 'operator)\n'))
1930 1919 return
1931 1920
1932 1921 entries = sortclonebundleentries(repo.ui, entries)
1933 1922
1934 1923 url = entries[0]['URL']
1935 1924 repo.ui.status(_('applying clone bundle from %s\n') % url)
1936 1925 if trypullbundlefromurl(repo.ui, repo, url):
1937 1926 repo.ui.status(_('finished applying clone bundle\n'))
1938 1927 # Bundle failed.
1939 1928 #
1940 1929 # We abort by default to avoid the thundering herd of
1941 1930 # clients flooding a server that was expecting expensive
1942 1931 # clone load to be offloaded.
1943 1932 elif repo.ui.configbool('ui', 'clonebundlefallback'):
1944 1933 repo.ui.warn(_('falling back to normal clone\n'))
1945 1934 else:
1946 1935 raise error.Abort(_('error applying bundle'),
1947 1936 hint=_('if this error persists, consider contacting '
1948 1937 'the server operator or disable clone '
1949 1938 'bundles via '
1950 1939 '"--config ui.clonebundles=false"'))
1951 1940
1952 1941 def parseclonebundlesmanifest(repo, s):
1953 1942 """Parses the raw text of a clone bundles manifest.
1954 1943
1955 1944 Returns a list of dicts. The dicts have a ``URL`` key corresponding
1956 1945 to the URL and other keys are the attributes for the entry.
1957 1946 """
1958 1947 m = []
1959 1948 for line in s.splitlines():
1960 1949 fields = line.split()
1961 1950 if not fields:
1962 1951 continue
1963 1952 attrs = {'URL': fields[0]}
1964 1953 for rawattr in fields[1:]:
1965 1954 key, value = rawattr.split('=', 1)
1966 1955 key = urlreq.unquote(key)
1967 1956 value = urlreq.unquote(value)
1968 1957 attrs[key] = value
1969 1958
1970 1959 # Parse BUNDLESPEC into components. This makes client-side
1971 1960 # preferences easier to specify since you can prefer a single
1972 1961 # component of the BUNDLESPEC.
1973 1962 if key == 'BUNDLESPEC':
1974 1963 try:
1975 1964 comp, version, params = parsebundlespec(repo, value,
1976 1965 externalnames=True)
1977 1966 attrs['COMPRESSION'] = comp
1978 1967 attrs['VERSION'] = version
1979 1968 except error.InvalidBundleSpecification:
1980 1969 pass
1981 1970 except error.UnsupportedBundleSpecification:
1982 1971 pass
1983 1972
1984 1973 m.append(attrs)
1985 1974
1986 1975 return m
1987 1976
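# For illustration, a hypothetical manifest line such as
#
#   https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# parses into:
#
#   {'URL': 'https://example.com/full.hg', 'BUNDLESPEC': 'gzip-v2',
#    'COMPRESSION': 'gzip', 'VERSION': 'v2', 'REQUIRESNI': 'true'}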
1988 1977 def filterclonebundleentries(repo, entries, streamclonerequested=False):
1989 1978 """Remove incompatible clone bundle manifest entries.
1990 1979
1991 1980 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
1992 1981 and returns a new list consisting of only the entries that this client
1993 1982 should be able to apply.
1994 1983
1995 1984 There is no guarantee we'll be able to apply all returned entries because
1996 1985 the metadata we use to filter on may be missing or wrong.
1997 1986 """
1998 1987 newentries = []
1999 1988 for entry in entries:
2000 1989 spec = entry.get('BUNDLESPEC')
2001 1990 if spec:
2002 1991 try:
2003 1992 comp, version, params = parsebundlespec(repo, spec, strict=True)
2004 1993
2005 1994 # If a stream clone was requested, filter out non-streamclone
2006 1995 # entries.
2007 1996 if streamclonerequested and (comp != 'UN' or version != 's1'):
2008 1997 repo.ui.debug('filtering %s because not a stream clone\n' %
2009 1998 entry['URL'])
2010 1999 continue
2011 2000
2012 2001 except error.InvalidBundleSpecification as e:
2013 2002 repo.ui.debug(str(e) + '\n')
2014 2003 continue
2015 2004 except error.UnsupportedBundleSpecification as e:
2016 2005 repo.ui.debug('filtering %s because unsupported bundle '
2017 2006 'spec: %s\n' % (entry['URL'], str(e)))
2018 2007 continue
2019 2008 # If we don't have a spec and requested a stream clone, we don't know
2020 2009 # what the entry is so don't attempt to apply it.
2021 2010 elif streamclonerequested:
2022 2011 repo.ui.debug('filtering %s because cannot determine if a stream '
2023 2012 'clone bundle\n' % entry['URL'])
2024 2013 continue
2025 2014
2026 2015 if 'REQUIRESNI' in entry and not sslutil.hassni:
2027 2016 repo.ui.debug('filtering %s because SNI not supported\n' %
2028 2017 entry['URL'])
2029 2018 continue
2030 2019
2031 2020 newentries.append(entry)
2032 2021
2033 2022 return newentries
2034 2023
2035 2024 class clonebundleentry(object):
2036 2025 """Represents an item in a clone bundles manifest.
2037 2026
2038 2027 This rich class is needed to support sorting since sorted() in Python 3
2039 2028 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2040 2029 won't work.
2041 2030 """
2042 2031
2043 2032 def __init__(self, value, prefers):
2044 2033 self.value = value
2045 2034 self.prefers = prefers
2046 2035
2047 2036 def _cmp(self, other):
2048 2037 for prefkey, prefvalue in self.prefers:
2049 2038 avalue = self.value.get(prefkey)
2050 2039 bvalue = other.value.get(prefkey)
2051 2040
2052 2041 # Special case: b is missing the attribute and a matches exactly.
2053 2042 if avalue is not None and bvalue is None and avalue == prefvalue:
2054 2043 return -1
2055 2044
2056 2045 # Special case: a is missing the attribute and b matches exactly.
2057 2046 if bvalue is not None and avalue is None and bvalue == prefvalue:
2058 2047 return 1
2059 2048
2060 2049 # We can't compare unless the attribute is present on both.
2061 2050 if avalue is None or bvalue is None:
2062 2051 continue
2063 2052
2064 2053 # Same values should fall back to next attribute.
2065 2054 if avalue == bvalue:
2066 2055 continue
2067 2056
2068 2057 # Exact matches come first.
2069 2058 if avalue == prefvalue:
2070 2059 return -1
2071 2060 if bvalue == prefvalue:
2072 2061 return 1
2073 2062
2074 2063 # Fall back to next attribute.
2075 2064 continue
2076 2065
2077 2066 # If we got here we couldn't sort by attributes and prefers. Fall
2078 2067 # back to index order.
2079 2068 return 0
2080 2069
2081 2070 def __lt__(self, other):
2082 2071 return self._cmp(other) < 0
2083 2072
2084 2073 def __gt__(self, other):
2085 2074 return self._cmp(other) > 0
2086 2075
2087 2076 def __eq__(self, other):
2088 2077 return self._cmp(other) == 0
2089 2078
2090 2079 def __le__(self, other):
2091 2080 return self._cmp(other) <= 0
2092 2081
2093 2082 def __ge__(self, other):
2094 2083 return self._cmp(other) >= 0
2095 2084
2096 2085 def __ne__(self, other):
2097 2086 return self._cmp(other) != 0
2098 2087
2099 2088 def sortclonebundleentries(ui, entries):
2100 2089 prefers = ui.configlist('ui', 'clonebundleprefers')
2101 2090 if not prefers:
2102 2091 return list(entries)
2103 2092
2104 2093 prefers = [p.split('=', 1) for p in prefers]
2105 2094
2106 2095 items = sorted(clonebundleentry(v, prefers) for v in entries)
2107 2096 return [i.value for i in items]
2108 2097
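# For illustration, with a (hypothetical) configuration of
#
#   [ui]
#   clonebundleprefers = VERSION=v2, COMPRESSION=gzip
#
# entries advertising VERSION=v2 sort first, ties are broken by
# COMPRESSION=gzip, and anything left keeps its manifest order, per
# clonebundleentry._cmp above.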
2109 2098 def trypullbundlefromurl(ui, repo, url):
2110 2099 """Attempt to apply a bundle from a URL."""
2111 2100 with repo.lock(), repo.transaction('bundleurl') as tr:
2112 2101 try:
2113 2102 fh = urlmod.open(ui, url)
2114 2103 cg = readbundle(ui, fh, 'stream')
2115 2104
2116 2105 if isinstance(cg, streamclone.streamcloneapplier):
2117 2106 cg.apply(repo)
2118 2107 else:
2119 2108 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2120 2109 return True
2121 2110 except urlerr.httperror as e:
2122 2111 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
2123 2112 except urlerr.urlerror as e:
2124 2113 ui.warn(_('error fetching bundle: %s\n') % e.reason)
2125 2114
2126 2115 return False