streamclone: extract requirements formatting...
Boris Feld
r35830:84965e5f stable
@@ -1,2254 +1,2261 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 bin,
17 17 hex,
18 18 nullid,
19 19 )
20 20 from . import (
21 21 bookmarks as bookmod,
22 22 bundle2,
23 23 changegroup,
24 24 discovery,
25 25 error,
26 26 lock as lockmod,
27 27 logexchange,
28 28 obsolete,
29 29 phases,
30 30 pushkey,
31 31 pycompat,
32 32 scmutil,
33 33 sslutil,
34 34 streamclone,
35 35 url as urlmod,
36 36 util,
37 37 )
38 38
39 39 urlerr = util.urlerr
40 40 urlreq = util.urlreq
41 41
42 42 # Maps bundle version human names to changegroup versions.
43 43 _bundlespeccgversions = {'v1': '01',
44 44 'v2': '02',
45 45 'packed1': 's1',
46 46 'bundle2': '02', #legacy
47 47 }
48 48
49 49 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
50 50 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
51 51
52 52 def parsebundlespec(repo, spec, strict=True, externalnames=False):
53 53 """Parse a bundle string specification into parts.
54 54
55 55 Bundle specifications denote a well-defined bundle/exchange format.
56 56 The content of a given specification should not change over time in
57 57 order to ensure that bundles produced by a newer version of Mercurial are
58 58 readable from an older version.
59 59
60 60 The string currently has the form:
61 61
62 62 <compression>-<type>[;<parameter0>[;<parameter1>]]
63 63
64 64 Where <compression> is one of the supported compression formats
65 65 and <type> is (currently) a version string. A ";" can follow the type and
66 66 all text afterwards is interpreted as URI encoded, ";" delimited key=value
67 67 pairs.
68 68
69 69 If ``strict`` is True (the default) <compression> is required. Otherwise,
70 70 it is optional.
71 71
72 72 If ``externalnames`` is False (the default), the human-centric names will
73 73 be converted to their internal representation.
74 74
75 75 Returns a 3-tuple of (compression, version, parameters). Compression will
76 76 be ``None`` if not in strict mode and a compression isn't defined.
77 77
78 78 An ``InvalidBundleSpecification`` is raised when the specification is
79 79 not syntactically well formed.
80 80
81 81 An ``UnsupportedBundleSpecification`` is raised when the compression or
82 82 bundle type/version is not recognized.
83 83
84 84 Note: this function will likely eventually return a more complex data
85 85 structure, including bundle2 part information.
86 86 """
87 87 def parseparams(s):
88 88 if ';' not in s:
89 89 return s, {}
90 90
91 91 params = {}
92 92 version, paramstr = s.split(';', 1)
93 93
94 94 for p in paramstr.split(';'):
95 95 if '=' not in p:
96 96 raise error.InvalidBundleSpecification(
97 97 _('invalid bundle specification: '
98 98 'missing "=" in parameter: %s') % p)
99 99
100 100 key, value = p.split('=', 1)
101 101 key = urlreq.unquote(key)
102 102 value = urlreq.unquote(value)
103 103 params[key] = value
104 104
105 105 return version, params
106 106
107 107
108 108 if strict and '-' not in spec:
109 109 raise error.InvalidBundleSpecification(
110 110 _('invalid bundle specification; '
111 111 'must be prefixed with compression: %s') % spec)
112 112
113 113 if '-' in spec:
114 114 compression, version = spec.split('-', 1)
115 115
116 116 if compression not in util.compengines.supportedbundlenames:
117 117 raise error.UnsupportedBundleSpecification(
118 118 _('%s compression is not supported') % compression)
119 119
120 120 version, params = parseparams(version)
121 121
122 122 if version not in _bundlespeccgversions:
123 123 raise error.UnsupportedBundleSpecification(
124 124 _('%s is not a recognized bundle version') % version)
125 125 else:
126 126 # Value could be just the compression or just the version, in which
127 127 # case some defaults are assumed (but only when not in strict mode).
128 128 assert not strict
129 129
130 130 spec, params = parseparams(spec)
131 131
132 132 if spec in util.compengines.supportedbundlenames:
133 133 compression = spec
134 134 version = 'v1'
135 135 # Generaldelta repos require v2.
136 136 if 'generaldelta' in repo.requirements:
137 137 version = 'v2'
138 138 # Modern compression engines require v2.
139 139 if compression not in _bundlespecv1compengines:
140 140 version = 'v2'
141 141 elif spec in _bundlespeccgversions:
142 142 if spec == 'packed1':
143 143 compression = 'none'
144 144 else:
145 145 compression = 'bzip2'
146 146 version = spec
147 147 else:
148 148 raise error.UnsupportedBundleSpecification(
149 149 _('%s is not a recognized bundle specification') % spec)
150 150
151 151 # Bundle version 1 only supports a known set of compression engines.
152 152 if version == 'v1' and compression not in _bundlespecv1compengines:
153 153 raise error.UnsupportedBundleSpecification(
154 154 _('compression engine %s is not supported on v1 bundles') %
155 155 compression)
156 156
157 157 # The specification for packed1 can optionally declare the data formats
158 158 # required to apply it. If we see this metadata, compare against what the
159 159 # repo supports and error if the bundle isn't compatible.
160 160 if version == 'packed1' and 'requirements' in params:
161 161 requirements = set(params['requirements'].split(','))
162 162 missingreqs = requirements - repo.supportedformats
163 163 if missingreqs:
164 164 raise error.UnsupportedBundleSpecification(
165 165 _('missing support for repository features: %s') %
166 166 ', '.join(sorted(missingreqs)))
167 167
168 168 if not externalnames:
169 169 engine = util.compengines.forbundlename(compression)
170 170 compression = engine.bundletype()[1]
171 171 version = _bundlespeccgversions[version]
172 172 return compression, version, params
173 173
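A quick illustration of the round trip (a sketch, not part of this change; the expected values follow from the engine and version tables above, and ``repo`` is any local repository object):

    from mercurial import exchange

    # strict mode: the compression prefix is mandatory
    comp, cgversion, params = exchange.parsebundlespec(repo, 'gzip-v2')
    assert (comp, cgversion, params) == ('GZ', '02', {})

    # text after ';' is parsed into URI-decoded key=value parameters
    comp, cgversion, params = exchange.parsebundlespec(
        repo, 'none-packed1;requirements=revlogv1')
    assert (comp, cgversion) == ('UN', 's1')
    assert params == {'requirements': 'revlogv1'}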
174 174 def readbundle(ui, fh, fname, vfs=None):
175 175 header = changegroup.readexactly(fh, 4)
176 176
177 177 alg = None
178 178 if not fname:
179 179 fname = "stream"
180 180 if not header.startswith('HG') and header.startswith('\0'):
181 181 fh = changegroup.headerlessfixup(fh, header)
182 182 header = "HG10"
183 183 alg = 'UN'
184 184 elif vfs:
185 185 fname = vfs.join(fname)
186 186
187 187 magic, version = header[0:2], header[2:4]
188 188
189 189 if magic != 'HG':
190 190 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
191 191 if version == '10':
192 192 if alg is None:
193 193 alg = changegroup.readexactly(fh, 2)
194 194 return changegroup.cg1unpacker(fh, alg)
195 195 elif version.startswith('2'):
196 196 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
197 197 elif version == 'S1':
198 198 return streamclone.streamcloneapplier(fh)
199 199 else:
200 200 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
201 201
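For reference, a summary sketch of the header dispatch above (the first four bytes pick the unbundler; for bundle1 the two-byte compression code follows):

    # 'HG10' + 'UN'/'GZ'/'BZ' -> changegroup.cg1unpacker (bundle1)
    # 'HG2' + version         -> bundle2.getunbundler    (bundle2)
    # 'HGS1'                  -> streamclone.streamcloneapplier (packed1)
    # anything else           -> error.Abort: unknown bundle version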
202 def _formatrequirementsspec(requirements):
203 return urlreq.quote(','.join(sorted(requirements)))
204
205 def _formatrequirementsparams(requirements):
206 requirements = _formatrequirementsspec(requirements)
207 params = "%s%s" % (urlreq.quote("requirements="), requirements)
208 return params
209
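A worked example of the two helpers just extracted, using two common repository requirements:

    # sorted, then URI-quoted (',' becomes '%2C')
    _formatrequirementsspec({'revlogv1', 'generaldelta'})
    #   -> 'generaldelta%2Crevlogv1'

    # prepends the quoted 'requirements=' prefix ('=' becomes '%3D')
    _formatrequirementsparams({'revlogv1', 'generaldelta'})
    #   -> 'requirements%3Dgeneraldelta%2Crevlogv1'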
202 210 def getbundlespec(ui, fh):
203 211 """Infer the bundlespec from a bundle file handle.
204 212
205 213 The input file handle is seeked and the original seek position is not
206 214 restored.
207 215 """
208 216 def speccompression(alg):
209 217 try:
210 218 return util.compengines.forbundletype(alg).bundletype()[0]
211 219 except KeyError:
212 220 return None
213 221
214 222 b = readbundle(ui, fh, None)
215 223 if isinstance(b, changegroup.cg1unpacker):
216 224 alg = b._type
217 225 if alg == '_truncatedBZ':
218 226 alg = 'BZ'
219 227 comp = speccompression(alg)
220 228 if not comp:
221 229 raise error.Abort(_('unknown compression algorithm: %s') % alg)
222 230 return '%s-v1' % comp
223 231 elif isinstance(b, bundle2.unbundle20):
224 232 if 'Compression' in b.params:
225 233 comp = speccompression(b.params['Compression'])
226 234 if not comp:
227 235 raise error.Abort(_('unknown compression algorithm: %s') % comp)
228 236 else:
229 237 comp = 'none'
230 238
231 239 version = None
232 240 for part in b.iterparts():
233 241 if part.type == 'changegroup':
234 242 version = part.params['version']
235 243 if version in ('01', '02'):
236 244 version = 'v2'
237 245 else:
238 246 raise error.Abort(_('changegroup version %s does not have '
239 247 'a known bundlespec') % version,
240 248 hint=_('try upgrading your Mercurial '
241 249 'client'))
242 250
243 251 if not version:
244 252 raise error.Abort(_('could not identify changegroup version in '
245 253 'bundle'))
246 254
247 255 return '%s-%s' % (comp, version)
248 256 elif isinstance(b, streamclone.streamcloneapplier):
249 257 requirements = streamclone.readbundle1header(fh)[2]
250 params = 'requirements=%s' % ','.join(sorted(requirements))
251 return 'none-packed1;%s' % urlreq.quote(params)
258 return 'none-packed1;%s' % _formatrequirementsparams(requirements)
252 259 else:
253 260 raise error.Abort(_('unknown bundle type: %s') % b)
254 261
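A minimal usage sketch (the file name is hypothetical; the handle is consumed, so reopen the bundle before actually applying it):

    with open('changes.hg', 'rb') as fh:
        spec = getbundlespec(ui, fh)
    # e.g. 'gzip-v1', 'zstd-v2', or
    # 'none-packed1;requirements%3Dgeneraldelta%2Crevlogv1'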
255 262 def _computeoutgoing(repo, heads, common):
256 263 """Computes which revs are outgoing given a set of common
257 264 and a set of heads.
258 265
259 266 This is a separate function so extensions can have access to
260 267 the logic.
261 268
262 269 Returns a discovery.outgoing object.
263 270 """
264 271 cl = repo.changelog
265 272 if common:
266 273 hasnode = cl.hasnode
267 274 common = [n for n in common if hasnode(n)]
268 275 else:
269 276 common = [nullid]
270 277 if not heads:
271 278 heads = cl.heads()
272 279 return discovery.outgoing(repo, common, heads)
273 280
274 281 def _forcebundle1(op):
275 282 """return true if a pull/push must use bundle1
276 283
277 284 This function is used to allow testing of the older bundle version"""
278 285 ui = op.repo.ui
279 286 forcebundle1 = False
280 287 # The goal of this config is to allow developers to choose the bundle
281 288 # version used during exchange. This is especially handy during tests.
282 289 # Value is a list of bundle versions to pick from; the highest version
283 290 # should be used.
284 291 #
285 292 # developer config: devel.legacy.exchange
286 293 exchange = ui.configlist('devel', 'legacy.exchange')
287 294 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
288 295 return forcebundle1 or not op.remote.capable('bundle2')
289 296
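As a sketch, the developer configuration read here looks like this in an hgrc (a testing aid only; 'bundle1' wins only when 'bundle2' is absent from the list):

    # in an hgrc:
    #   [devel]
    #   legacy.exchange = bundle1
    #
    # ui.configlist('devel', 'legacy.exchange') then returns ['bundle1'],
    # so _forcebundle1() is True even against a bundle2-capable remote.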
290 297 class pushoperation(object):
291 298 """An object that represents a single push operation
292 299
293 300 Its purpose is to carry push-related state and very common operations.
294 301
295 302 A new pushoperation should be created at the beginning of each push and
296 303 discarded afterward.
297 304 """
298 305
299 306 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
300 307 bookmarks=(), pushvars=None):
301 308 # repo we push from
302 309 self.repo = repo
303 310 self.ui = repo.ui
304 311 # repo we push to
305 312 self.remote = remote
306 313 # force option provided
307 314 self.force = force
308 315 # revs to be pushed (None is "all")
309 316 self.revs = revs
310 317 # bookmark explicitly pushed
311 318 self.bookmarks = bookmarks
312 319 # allow push of new branch
313 320 self.newbranch = newbranch
314 321 # step already performed
315 322 # (used to check what steps have been already performed through bundle2)
316 323 self.stepsdone = set()
317 324 # Integer version of the changegroup push result
318 325 # - None means nothing to push
319 326 # - 0 means HTTP error
320 327 # - 1 means we pushed and remote head count is unchanged *or*
321 328 # we have outgoing changesets but refused to push
322 329 # - other values as described by addchangegroup()
323 330 self.cgresult = None
324 331 # Boolean value for the bookmark push
325 332 self.bkresult = None
326 333 # discover.outgoing object (contains common and outgoing data)
327 334 self.outgoing = None
328 335 # all remote topological heads before the push
329 336 self.remoteheads = None
330 337 # Details of the remote branch pre and post push
331 338 #
332 339 # mapping: {'branch': ([remoteheads],
333 340 # [newheads],
334 341 # [unsyncedheads],
335 342 # [discardedheads])}
336 343 # - branch: the branch name
337 344 # - remoteheads: the list of remote heads known locally
338 345 # None if the branch is new
339 346 # - newheads: the new remote heads (known locally) with outgoing pushed
340 347 # - unsyncedheads: the list of remote heads unknown locally.
341 348 # - discardedheads: the list of remote heads made obsolete by the push
342 349 self.pushbranchmap = None
343 350 # testable as a boolean indicating if any nodes are missing locally.
344 351 self.incoming = None
345 352 # summary of the remote phase situation
346 353 self.remotephases = None
347 354 # phase changes that must be pushed alongside the changesets
348 355 self.outdatedphases = None
349 356 # phase changes that must be pushed if the changeset push fails
350 357 self.fallbackoutdatedphases = None
351 358 # outgoing obsmarkers
352 359 self.outobsmarkers = set()
353 360 # outgoing bookmarks
354 361 self.outbookmarks = []
355 362 # transaction manager
356 363 self.trmanager = None
357 364 # map { pushkey partid -> callback handling failure}
358 365 # used to handle exception from mandatory pushkey part failure
359 366 self.pkfailcb = {}
360 367 # an iterable of pushvars or None
361 368 self.pushvars = pushvars
362 369
363 370 @util.propertycache
364 371 def futureheads(self):
365 372 """future remote heads if the changeset push succeeds"""
366 373 return self.outgoing.missingheads
367 374
368 375 @util.propertycache
369 376 def fallbackheads(self):
370 377 """future remote heads if the changeset push fails"""
371 378 if self.revs is None:
372 379 # no target to push, all common heads are relevant
373 380 return self.outgoing.commonheads
374 381 unfi = self.repo.unfiltered()
375 382 # I want cheads = heads(::missingheads and ::commonheads)
376 383 # (missingheads is revs with secret changeset filtered out)
377 384 #
378 385 # This can be expressed as:
379 386 # cheads = ( (missingheads and ::commonheads)
380 387 # + (commonheads and ::missingheads)
381 388 # )
382 389 #
383 390 # while trying to push we already computed the following:
384 391 # common = (::commonheads)
385 392 # missing = ((commonheads::missingheads) - commonheads)
386 393 #
387 394 # We can pick:
388 395 # * missingheads part of common (::commonheads)
389 396 common = self.outgoing.common
390 397 nm = self.repo.changelog.nodemap
391 398 cheads = [node for node in self.revs if nm[node] in common]
392 399 # and
393 400 # * commonheads parents on missing
394 401 revset = unfi.set('%ln and parents(roots(%ln))',
395 402 self.outgoing.commonheads,
396 403 self.outgoing.missing)
397 404 cheads.extend(c.node() for c in revset)
398 405 return cheads
399 406
400 407 @property
401 408 def commonheads(self):
402 409 """set of all common heads after changeset bundle push"""
403 410 if self.cgresult:
404 411 return self.futureheads
405 412 else:
406 413 return self.fallbackheads
407 414
408 415 # mapping of messages used when pushing bookmarks
409 416 bookmsgmap = {'update': (_("updating bookmark %s\n"),
410 417 _('updating bookmark %s failed!\n')),
411 418 'export': (_("exporting bookmark %s\n"),
412 419 _('exporting bookmark %s failed!\n')),
413 420 'delete': (_("deleting remote bookmark %s\n"),
414 421 _('deleting remote bookmark %s failed!\n')),
415 422 }
416 423
417 424
418 425 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
419 426 opargs=None):
420 427 '''Push outgoing changesets (limited by revs) from a local
421 428 repository to remote. Returns the pushoperation; its ``cgresult`` is:
422 429 - None means nothing to push
423 430 - 0 means HTTP error
424 431 - 1 means we pushed and remote head count is unchanged *or*
425 432 we have outgoing changesets but refused to push
426 433 - other values as described by addchangegroup()
427 434 '''
428 435 if opargs is None:
429 436 opargs = {}
430 437 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
431 438 **pycompat.strkwargs(opargs))
432 439 if pushop.remote.local():
433 440 missing = (set(pushop.repo.requirements)
434 441 - pushop.remote.local().supported)
435 442 if missing:
436 443 msg = _("required features are not"
437 444 " supported in the destination:"
438 445 " %s") % (', '.join(sorted(missing)))
439 446 raise error.Abort(msg)
440 447
441 448 if not pushop.remote.canpush():
442 449 raise error.Abort(_("destination does not support push"))
443 450
444 451 if not pushop.remote.capable('unbundle'):
445 452 raise error.Abort(_('cannot push: destination does not support the '
446 453 'unbundle wire protocol command'))
447 454
448 455 # get lock as we might write phase data
449 456 wlock = lock = None
450 457 try:
451 458 # bundle2 push may receive a reply bundle touching bookmarks or other
452 459 # things requiring the wlock. Take it now to ensure proper ordering.
453 460 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
454 461 if (not _forcebundle1(pushop)) and maypushback:
455 462 wlock = pushop.repo.wlock()
456 463 lock = pushop.repo.lock()
457 464 pushop.trmanager = transactionmanager(pushop.repo,
458 465 'push-response',
459 466 pushop.remote.url())
460 467 except IOError as err:
461 468 if err.errno != errno.EACCES:
462 469 raise
463 470 # source repo cannot be locked.
464 471 # We do not abort the push, but just disable the local phase
465 472 # synchronisation.
466 473 msg = 'cannot lock source repository: %s\n' % err
467 474 pushop.ui.debug(msg)
468 475
469 476 with wlock or util.nullcontextmanager(), \
470 477 lock or util.nullcontextmanager(), \
471 478 pushop.trmanager or util.nullcontextmanager():
472 479 pushop.repo.checkpush(pushop)
473 480 _pushdiscovery(pushop)
474 481 if not _forcebundle1(pushop):
475 482 _pushbundle2(pushop)
476 483 _pushchangeset(pushop)
477 484 _pushsyncphase(pushop)
478 485 _pushobsolete(pushop)
479 486 _pushbookmark(pushop)
480 487
481 488 return pushop
482 489
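A caller sketch (roughly what 'hg push' boils down to; ``peer`` is a remote peer obtained elsewhere, e.g. from hg.peer()):

    from mercurial import exchange

    pushop = exchange.push(repo, peer, force=False, revs=None)
    # pushop.cgresult carries the integer result documented above
    # (None = nothing to push, 0 = HTTP error, 1 = heads unchanged, ...)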
483 490 # list of steps to perform discovery before push
484 491 pushdiscoveryorder = []
485 492
486 493 # Mapping between step name and function
487 494 #
488 495 # This exists to help extensions wrap steps if necessary
489 496 pushdiscoverymapping = {}
490 497
491 498 def pushdiscovery(stepname):
492 499 """decorator for function performing discovery before push
493 500
494 501 The function is added to the step -> function mapping and appended to the
495 502 list of steps. Beware that decorated function will be added in order (this
496 503 may matter).
497 504
498 505 You can only use this decorator for a new step; if you want to wrap a step
499 506 from an extension, change the pushdiscoverymapping dictionary directly."""
500 507 def dec(func):
501 508 assert stepname not in pushdiscoverymapping
502 509 pushdiscoverymapping[stepname] = func
503 510 pushdiscoveryorder.append(stepname)
504 511 return func
505 512 return dec
506 513
507 514 def _pushdiscovery(pushop):
508 515 """Run all discovery steps"""
509 516 for stepname in pushdiscoveryorder:
510 517 step = pushdiscoverymapping[stepname]
511 518 step(pushop)
512 519
513 520 @pushdiscovery('changeset')
514 521 def _pushdiscoverychangeset(pushop):
515 522 """discover the changesets that need to be pushed"""
516 523 fci = discovery.findcommonincoming
517 524 if pushop.revs:
518 525 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
519 526 ancestorsof=pushop.revs)
520 527 else:
521 528 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
522 529 common, inc, remoteheads = commoninc
523 530 fco = discovery.findcommonoutgoing
524 531 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
525 532 commoninc=commoninc, force=pushop.force)
526 533 pushop.outgoing = outgoing
527 534 pushop.remoteheads = remoteheads
528 535 pushop.incoming = inc
529 536
530 537 @pushdiscovery('phase')
531 538 def _pushdiscoveryphase(pushop):
532 539 """discover the phases that need to be pushed
533 540
534 541 (computed for both the success and failure cases of the changeset push)"""
535 542 outgoing = pushop.outgoing
536 543 unfi = pushop.repo.unfiltered()
537 544 remotephases = pushop.remote.listkeys('phases')
538 545 if (pushop.ui.configbool('ui', '_usedassubrepo')
539 546 and remotephases # server supports phases
540 547 and not pushop.outgoing.missing # no changesets to be pushed
541 548 and remotephases.get('publishing', False)):
542 549 # When:
543 550 # - this is a subrepo push
544 551 # - and the remote supports phases
545 552 # - and no changesets are to be pushed
546 553 # - and the remote is publishing
547 554 # We may be in the issue 3781 case!
548 555 # We skip the phase synchronisation that would otherwise be done as
549 556 # a courtesy, as it could publish changesets that are still draft
550 557 # locally but already present on the remote.
551 558 pushop.outdatedphases = []
552 559 pushop.fallbackoutdatedphases = []
553 560 return
554 561
555 562 pushop.remotephases = phases.remotephasessummary(pushop.repo,
556 563 pushop.fallbackheads,
557 564 remotephases)
558 565 droots = pushop.remotephases.draftroots
559 566
560 567 extracond = ''
561 568 if not pushop.remotephases.publishing:
562 569 extracond = ' and public()'
563 570 revset = 'heads((%%ln::%%ln) %s)' % extracond
564 571 # Get the list of all revs draft on remote but public here.
565 572 # XXX Beware that this revset breaks if droots is not strictly
566 573 # XXX roots; we may want to ensure it is, but that is costly
567 574 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
568 575 if not outgoing.missing:
569 576 future = fallback
570 577 else:
571 578 # add changesets we are going to push as draft
572 579 #
573 580 # This should not be necessary for a publishing server, but because of
574 581 # an issue fixed in xxxxx we have to do it anyway.
575 582 fdroots = list(unfi.set('roots(%ln + %ln::)',
576 583 outgoing.missing, droots))
577 584 fdroots = [f.node() for f in fdroots]
578 585 future = list(unfi.set(revset, fdroots, pushop.futureheads))
579 586 pushop.outdatedphases = future
580 587 pushop.fallbackoutdatedphases = fallback
581 588
582 589 @pushdiscovery('obsmarker')
583 590 def _pushdiscoveryobsmarkers(pushop):
584 591 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
585 592 and pushop.repo.obsstore
586 593 and 'obsolete' in pushop.remote.listkeys('namespaces')):
587 594 repo = pushop.repo
588 595 # Very naive computation that can be quite expensive on a big repo.
589 596 # However, evolution is currently slow on them anyway.
590 597 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
591 598 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
592 599
593 600 @pushdiscovery('bookmarks')
594 601 def _pushdiscoverybookmarks(pushop):
595 602 ui = pushop.ui
596 603 repo = pushop.repo.unfiltered()
597 604 remote = pushop.remote
598 605 ui.debug("checking for updated bookmarks\n")
599 606 ancestors = ()
600 607 if pushop.revs:
601 608 revnums = map(repo.changelog.rev, pushop.revs)
602 609 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
603 610 remotebookmark = remote.listkeys('bookmarks')
604 611
605 612 explicit = set([repo._bookmarks.expandname(bookmark)
606 613 for bookmark in pushop.bookmarks])
607 614
608 615 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
609 616 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
610 617
611 618 def safehex(x):
612 619 if x is None:
613 620 return x
614 621 return hex(x)
615 622
616 623 def hexifycompbookmarks(bookmarks):
617 624 for b, scid, dcid in bookmarks:
618 625 yield b, safehex(scid), safehex(dcid)
619 626
620 627 comp = [hexifycompbookmarks(marks) for marks in comp]
621 628 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
622 629
623 630 for b, scid, dcid in advsrc:
624 631 if b in explicit:
625 632 explicit.remove(b)
626 633 if not ancestors or repo[scid].rev() in ancestors:
627 634 pushop.outbookmarks.append((b, dcid, scid))
628 635 # search added bookmark
629 636 for b, scid, dcid in addsrc:
630 637 if b in explicit:
631 638 explicit.remove(b)
632 639 pushop.outbookmarks.append((b, '', scid))
633 640 # search for overwritten bookmark
634 641 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
635 642 if b in explicit:
636 643 explicit.remove(b)
637 644 pushop.outbookmarks.append((b, dcid, scid))
638 645 # search for bookmark to delete
639 646 for b, scid, dcid in adddst:
640 647 if b in explicit:
641 648 explicit.remove(b)
642 649 # treat as "deleted locally"
643 650 pushop.outbookmarks.append((b, dcid, ''))
644 651 # identical bookmarks shouldn't get reported
645 652 for b, scid, dcid in same:
646 653 if b in explicit:
647 654 explicit.remove(b)
648 655
649 656 if explicit:
650 657 explicit = sorted(explicit)
651 658 # we should probably list all of them
652 659 ui.warn(_('bookmark %s does not exist on the local '
653 660 'or remote repository!\n') % explicit[0])
654 661 pushop.bkresult = 2
655 662
656 663 pushop.outbookmarks.sort()
657 664
658 665 def _pushcheckoutgoing(pushop):
659 666 outgoing = pushop.outgoing
660 667 unfi = pushop.repo.unfiltered()
661 668 if not outgoing.missing:
662 669 # nothing to push
663 670 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
664 671 return False
665 672 # something to push
666 673 if not pushop.force:
667 674 # if repo.obsstore is empty --> no obsolete markers,
668 675 # so we can skip the iteration
669 676 if unfi.obsstore:
670 677 # these messages are defined here to stay within the 80 char limit
671 678 mso = _("push includes obsolete changeset: %s!")
672 679 mspd = _("push includes phase-divergent changeset: %s!")
673 680 mscd = _("push includes content-divergent changeset: %s!")
674 681 mst = {"orphan": _("push includes orphan changeset: %s!"),
675 682 "phase-divergent": mspd,
676 683 "content-divergent": mscd}
677 684 # If there is at least one obsolete or unstable
678 685 # changeset in missing, at least one of the
679 686 # missing heads will be obsolete or unstable. So
680 687 # checking heads only is ok.
681 688 for node in outgoing.missingheads:
682 689 ctx = unfi[node]
683 690 if ctx.obsolete():
684 691 raise error.Abort(mso % ctx)
685 692 elif ctx.isunstable():
686 693 # TODO print more than one instability in the abort
687 694 # message
688 695 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
689 696
690 697 discovery.checkheads(pushop)
691 698 return True
692 699
693 700 # List of names of steps to perform for an outgoing bundle2, order matters.
694 701 b2partsgenorder = []
695 702
696 703 # Mapping between step name and function
697 704 #
698 705 # This exists to help extensions wrap steps if necessary
699 706 b2partsgenmapping = {}
700 707
701 708 def b2partsgenerator(stepname, idx=None):
702 709 """decorator for function generating bundle2 part
703 710
704 711 The function is added to the step -> function mapping and appended to the
705 712 list of steps. Beware that decorated functions will be added in order
706 713 (this may matter).
707 714
708 715 You can only use this decorator for new steps; if you want to wrap a step
709 716 from an extension, change the b2partsgenmapping dictionary directly."""
710 717 def dec(func):
711 718 assert stepname not in b2partsgenmapping
712 719 b2partsgenmapping[stepname] = func
713 720 if idx is None:
714 721 b2partsgenorder.append(stepname)
715 722 else:
716 723 b2partsgenorder.insert(idx, stepname)
717 724 return func
718 725 return dec
719 726
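A sketch of how an extension might hook in through this decorator ('my-data' and its payload are invented names):

    from mercurial import exchange

    @exchange.b2partsgenerator('my-data')
    def _pushb2mydata(pushop, bundler):
        """attach an advisory part to every bundle2 push"""
        if 'my-data' in pushop.stepsdone:
            return
        pushop.stepsdone.add('my-data')
        bundler.newpart('my-data', data='payload', mandatory=False)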
720 727 def _pushb2ctxcheckheads(pushop, bundler):
721 728 """Generate race condition checking parts
722 729
723 730 Exists as an independent function to aid extensions
724 731 """
725 732 # * 'force' does not check for push races,
726 733 # * if we don't push anything, there is nothing to check.
727 734 if not pushop.force and pushop.outgoing.missingheads:
728 735 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
729 736 emptyremote = pushop.pushbranchmap is None
730 737 if not allowunrelated or emptyremote:
731 738 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
732 739 else:
733 740 affected = set()
734 741 for branch, heads in pushop.pushbranchmap.iteritems():
735 742 remoteheads, newheads, unsyncedheads, discardedheads = heads
736 743 if remoteheads is not None:
737 744 remote = set(remoteheads)
738 745 affected |= set(discardedheads) & remote
739 746 affected |= remote - set(newheads)
740 747 if affected:
741 748 data = iter(sorted(affected))
742 749 bundler.newpart('check:updated-heads', data=data)
743 750
744 751 def _pushing(pushop):
745 752 """return True if we are pushing anything"""
746 753 return bool(pushop.outgoing.missing
747 754 or pushop.outdatedphases
748 755 or pushop.outobsmarkers
749 756 or pushop.outbookmarks)
750 757
751 758 @b2partsgenerator('check-bookmarks')
752 759 def _pushb2checkbookmarks(pushop, bundler):
753 760 """insert bookmark move checking"""
754 761 if not _pushing(pushop) or pushop.force:
755 762 return
756 763 b2caps = bundle2.bundle2caps(pushop.remote)
757 764 hasbookmarkcheck = 'bookmarks' in b2caps
758 765 if not (pushop.outbookmarks and hasbookmarkcheck):
759 766 return
760 767 data = []
761 768 for book, old, new in pushop.outbookmarks:
762 769 old = bin(old)
763 770 data.append((book, old))
764 771 checkdata = bookmod.binaryencode(data)
765 772 bundler.newpart('check:bookmarks', data=checkdata)
766 773
767 774 @b2partsgenerator('check-phases')
768 775 def _pushb2checkphases(pushop, bundler):
769 776 """insert phase move checking"""
770 777 if not _pushing(pushop) or pushop.force:
771 778 return
772 779 b2caps = bundle2.bundle2caps(pushop.remote)
773 780 hasphaseheads = 'heads' in b2caps.get('phases', ())
774 781 if pushop.remotephases is not None and hasphaseheads:
775 782 # check that the remote phase has not changed
776 783 checks = [[] for p in phases.allphases]
777 784 checks[phases.public].extend(pushop.remotephases.publicheads)
778 785 checks[phases.draft].extend(pushop.remotephases.draftroots)
779 786 if any(checks):
780 787 for nodes in checks:
781 788 nodes.sort()
782 789 checkdata = phases.binaryencode(checks)
783 790 bundler.newpart('check:phases', data=checkdata)
784 791
785 792 @b2partsgenerator('changeset')
786 793 def _pushb2ctx(pushop, bundler):
787 794 """handle changegroup push through bundle2
788 795
789 796 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
790 797 """
791 798 if 'changesets' in pushop.stepsdone:
792 799 return
793 800 pushop.stepsdone.add('changesets')
794 801 # Send known heads to the server for race detection.
795 802 if not _pushcheckoutgoing(pushop):
796 803 return
797 804 pushop.repo.prepushoutgoinghooks(pushop)
798 805
799 806 _pushb2ctxcheckheads(pushop, bundler)
800 807
801 808 b2caps = bundle2.bundle2caps(pushop.remote)
802 809 version = '01'
803 810 cgversions = b2caps.get('changegroup')
804 811 if cgversions: # 3.1 and 3.2 ship with an empty value
805 812 cgversions = [v for v in cgversions
806 813 if v in changegroup.supportedoutgoingversions(
807 814 pushop.repo)]
808 815 if not cgversions:
809 816 raise ValueError(_('no common changegroup version'))
810 817 version = max(cgversions)
811 818 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
812 819 'push')
813 820 cgpart = bundler.newpart('changegroup', data=cgstream)
814 821 if cgversions:
815 822 cgpart.addparam('version', version)
816 823 if 'treemanifest' in pushop.repo.requirements:
817 824 cgpart.addparam('treemanifest', '1')
818 825 def handlereply(op):
819 826 """extract addchangegroup returns from server reply"""
820 827 cgreplies = op.records.getreplies(cgpart.id)
821 828 assert len(cgreplies['changegroup']) == 1
822 829 pushop.cgresult = cgreplies['changegroup'][0]['return']
823 830 return handlereply
824 831
825 832 @b2partsgenerator('phase')
826 833 def _pushb2phases(pushop, bundler):
827 834 """handle phase push through bundle2"""
828 835 if 'phases' in pushop.stepsdone:
829 836 return
830 837 b2caps = bundle2.bundle2caps(pushop.remote)
831 838 ui = pushop.repo.ui
832 839
833 840 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
834 841 haspushkey = 'pushkey' in b2caps
835 842 hasphaseheads = 'heads' in b2caps.get('phases', ())
836 843
837 844 if hasphaseheads and not legacyphase:
838 845 return _pushb2phaseheads(pushop, bundler)
839 846 elif haspushkey:
840 847 return _pushb2phasespushkey(pushop, bundler)
841 848
842 849 def _pushb2phaseheads(pushop, bundler):
843 850 """push phase information through a bundle2 - binary part"""
844 851 pushop.stepsdone.add('phases')
845 852 if pushop.outdatedphases:
846 853 updates = [[] for p in phases.allphases]
847 854 updates[0].extend(h.node() for h in pushop.outdatedphases)
848 855 phasedata = phases.binaryencode(updates)
849 856 bundler.newpart('phase-heads', data=phasedata)
850 857
851 858 def _pushb2phasespushkey(pushop, bundler):
852 859 """push phase information through a bundle2 - pushkey part"""
853 860 pushop.stepsdone.add('phases')
854 861 part2node = []
855 862
856 863 def handlefailure(pushop, exc):
857 864 targetid = int(exc.partid)
858 865 for partid, node in part2node:
859 866 if partid == targetid:
860 867 raise error.Abort(_('updating %s to public failed') % node)
861 868
862 869 enc = pushkey.encode
863 870 for newremotehead in pushop.outdatedphases:
864 871 part = bundler.newpart('pushkey')
865 872 part.addparam('namespace', enc('phases'))
866 873 part.addparam('key', enc(newremotehead.hex()))
867 874 part.addparam('old', enc('%d' % phases.draft))
868 875 part.addparam('new', enc('%d' % phases.public))
869 876 part2node.append((part.id, newremotehead))
870 877 pushop.pkfailcb[part.id] = handlefailure
871 878
872 879 def handlereply(op):
873 880 for partid, node in part2node:
874 881 partrep = op.records.getreplies(partid)
875 882 results = partrep['pushkey']
876 883 assert len(results) <= 1
877 884 msg = None
878 885 if not results:
879 886 msg = _('server ignored update of %s to public!\n') % node
880 887 elif not int(results[0]['return']):
881 888 msg = _('updating %s to public failed!\n') % node
882 889 if msg is not None:
883 890 pushop.ui.warn(msg)
884 891 return handlereply
885 892
886 893 @b2partsgenerator('obsmarkers')
887 894 def _pushb2obsmarkers(pushop, bundler):
888 895 if 'obsmarkers' in pushop.stepsdone:
889 896 return
890 897 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
891 898 if obsolete.commonversion(remoteversions) is None:
892 899 return
893 900 pushop.stepsdone.add('obsmarkers')
894 901 if pushop.outobsmarkers:
895 902 markers = sorted(pushop.outobsmarkers)
896 903 bundle2.buildobsmarkerspart(bundler, markers)
897 904
898 905 @b2partsgenerator('bookmarks')
899 906 def _pushb2bookmarks(pushop, bundler):
900 907 """handle bookmark push through bundle2"""
901 908 if 'bookmarks' in pushop.stepsdone:
902 909 return
903 910 b2caps = bundle2.bundle2caps(pushop.remote)
904 911
905 912 legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
906 913 legacybooks = 'bookmarks' in legacy
907 914
908 915 if not legacybooks and 'bookmarks' in b2caps:
909 916 return _pushb2bookmarkspart(pushop, bundler)
910 917 elif 'pushkey' in b2caps:
911 918 return _pushb2bookmarkspushkey(pushop, bundler)
912 919
913 920 def _bmaction(old, new):
914 921 """small utility for bookmark pushing"""
915 922 if not old:
916 923 return 'export'
917 924 elif not new:
918 925 return 'delete'
919 926 return 'update'
920 927
921 928 def _pushb2bookmarkspart(pushop, bundler):
922 929 pushop.stepsdone.add('bookmarks')
923 930 if not pushop.outbookmarks:
924 931 return
925 932
926 933 allactions = []
927 934 data = []
928 935 for book, old, new in pushop.outbookmarks:
929 936 new = bin(new)
930 937 data.append((book, new))
931 938 allactions.append((book, _bmaction(old, new)))
932 939 checkdata = bookmod.binaryencode(data)
933 940 bundler.newpart('bookmarks', data=checkdata)
934 941
935 942 def handlereply(op):
936 943 ui = pushop.ui
937 944 # if success
938 945 for book, action in allactions:
939 946 ui.status(bookmsgmap[action][0] % book)
940 947
941 948 return handlereply
942 949
943 950 def _pushb2bookmarkspushkey(pushop, bundler):
944 951 pushop.stepsdone.add('bookmarks')
945 952 part2book = []
946 953 enc = pushkey.encode
947 954
948 955 def handlefailure(pushop, exc):
949 956 targetid = int(exc.partid)
950 957 for partid, book, action in part2book:
951 958 if partid == targetid:
952 959 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
953 960 # we should not be called for parts we did not generate
954 961 assert False
955 962
956 963 for book, old, new in pushop.outbookmarks:
957 964 part = bundler.newpart('pushkey')
958 965 part.addparam('namespace', enc('bookmarks'))
959 966 part.addparam('key', enc(book))
960 967 part.addparam('old', enc(old))
961 968 part.addparam('new', enc(new))
962 969 action = 'update'
963 970 if not old:
964 971 action = 'export'
965 972 elif not new:
966 973 action = 'delete'
967 974 part2book.append((part.id, book, action))
968 975 pushop.pkfailcb[part.id] = handlefailure
969 976
970 977 def handlereply(op):
971 978 ui = pushop.ui
972 979 for partid, book, action in part2book:
973 980 partrep = op.records.getreplies(partid)
974 981 results = partrep['pushkey']
975 982 assert len(results) <= 1
976 983 if not results:
977 984 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
978 985 else:
979 986 ret = int(results[0]['return'])
980 987 if ret:
981 988 ui.status(bookmsgmap[action][0] % book)
982 989 else:
983 990 ui.warn(bookmsgmap[action][1] % book)
984 991 if pushop.bkresult is not None:
985 992 pushop.bkresult = 1
986 993 return handlereply
987 994
988 995 @b2partsgenerator('pushvars', idx=0)
989 996 def _getbundlesendvars(pushop, bundler):
990 997 '''send shellvars via bundle2'''
991 998 pushvars = pushop.pushvars
992 999 if pushvars:
993 1000 shellvars = {}
994 1001 for raw in pushvars:
995 1002 if '=' not in raw:
996 1003 msg = ("unable to parse variable '%s', should follow "
997 1004 "'KEY=VALUE' or 'KEY=' format")
998 1005 raise error.Abort(msg % raw)
999 1006 k, v = raw.split('=', 1)
1000 1007 shellvars[k] = v
1001 1008
1002 1009 part = bundler.newpart('pushvars')
1003 1010
1004 1011 for key, value in shellvars.iteritems():
1005 1012 part.addparam(key, value, mandatory=False)
1006 1013
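A usage sketch for the pushvars part built above (assuming the standard HG_USERVAR_* environment passed to server-side hooks):

    # client side:
    #   $ hg push --pushvars "DEBUG=1" --pushvars "BYPASS_REVIEW="
    #
    # a server-side hook then sees:
    #   HG_USERVAR_DEBUG=1
    #   HG_USERVAR_BYPASS_REVIEW=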
1007 1014 def _pushbundle2(pushop):
1008 1015 """push data to the remote using bundle2
1009 1016
1010 1017 The only currently supported type of data is changegroup but this will
1011 1018 evolve in the future."""
1012 1019 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1013 1020 pushback = (pushop.trmanager
1014 1021 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
1015 1022
1016 1023 # create reply capability
1017 1024 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
1018 1025 allowpushback=pushback,
1019 1026 role='client'))
1020 1027 bundler.newpart('replycaps', data=capsblob)
1021 1028 replyhandlers = []
1022 1029 for partgenname in b2partsgenorder:
1023 1030 partgen = b2partsgenmapping[partgenname]
1024 1031 ret = partgen(pushop, bundler)
1025 1032 if callable(ret):
1026 1033 replyhandlers.append(ret)
1027 1034 # do not push if nothing to push
1028 1035 if bundler.nbparts <= 1:
1029 1036 return
1030 1037 stream = util.chunkbuffer(bundler.getchunks())
1031 1038 try:
1032 1039 try:
1033 1040 reply = pushop.remote.unbundle(
1034 1041 stream, ['force'], pushop.remote.url())
1035 1042 except error.BundleValueError as exc:
1036 1043 raise error.Abort(_('missing support for %s') % exc)
1037 1044 try:
1038 1045 trgetter = None
1039 1046 if pushback:
1040 1047 trgetter = pushop.trmanager.transaction
1041 1048 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1042 1049 except error.BundleValueError as exc:
1043 1050 raise error.Abort(_('missing support for %s') % exc)
1044 1051 except bundle2.AbortFromPart as exc:
1045 1052 pushop.ui.status(_('remote: %s\n') % exc)
1046 1053 if exc.hint is not None:
1047 1054 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
1048 1055 raise error.Abort(_('push failed on remote'))
1049 1056 except error.PushkeyFailed as exc:
1050 1057 partid = int(exc.partid)
1051 1058 if partid not in pushop.pkfailcb:
1052 1059 raise
1053 1060 pushop.pkfailcb[partid](pushop, exc)
1054 1061 for rephand in replyhandlers:
1055 1062 rephand(op)
1056 1063
1057 1064 def _pushchangeset(pushop):
1058 1065 """Make the actual push of changeset bundle to remote repo"""
1059 1066 if 'changesets' in pushop.stepsdone:
1060 1067 return
1061 1068 pushop.stepsdone.add('changesets')
1062 1069 if not _pushcheckoutgoing(pushop):
1063 1070 return
1064 1071
1065 1072 # Should have verified this in push().
1066 1073 assert pushop.remote.capable('unbundle')
1067 1074
1068 1075 pushop.repo.prepushoutgoinghooks(pushop)
1069 1076 outgoing = pushop.outgoing
1070 1077 # TODO: get bundlecaps from remote
1071 1078 bundlecaps = None
1072 1079 # create a changegroup from local
1073 1080 if pushop.revs is None and not (outgoing.excluded
1074 1081 or pushop.repo.changelog.filteredrevs):
1075 1082 # push everything,
1076 1083 # use the fast path, no race possible on push
1077 1084 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1078 1085 fastpath=True, bundlecaps=bundlecaps)
1079 1086 else:
1080 1087 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1081 1088 'push', bundlecaps=bundlecaps)
1082 1089
1083 1090 # apply changegroup to remote
1084 1091 # local repo finds heads on server, finds out what
1085 1092 # revs it must push. once revs transferred, if server
1086 1093 # finds it has different heads (someone else won
1087 1094 # commit/push race), server aborts.
1088 1095 if pushop.force:
1089 1096 remoteheads = ['force']
1090 1097 else:
1091 1098 remoteheads = pushop.remoteheads
1092 1099 # ssh: return remote's addchangegroup()
1093 1100 # http: return remote's addchangegroup() or 0 for error
1094 1101 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1095 1102 pushop.repo.url())
1096 1103
1097 1104 def _pushsyncphase(pushop):
1098 1105 """synchronise phase information locally and remotely"""
1099 1106 cheads = pushop.commonheads
1100 1107 # even when we don't push, exchanging phase data is useful
1101 1108 remotephases = pushop.remote.listkeys('phases')
1102 1109 if (pushop.ui.configbool('ui', '_usedassubrepo')
1103 1110 and remotephases # server supports phases
1104 1111 and pushop.cgresult is None # nothing was pushed
1105 1112 and remotephases.get('publishing', False)):
1106 1113 # When:
1107 1114 # - this is a subrepo push
1108 1115 # - and the remote supports phases
1109 1116 # - and no changesets were pushed
1110 1117 # - and the remote is publishing
1111 1118 # We may be in the issue 3871 case!
1112 1119 # We skip the phase synchronisation that would otherwise be done as
1113 1120 # a courtesy, as it could publish changesets that are still draft
1114 1121 # locally but already present on the remote.
1115 1122 remotephases = {'publishing': 'True'}
1116 1123 if not remotephases: # old server or public-only reply from non-publishing
1117 1124 _localphasemove(pushop, cheads)
1118 1125 # don't push any phase data as there is nothing to push
1119 1126 else:
1120 1127 ana = phases.analyzeremotephases(pushop.repo, cheads,
1121 1128 remotephases)
1122 1129 pheads, droots = ana
1123 1130 ### Apply remote phase on local
1124 1131 if remotephases.get('publishing', False):
1125 1132 _localphasemove(pushop, cheads)
1126 1133 else: # publish = False
1127 1134 _localphasemove(pushop, pheads)
1128 1135 _localphasemove(pushop, cheads, phases.draft)
1129 1136 ### Apply local phase on remote
1130 1137
1131 1138 if pushop.cgresult:
1132 1139 if 'phases' in pushop.stepsdone:
1133 1140 # phases already pushed through bundle2
1134 1141 return
1135 1142 outdated = pushop.outdatedphases
1136 1143 else:
1137 1144 outdated = pushop.fallbackoutdatedphases
1138 1145
1139 1146 pushop.stepsdone.add('phases')
1140 1147
1141 1148 # filter heads already turned public by the push
1142 1149 outdated = [c for c in outdated if c.node() not in pheads]
1143 1150 # fallback to independent pushkey command
1144 1151 for newremotehead in outdated:
1145 1152 r = pushop.remote.pushkey('phases',
1146 1153 newremotehead.hex(),
1147 1154 str(phases.draft),
1148 1155 str(phases.public))
1149 1156 if not r:
1150 1157 pushop.ui.warn(_('updating %s to public failed!\n')
1151 1158 % newremotehead)
1152 1159
1153 1160 def _localphasemove(pushop, nodes, phase=phases.public):
1154 1161 """move <nodes> to <phase> in the local source repo"""
1155 1162 if pushop.trmanager:
1156 1163 phases.advanceboundary(pushop.repo,
1157 1164 pushop.trmanager.transaction(),
1158 1165 phase,
1159 1166 nodes)
1160 1167 else:
1161 1168 # repo is not locked, do not change any phases!
1162 1169 # Inform the user that phases should have been moved when
1163 1170 # applicable.
1164 1171 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1165 1172 phasestr = phases.phasenames[phase]
1166 1173 if actualmoves:
1167 1174 pushop.ui.status(_('cannot lock source repo, skipping '
1168 1175 'local %s phase update\n') % phasestr)
1169 1176
1170 1177 def _pushobsolete(pushop):
1171 1178 """utility function to push obsolete markers to a remote"""
1172 1179 if 'obsmarkers' in pushop.stepsdone:
1173 1180 return
1174 1181 repo = pushop.repo
1175 1182 remote = pushop.remote
1176 1183 pushop.stepsdone.add('obsmarkers')
1177 1184 if pushop.outobsmarkers:
1178 1185 pushop.ui.debug('try to push obsolete markers to remote\n')
1179 1186 rslts = []
1180 1187 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1181 1188 for key in sorted(remotedata, reverse=True):
1182 1189 # reverse sort to ensure we end with dump0
1183 1190 data = remotedata[key]
1184 1191 rslts.append(remote.pushkey('obsolete', key, '', data))
1185 1192 if [r for r in rslts if not r]:
1186 1193 msg = _('failed to push some obsolete markers!\n')
1187 1194 repo.ui.warn(msg)
1188 1195
1189 1196 def _pushbookmark(pushop):
1190 1197 """Update bookmark position on remote"""
1191 1198 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1192 1199 return
1193 1200 pushop.stepsdone.add('bookmarks')
1194 1201 ui = pushop.ui
1195 1202 remote = pushop.remote
1196 1203
1197 1204 for b, old, new in pushop.outbookmarks:
1198 1205 action = 'update'
1199 1206 if not old:
1200 1207 action = 'export'
1201 1208 elif not new:
1202 1209 action = 'delete'
1203 1210 if remote.pushkey('bookmarks', b, old, new):
1204 1211 ui.status(bookmsgmap[action][0] % b)
1205 1212 else:
1206 1213 ui.warn(bookmsgmap[action][1] % b)
1207 1214 # discovery may have set the value from an invalid entry
1208 1215 if pushop.bkresult is not None:
1209 1216 pushop.bkresult = 1
1210 1217
1211 1218 class pulloperation(object):
1212 1219 """An object that represents a single pull operation
1213 1220
1214 1221 Its purpose is to carry pull-related state and very common operations.
1215 1222
1216 1223 A new one should be created at the beginning of each pull and discarded
1217 1224 afterward.
1218 1225 """
1219 1226
1220 1227 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1221 1228 remotebookmarks=None, streamclonerequested=None):
1222 1229 # repo we pull into
1223 1230 self.repo = repo
1224 1231 # repo we pull from
1225 1232 self.remote = remote
1226 1233 # revision we try to pull (None is "all")
1227 1234 self.heads = heads
1228 1235 # bookmark pulled explicitly
1229 1236 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1230 1237 for bookmark in bookmarks]
1231 1238 # do we force pull?
1232 1239 self.force = force
1233 1240 # whether a streaming clone was requested
1234 1241 self.streamclonerequested = streamclonerequested
1235 1242 # transaction manager
1236 1243 self.trmanager = None
1237 1244 # set of common changesets between local and remote before pull
1238 1245 self.common = None
1239 1246 # set of pulled heads
1240 1247 self.rheads = None
1241 1248 # list of missing changesets to fetch remotely
1242 1249 self.fetch = None
1243 1250 # remote bookmarks data
1244 1251 self.remotebookmarks = remotebookmarks
1245 1252 # result of changegroup pulling (used as return code by pull)
1246 1253 self.cgresult = None
1247 1254 # list of steps already done
1248 1255 self.stepsdone = set()
1249 1256 # Whether we attempted a clone from pre-generated bundles.
1250 1257 self.clonebundleattempted = False
1251 1258
1252 1259 @util.propertycache
1253 1260 def pulledsubset(self):
1254 1261 """heads of the set of changesets targeted by the pull"""
1255 1262 # compute target subset
1256 1263 if self.heads is None:
1257 1264 # We pulled everything possible
1258 1265 # sync on everything common
1259 1266 c = set(self.common)
1260 1267 ret = list(self.common)
1261 1268 for n in self.rheads:
1262 1269 if n not in c:
1263 1270 ret.append(n)
1264 1271 return ret
1265 1272 else:
1266 1273 # We pulled a specific subset
1267 1274 # sync on this subset
1268 1275 return self.heads
1269 1276
1270 1277 @util.propertycache
1271 1278 def canusebundle2(self):
1272 1279 return not _forcebundle1(self)
1273 1280
1274 1281 @util.propertycache
1275 1282 def remotebundle2caps(self):
1276 1283 return bundle2.bundle2caps(self.remote)
1277 1284
1278 1285 def gettransaction(self):
1279 1286 # deprecated; talk to trmanager directly
1280 1287 return self.trmanager.transaction()
1281 1288
1282 1289 class transactionmanager(util.transactional):
1283 1290 """An object to manage the life cycle of a transaction
1284 1291
1285 1292 It creates the transaction on demand and calls the appropriate hooks when
1286 1293 closing the transaction."""
1287 1294 def __init__(self, repo, source, url):
1288 1295 self.repo = repo
1289 1296 self.source = source
1290 1297 self.url = url
1291 1298 self._tr = None
1292 1299
1293 1300 def transaction(self):
1294 1301 """Return an open transaction object, constructing if necessary"""
1295 1302 if not self._tr:
1296 1303 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1297 1304 self._tr = self.repo.transaction(trname)
1298 1305 self._tr.hookargs['source'] = self.source
1299 1306 self._tr.hookargs['url'] = self.url
1300 1307 return self._tr
1301 1308
1302 1309 def close(self):
1303 1310 """close transaction if created"""
1304 1311 if self._tr is not None:
1305 1312 self._tr.close()
1306 1313
1307 1314 def release(self):
1308 1315 """release transaction if created"""
1309 1316 if self._tr is not None:
1310 1317 self._tr.release()
1311 1318
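A usage sketch (this is how pull() below drives it; util.transactional supplies the context-manager behaviour, calling close() on success and release() in all cases):

    trmanager = transactionmanager(repo, 'pull', remote.url())
    with trmanager:
        tr = trmanager.transaction()  # opened lazily, hook args attached
        # ... apply incoming data under tr ...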
1312 1319 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1313 1320 streamclonerequested=None):
1314 1321 """Fetch repository data from a remote.
1315 1322
1316 1323 This is the main function used to retrieve data from a remote repository.
1317 1324
1318 1325 ``repo`` is the local repository to clone into.
1319 1326 ``remote`` is a peer instance.
1320 1327 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1321 1328 default) means to pull everything from the remote.
1322 1329 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1323 1330 default, all remote bookmarks are pulled.
1324 1331 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1325 1332 initialization.
1326 1333 ``streamclonerequested`` is a boolean indicating whether a "streaming
1327 1334 clone" is requested. A "streaming clone" is essentially a raw file copy
1328 1335 of revlogs from the server. This only works when the local repository is
1329 1336 empty. The default value of ``None`` means to respect the server
1330 1337 configuration for preferring stream clones.
1331 1338
1332 1339 Returns the ``pulloperation`` created for this pull.
1333 1340 """
1334 1341 if opargs is None:
1335 1342 opargs = {}
1336 1343 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1337 1344 streamclonerequested=streamclonerequested,
1338 1345 **pycompat.strkwargs(opargs))
1339 1346
1340 1347 peerlocal = pullop.remote.local()
1341 1348 if peerlocal:
1342 1349 missing = set(peerlocal.requirements) - pullop.repo.supported
1343 1350 if missing:
1344 1351 msg = _("required features are not"
1345 1352 " supported in the destination:"
1346 1353 " %s") % (', '.join(sorted(missing)))
1347 1354 raise error.Abort(msg)
1348 1355
1349 1356 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1350 1357 with repo.wlock(), repo.lock(), pullop.trmanager:
1351 1358 # This should ideally be in _pullbundle2(). However, it needs to run
1352 1359 # before discovery to avoid extra work.
1353 1360 _maybeapplyclonebundle(pullop)
1354 1361 streamclone.maybeperformlegacystreamclone(pullop)
1355 1362 _pulldiscovery(pullop)
1356 1363 if pullop.canusebundle2:
1357 1364 _pullbundle2(pullop)
1358 1365 _pullchangeset(pullop)
1359 1366 _pullphase(pullop)
1360 1367 _pullbookmarks(pullop)
1361 1368 _pullobsolete(pullop)
1362 1369
1363 1370 # storing remotenames
1364 1371 if repo.ui.configbool('experimental', 'remotenames'):
1365 1372 logexchange.pullremotenames(repo, remote)
1366 1373
1367 1374 return pullop
1368 1375
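A minimal caller sketch (mirroring 'hg pull'; the URL is hypothetical):

    from mercurial import exchange, hg

    peer = hg.peer(repo.ui, {}, 'https://example.com/repo')
    pullop = exchange.pull(repo, peer)  # heads=None pulls everything
    # pullop.cgresult holds the changegroup result (0 = no changes found)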
1369 1376 # list of steps to perform discovery before pull
1370 1377 pulldiscoveryorder = []
1371 1378
1372 1379 # Mapping between step name and function
1373 1380 #
1374 1381 # This exists to help extensions wrap steps if necessary
1375 1382 pulldiscoverymapping = {}
1376 1383
1377 1384 def pulldiscovery(stepname):
1378 1385 """decorator for function performing discovery before pull
1379 1386
1380 1387 The function is added to the step -> function mapping and appended to the
1381 1388 list of steps. Beware that decorated functions will be added in order (this
1382 1389 may matter).
1383 1390
1384 1391 You can only use this decorator for a new step; if you want to wrap a step
1385 1392 from an extension, change the pulldiscoverymapping dictionary directly."""
1386 1393 def dec(func):
1387 1394 assert stepname not in pulldiscoverymapping
1388 1395 pulldiscoverymapping[stepname] = func
1389 1396 pulldiscoveryorder.append(stepname)
1390 1397 return func
1391 1398 return dec
1392 1399
1393 1400 def _pulldiscovery(pullop):
1394 1401 """Run all discovery steps"""
1395 1402 for stepname in pulldiscoveryorder:
1396 1403 step = pulldiscoverymapping[stepname]
1397 1404 step(pullop)
1398 1405
1399 1406 @pulldiscovery('b1:bookmarks')
1400 1407 def _pullbookmarkbundle1(pullop):
1401 1408 """fetch bookmark data in bundle1 case
1402 1409
1403 1410 If not using bundle2, we have to fetch bookmarks before changeset
1404 1411 discovery to reduce the chance and impact of race conditions."""
1405 1412 if pullop.remotebookmarks is not None:
1406 1413 return
1407 1414 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1408 1415 # all known bundle2 servers now support listkeys, but let's be nice to
1409 1416 # new implementations.
1410 1417 return
1411 1418 books = pullop.remote.listkeys('bookmarks')
1412 1419 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1413 1420
1414 1421
1415 1422 @pulldiscovery('changegroup')
1416 1423 def _pulldiscoverychangegroup(pullop):
1417 1424 """discovery phase for the pull
1418 1425
1419 1426 Currently handles changeset discovery only; will change to handle all
1420 1427 discovery at some point."""
1421 1428 tmp = discovery.findcommonincoming(pullop.repo,
1422 1429 pullop.remote,
1423 1430 heads=pullop.heads,
1424 1431 force=pullop.force)
1425 1432 common, fetch, rheads = tmp
1426 1433 nm = pullop.repo.unfiltered().changelog.nodemap
1427 1434 if fetch and rheads:
1428 1435 # If a remote head is filtered locally, put it back in common.
1429 1436 #
1430 1437 # This is a hackish solution to catch most of the "common but locally
1431 1438 # hidden" situations. We do not perform discovery on the unfiltered
1432 1439 # repository because it ends up doing a pathological amount of round
1433 1440 # trips for a huge amount of changesets we do not care about.
1434 1441 #
1435 1442 # If a set of such "common but filtered" changesets exists on the server
1436 1443 # but does not include a remote head, we'll not be able to detect it.
1437 1444 scommon = set(common)
1438 1445 for n in rheads:
1439 1446 if n in nm:
1440 1447 if n not in scommon:
1441 1448 common.append(n)
1442 1449 if set(rheads).issubset(set(common)):
1443 1450 fetch = []
1444 1451 pullop.common = common
1445 1452 pullop.fetch = fetch
1446 1453 pullop.rheads = rheads
1447 1454
1448 1455 def _pullbundle2(pullop):
1449 1456 """pull data using bundle2
1450 1457
1451 1458 For now, the only supported data is the changegroup."""
1452 1459 kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
1453 1460
1454 1461 # make ui easier to access
1455 1462 ui = pullop.repo.ui
1456 1463
1457 1464 # At the moment we don't do stream clones over bundle2. If that is
1458 1465 # implemented then here's where the check for that will go.
1459 1466 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1460 1467
1461 1468 # declare pull perimeters
1462 1469 kwargs['common'] = pullop.common
1463 1470 kwargs['heads'] = pullop.heads or pullop.rheads
1464 1471
1465 1472 if streaming:
1466 1473 kwargs['cg'] = False
1467 1474 kwargs['stream'] = True
1468 1475 pullop.stepsdone.add('changegroup')
1469 1476 pullop.stepsdone.add('phases')
1470 1477
1471 1478 else:
1472 1479 # pulling changegroup
1473 1480 pullop.stepsdone.add('changegroup')
1474 1481
1475 1482 kwargs['cg'] = pullop.fetch
1476 1483
1477 1484 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1478 1485 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1479 1486 if (not legacyphase and hasbinaryphase):
1480 1487 kwargs['phases'] = True
1481 1488 pullop.stepsdone.add('phases')
1482 1489
1483 1490 if 'listkeys' in pullop.remotebundle2caps:
1484 1491 if 'phases' not in pullop.stepsdone:
1485 1492 kwargs['listkeys'] = ['phases']
1486 1493
1487 1494 bookmarksrequested = False
1488 1495 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1489 1496 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1490 1497
1491 1498 if pullop.remotebookmarks is not None:
1492 1499 pullop.stepsdone.add('request-bookmarks')
1493 1500
1494 1501 if ('request-bookmarks' not in pullop.stepsdone
1495 1502 and pullop.remotebookmarks is None
1496 1503 and not legacybookmark and hasbinarybook):
1497 1504 kwargs['bookmarks'] = True
1498 1505 bookmarksrequested = True
1499 1506
1500 1507 if 'listkeys' in pullop.remotebundle2caps:
1501 1508 if 'request-bookmarks' not in pullop.stepsdone:
1502 1509 # make sure to always include bookmark data when migrating
1503 1510 # `hg incoming --bundle` to using this function.
1504 1511 pullop.stepsdone.add('request-bookmarks')
1505 1512 kwargs.setdefault('listkeys', []).append('bookmarks')
1506 1513
1507 1514 # If this is a full pull / clone and the server supports the clone bundles
1508 1515 # feature, tell the server whether we attempted a clone bundle. The
1509 1516 # presence of this flag indicates the client supports clone bundles. This
1510 1517 # will enable the server to treat clients that support clone bundles
1511 1518 # differently from those that don't.
1512 1519 if (pullop.remote.capable('clonebundles')
1513 1520 and pullop.heads is None and list(pullop.common) == [nullid]):
1514 1521 kwargs['cbattempted'] = pullop.clonebundleattempted
1515 1522
1516 1523 if streaming:
1517 1524 pullop.repo.ui.status(_('streaming all changes\n'))
1518 1525 elif not pullop.fetch:
1519 1526 pullop.repo.ui.status(_("no changes found\n"))
1520 1527 pullop.cgresult = 0
1521 1528 else:
1522 1529 if pullop.heads is None and list(pullop.common) == [nullid]:
1523 1530 pullop.repo.ui.status(_("requesting all changes\n"))
1524 1531 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1525 1532 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1526 1533 if obsolete.commonversion(remoteversions) is not None:
1527 1534 kwargs['obsmarkers'] = True
1528 1535 pullop.stepsdone.add('obsmarkers')
1529 1536 _pullbundle2extraprepare(pullop, kwargs)
1530 1537 bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
1531 1538 try:
1532 1539 op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction)
1533 1540 op.modes['bookmarks'] = 'records'
1534 1541 bundle2.processbundle(pullop.repo, bundle, op=op)
1535 1542 except bundle2.AbortFromPart as exc:
1536 1543 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1537 1544 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1538 1545 except error.BundleValueError as exc:
1539 1546 raise error.Abort(_('missing support for %s') % exc)
1540 1547
1541 1548 if pullop.fetch:
1542 1549 pullop.cgresult = bundle2.combinechangegroupresults(op)
1543 1550
1544 1551 # processing phases change
1545 1552 for namespace, value in op.records['listkeys']:
1546 1553 if namespace == 'phases':
1547 1554 _pullapplyphases(pullop, value)
1548 1555
1549 1556 # processing bookmark update
1550 1557 if bookmarksrequested:
1551 1558 books = {}
1552 1559 for record in op.records['bookmarks']:
1553 1560 books[record['bookmark']] = record["node"]
1554 1561 pullop.remotebookmarks = books
1555 1562 else:
1556 1563 for namespace, value in op.records['listkeys']:
1557 1564 if namespace == 'bookmarks':
1558 1565 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1559 1566
1560 1567 # bookmark data were either already there or pulled in the bundle
1561 1568 if pullop.remotebookmarks is not None:
1562 1569 _pullbookmarks(pullop)
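
# For a typical bundle2 pull, the assembled getbundle arguments end up looking
# roughly like this (values illustrative):
#
#     {'bundlecaps': {'HG20', 'bundle2=...'}, 'common': [...], 'heads': [...],
#      'cg': True, 'phases': True, 'bookmarks': True}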
1563 1570
1564 1571 def _pullbundle2extraprepare(pullop, kwargs):
1565 1572 """hook function so that extensions can extend the getbundle call"""
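
# Example (hypothetical extension code): the usual way to extend this hook is
# extensions.wrapfunction(); '_myprepare' and 'mypartarg' are illustrative:
#
#     from mercurial import exchange, extensions
#
#     def _myprepare(orig, pullop, kwargs):
#         kwargs['mypartarg'] = True  # ask the server for an extra part
#         return orig(pullop, kwargs)
#
#     extensions.wrapfunction(exchange, '_pullbundle2extraprepare', _myprepare)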
1566 1573
1567 1574 def _pullchangeset(pullop):
1568 1575 """pull changeset from unbundle into the local repo"""
1569 1576 # We delay opening the transaction as long as possible so we don't
1570 1577 # open a transaction for nothing and don't break a future useful
1571 1578 # rollback call.
1572 1579 if 'changegroup' in pullop.stepsdone:
1573 1580 return
1574 1581 pullop.stepsdone.add('changegroup')
1575 1582 if not pullop.fetch:
1576 1583 pullop.repo.ui.status(_("no changes found\n"))
1577 1584 pullop.cgresult = 0
1578 1585 return
1579 1586 tr = pullop.gettransaction()
1580 1587 if pullop.heads is None and list(pullop.common) == [nullid]:
1581 1588 pullop.repo.ui.status(_("requesting all changes\n"))
1582 1589 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1583 1590 # issue1320, avoid a race if remote changed after discovery
1584 1591 pullop.heads = pullop.rheads
1585 1592
1586 1593 if pullop.remote.capable('getbundle'):
1587 1594 # TODO: get bundlecaps from remote
1588 1595 cg = pullop.remote.getbundle('pull', common=pullop.common,
1589 1596 heads=pullop.heads or pullop.rheads)
1590 1597 elif pullop.heads is None:
1591 1598 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1592 1599 elif not pullop.remote.capable('changegroupsubset'):
1593 1600 raise error.Abort(_("partial pull cannot be done because "
1594 1601 "other repository doesn't support "
1595 1602 "changegroupsubset."))
1596 1603 else:
1597 1604 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1598 1605 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1599 1606 pullop.remote.url())
1600 1607 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1601 1608
1602 1609 def _pullphase(pullop):
1603 1610 # Get remote phases data from remote
1604 1611 if 'phases' in pullop.stepsdone:
1605 1612 return
1606 1613 remotephases = pullop.remote.listkeys('phases')
1607 1614 _pullapplyphases(pullop, remotephases)
1608 1615
1609 1616 def _pullapplyphases(pullop, remotephases):
1610 1617 """apply phase movement from observed remote state"""
1611 1618 if 'phases' in pullop.stepsdone:
1612 1619 return
1613 1620 pullop.stepsdone.add('phases')
1614 1621 publishing = bool(remotephases.get('publishing', False))
1615 1622 if remotephases and not publishing:
1616 1623 # remote is new and non-publishing
1617 1624 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1618 1625 pullop.pulledsubset,
1619 1626 remotephases)
1620 1627 dheads = pullop.pulledsubset
1621 1628 else:
1622 1629 # Remote is old or publishing; all common changesets
1623 1630 # should be seen as public
1624 1631 pheads = pullop.pulledsubset
1625 1632 dheads = []
1626 1633 unfi = pullop.repo.unfiltered()
1627 1634 phase = unfi._phasecache.phase
1628 1635 rev = unfi.changelog.nodemap.get
1629 1636 public = phases.public
1630 1637 draft = phases.draft
1631 1638
1632 1639 # exclude changesets already public locally and update the others
1633 1640 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1634 1641 if pheads:
1635 1642 tr = pullop.gettransaction()
1636 1643 phases.advanceboundary(pullop.repo, tr, public, pheads)
1637 1644
1638 1645 # exclude changesets already draft locally and update the others
1639 1646 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1640 1647 if dheads:
1641 1648 tr = pullop.gettransaction()
1642 1649 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1643 1650
1644 1651 def _pullbookmarks(pullop):
1645 1652 """process the remote bookmark information to update the local one"""
1646 1653 if 'bookmarks' in pullop.stepsdone:
1647 1654 return
1648 1655 pullop.stepsdone.add('bookmarks')
1649 1656 repo = pullop.repo
1650 1657 remotebookmarks = pullop.remotebookmarks
1651 1658 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1652 1659 pullop.remote.url(),
1653 1660 pullop.gettransaction,
1654 1661 explicit=pullop.explicitbookmarks)
1655 1662
1656 1663 def _pullobsolete(pullop):
1657 1664 """utility function to pull obsolete markers from a remote
1658 1665
1659 1666 The `gettransaction` argument is a function that returns the pull
1660 1667 transaction, creating one if necessary. We return the transaction to inform
1661 1668 the calling code that a new transaction has been created (when applicable).
1662 1669 
1663 1670 Exists mostly to allow overriding for experimentation purposes."""
1664 1671 if 'obsmarkers' in pullop.stepsdone:
1665 1672 return
1666 1673 pullop.stepsdone.add('obsmarkers')
1667 1674 tr = None
1668 1675 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1669 1676 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1670 1677 remoteobs = pullop.remote.listkeys('obsolete')
1671 1678 if 'dump0' in remoteobs:
1672 1679 tr = pullop.gettransaction()
1673 1680 markers = []
1674 1681 for key in sorted(remoteobs, reverse=True):
1675 1682 if key.startswith('dump'):
1676 1683 data = util.b85decode(remoteobs[key])
1677 1684 version, newmarks = obsolete._readmarkers(data)
1678 1685 markers += newmarks
1679 1686 if markers:
1680 1687 pullop.repo.obsstore.add(tr, markers)
1681 1688 pullop.repo.invalidatevolatilesets()
1682 1689 return tr
1683 1690
1684 1691 def caps20to10(repo, role):
1685 1692 """return a set with appropriate options to use bundle20 during getbundle"""
1686 1693 caps = {'HG20'}
1687 1694 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
1688 1695 caps.add('bundle2=' + urlreq.quote(capsblob))
1689 1696 return caps
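
# The resulting set looks roughly like (capability blob shortened):
#
#     {'HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02...'}
#
# i.e. the bundle2 capabilities are URL-quoted into a single 'bundle2=' entry
# alongside the plain 'HG20' marker.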
1690 1697
1691 1698 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1692 1699 getbundle2partsorder = []
1693 1700
1694 1701 # Mapping between step name and function
1695 1702 #
1696 1703 # This exists to help extensions wrap steps if necessary
1697 1704 getbundle2partsmapping = {}
1698 1705
1699 1706 def getbundle2partsgenerator(stepname, idx=None):
1700 1707 """decorator for function generating bundle2 part for getbundle
1701 1708
1702 1709 The function is added to the step -> function mapping and appended to the
1703 1710 list of steps. Beware that decorated functions will be added in order
1704 1711 (this may matter).
1705 1712
1706 1713 You can only use this decorator for new steps; if you want to wrap a step
1707 1714 from an extension, modify the getbundle2partsmapping dictionary directly."""
1708 1715 def dec(func):
1709 1716 assert stepname not in getbundle2partsmapping
1710 1717 getbundle2partsmapping[stepname] = func
1711 1718 if idx is None:
1712 1719 getbundle2partsorder.append(stepname)
1713 1720 else:
1714 1721 getbundle2partsorder.insert(idx, stepname)
1715 1722 return func
1716 1723 return dec
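
# Example (illustrative names only): an extension could add its own part to
# getbundle responses by registering a generator:
#
#     @getbundle2partsgenerator('mypart')
#     def _getbundlemypart(bundler, repo, source, bundlecaps=None,
#                          b2caps=None, **kwargs):
#         bundler.newpart('mypart', data='hello', mandatory=False)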
1717 1724
1718 1725 def bundle2requested(bundlecaps):
1719 1726 if bundlecaps is not None:
1720 1727 return any(cap.startswith('HG2') for cap in bundlecaps)
1721 1728 return False
1722 1729
1723 1730 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1724 1731 **kwargs):
1725 1732 """Return chunks constituting a bundle's raw data.
1726 1733
1727 1734 Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
1728 1735 passed.
1729 1736
1730 1737 Returns a 2-tuple of a dict with metadata about the generated bundle
1731 1738 and an iterator over raw chunks (of varying sizes).
1732 1739 """
1733 1740 kwargs = pycompat.byteskwargs(kwargs)
1734 1741 info = {}
1735 1742 usebundle2 = bundle2requested(bundlecaps)
1736 1743 # bundle10 case
1737 1744 if not usebundle2:
1738 1745 if bundlecaps and not kwargs.get('cg', True):
1739 1746 raise ValueError(_('request for bundle10 must include changegroup'))
1740 1747
1741 1748 if kwargs:
1742 1749 raise ValueError(_('unsupported getbundle arguments: %s')
1743 1750 % ', '.join(sorted(kwargs.keys())))
1744 1751 outgoing = _computeoutgoing(repo, heads, common)
1745 1752 info['bundleversion'] = 1
1746 1753 return info, changegroup.makestream(repo, outgoing, '01', source,
1747 1754 bundlecaps=bundlecaps)
1748 1755
1749 1756 # bundle20 case
1750 1757 info['bundleversion'] = 2
1751 1758 b2caps = {}
1752 1759 for bcaps in bundlecaps:
1753 1760 if bcaps.startswith('bundle2='):
1754 1761 blob = urlreq.unquote(bcaps[len('bundle2='):])
1755 1762 b2caps.update(bundle2.decodecaps(blob))
1756 1763 bundler = bundle2.bundle20(repo.ui, b2caps)
1757 1764
1758 1765 kwargs['heads'] = heads
1759 1766 kwargs['common'] = common
1760 1767
1761 1768 for name in getbundle2partsorder:
1762 1769 func = getbundle2partsmapping[name]
1763 1770 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1764 1771 **pycompat.strkwargs(kwargs))
1765 1772
1766 1773 info['prefercompressed'] = bundler.prefercompressed
1767 1774
1768 1775 return info, bundler.getchunks()
1769 1776
1770 1777 @getbundle2partsgenerator('stream2')
1771 1778 def _getbundlestream2(bundler, repo, source, bundlecaps=None,
1772 1779 b2caps=None, heads=None, common=None, **kwargs):
1773 1780 if not kwargs.get('stream', False):
1774 1781 return
1775 1782
1776 1783 if not streamclone.allowservergeneration(repo):
1777 1784 raise error.Abort(_('stream data requested but server does not allow '
1778 1785 'this feature'),
1779 1786 hint=_('well-behaved clients should not be '
1780 1787 'requesting stream data from servers not '
1781 1788 'advertising it; the client may be buggy'))
1782 1789
1783 1790 # Stream clones don't compress well. And compression undermines a
1784 1791 # goal of stream clones, which is to be fast. Communicate the desire
1785 1792 # to avoid compression to consumers of the bundle.
1786 1793 bundler.prefercompressed = False
1787 1794
1788 1795 filecount, bytecount, it = streamclone.generatev2(repo)
1789 1796 requirements = ' '.join(sorted(repo.requirements))
1790 1797 part = bundler.newpart('stream2', data=it)
1791 1798 part.addparam('bytecount', '%d' % bytecount, mandatory=True)
1792 1799 part.addparam('filecount', '%d' % filecount, mandatory=True)
1793 1800 part.addparam('requirements', requirements, mandatory=True)
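
# A minimal sketch of the resulting part (parameter values illustrative):
#
#     stream2 part
#       bytecount    -> total size of the streamed store files, e.g. '12345'
#       filecount    -> number of files in the stream, e.g. '42'
#       requirements -> e.g. 'generaldelta revlogv1 store'
#       data         -> raw file chunks produced by streamclone.generatev2()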
1794 1801
1795 1802 @getbundle2partsgenerator('changegroup')
1796 1803 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1797 1804 b2caps=None, heads=None, common=None, **kwargs):
1798 1805 """add a changegroup part to the requested bundle"""
1799 1806 cgstream = None
1800 1807 if kwargs.get(r'cg', True):
1801 1808 # build changegroup bundle here.
1802 1809 version = '01'
1803 1810 cgversions = b2caps.get('changegroup')
1804 1811 if cgversions: # 3.1 and 3.2 ship with an empty value
1805 1812 cgversions = [v for v in cgversions
1806 1813 if v in changegroup.supportedoutgoingversions(repo)]
1807 1814 if not cgversions:
1808 1815 raise ValueError(_('no common changegroup version'))
1809 1816 version = max(cgversions)
1810 1817 outgoing = _computeoutgoing(repo, heads, common)
1811 1818 if outgoing.missing:
1812 1819 cgstream = changegroup.makestream(repo, outgoing, version, source,
1813 1820 bundlecaps=bundlecaps)
1814 1821
1815 1822 if cgstream:
1816 1823 part = bundler.newpart('changegroup', data=cgstream)
1817 1824 if cgversions:
1818 1825 part.addparam('version', version)
1819 1826 part.addparam('nbchanges', '%d' % len(outgoing.missing),
1820 1827 mandatory=False)
1821 1828 if 'treemanifest' in repo.requirements:
1822 1829 part.addparam('treemanifest', '1')
1823 1830
1824 1831 @getbundle2partsgenerator('bookmarks')
1825 1832 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
1826 1833 b2caps=None, **kwargs):
1827 1834 """add a bookmark part to the requested bundle"""
1828 1835 if not kwargs.get(r'bookmarks', False):
1829 1836 return
1830 1837 if 'bookmarks' not in b2caps:
1831 1838 raise ValueError(_('no common bookmarks exchange method'))
1832 1839 books = bookmod.listbinbookmarks(repo)
1833 1840 data = bookmod.binaryencode(books)
1834 1841 if data:
1835 1842 bundler.newpart('bookmarks', data=data)
1836 1843
1837 1844 @getbundle2partsgenerator('listkeys')
1838 1845 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1839 1846 b2caps=None, **kwargs):
1840 1847 """add parts containing listkeys namespaces to the requested bundle"""
1841 1848 listkeys = kwargs.get(r'listkeys', ())
1842 1849 for namespace in listkeys:
1843 1850 part = bundler.newpart('listkeys')
1844 1851 part.addparam('namespace', namespace)
1845 1852 keys = repo.listkeys(namespace).items()
1846 1853 part.data = pushkey.encodekeys(keys)
1847 1854
1848 1855 @getbundle2partsgenerator('obsmarkers')
1849 1856 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1850 1857 b2caps=None, heads=None, **kwargs):
1851 1858 """add an obsolescence markers part to the requested bundle"""
1852 1859 if kwargs.get(r'obsmarkers', False):
1853 1860 if heads is None:
1854 1861 heads = repo.heads()
1855 1862 subset = [c.node() for c in repo.set('::%ln', heads)]
1856 1863 markers = repo.obsstore.relevantmarkers(subset)
1857 1864 markers = sorted(markers)
1858 1865 bundle2.buildobsmarkerspart(bundler, markers)
1859 1866
1860 1867 @getbundle2partsgenerator('phases')
1861 1868 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
1862 1869 b2caps=None, heads=None, **kwargs):
1863 1870 """add phase heads part to the requested bundle"""
1864 1871 if kwargs.get(r'phases', False):
1865 1872 if 'heads' not in b2caps.get('phases'):
1866 1873 raise ValueError(_('no common phases exchange method'))
1867 1874 if heads is None:
1868 1875 heads = repo.heads()
1869 1876
1870 1877 headsbyphase = collections.defaultdict(set)
1871 1878 if repo.publishing():
1872 1879 headsbyphase[phases.public] = heads
1873 1880 else:
1874 1881 # find the appropriate heads to move
1875 1882
1876 1883 phase = repo._phasecache.phase
1877 1884 node = repo.changelog.node
1878 1885 rev = repo.changelog.rev
1879 1886 for h in heads:
1880 1887 headsbyphase[phase(repo, rev(h))].add(h)
1881 1888 seenphases = list(headsbyphase.keys())
1882 1889
1883 1890 # We do not handle anything but public and draft phases for now.
1884 1891 if seenphases:
1885 1892 assert max(seenphases) <= phases.draft
1886 1893
1887 1894 # if client is pulling non-public changesets, we need to find
1888 1895 # intermediate public heads.
1889 1896 draftheads = headsbyphase.get(phases.draft, set())
1890 1897 if draftheads:
1891 1898 publicheads = headsbyphase.get(phases.public, set())
1892 1899
1893 1900 revset = 'heads(only(%ln, %ln) and public())'
1894 1901 extraheads = repo.revs(revset, draftheads, publicheads)
1895 1902 for r in extraheads:
1896 1903 headsbyphase[phases.public].add(node(r))
1897 1904
1898 1905 # transform data in a format used by the encoding function
1899 1906 phasemapping = []
1900 1907 for phase in phases.allphases:
1901 1908 phasemapping.append(sorted(headsbyphase[phase]))
1902 1909
1903 1910 # generate the actual part
1904 1911 phasedata = phases.binaryencode(phasemapping)
1905 1912 bundler.newpart('phase-heads', data=phasedata)
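
# For illustration, with one public head P and one draft head D,
# 'phasemapping' is a list of head lists indexed by phase number:
#
#     [[P], [D], []]    # public=0, draft=1, secret=2
#
# phases.binaryencode() serializes that list for the 'phase-heads' part.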
1906 1913
1907 1914 @getbundle2partsgenerator('hgtagsfnodes')
1908 1915 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1909 1916 b2caps=None, heads=None, common=None,
1910 1917 **kwargs):
1911 1918 """Transfer the .hgtags filenodes mapping.
1912 1919
1913 1920 Only values for heads in this bundle will be transferred.
1914 1921
1915 1922 The part data consists of pairs of 20 byte changeset node and .hgtags
1916 1923 filenodes raw values.
1917 1924 """
1918 1925 # Don't send unless:
1919 1926 # - changesets are being exchanged,
1920 1927 # - the client supports it.
1921 1928 if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
1922 1929 return
1923 1930
1924 1931 outgoing = _computeoutgoing(repo, heads, common)
1925 1932 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1926 1933
1927 1934 def check_heads(repo, their_heads, context):
1928 1935 """check if the heads of a repo have been modified
1929 1936
1930 1937 Used by peer for unbundling.
1931 1938 """
1932 1939 heads = repo.heads()
1933 1940 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
1934 1941 if not (their_heads == ['force'] or their_heads == heads or
1935 1942 their_heads == ['hashed', heads_hash]):
1936 1943 # someone else committed/pushed/unbundled while we
1937 1944 # were transferring data
1938 1945 raise error.PushRaced('repository changed while %s - '
1939 1946 'please try again' % context)
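
# A client using the hashed form sends, roughly:
#
#     their_heads = ['hashed',
#                    hashlib.sha1(''.join(sorted(observed_heads))).digest()]
#
# where observed_heads are the binary head nodes it saw before pushing.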
1940 1947
1941 1948 def unbundle(repo, cg, heads, source, url):
1942 1949 """Apply a bundle to a repo.
1943 1950
1944 1951 This function makes sure the repo is locked during the application and has
1945 1952 a mechanism to check that no push race occurred between the creation of the
1946 1953 bundle and its application.
1947 1954 
1948 1955 If the push was raced, a PushRaced exception is raised."""
1949 1956 r = 0
1950 1957 # need a transaction when processing a bundle2 stream
1951 1958 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1952 1959 lockandtr = [None, None, None]
1953 1960 recordout = None
1954 1961 # quick fix for output mismatch with bundle2 in 3.4
1955 1962 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
1956 1963 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1957 1964 captureoutput = True
1958 1965 try:
1959 1966 # note: outside bundle1, 'heads' is expected to be empty and this
1960 1967 # 'check_heads' call will be a no-op
1961 1968 check_heads(repo, heads, 'uploading changes')
1962 1969 # push can proceed
1963 1970 if not isinstance(cg, bundle2.unbundle20):
1964 1971 # legacy case: bundle1 (changegroup 01)
1965 1972 txnname = "\n".join([source, util.hidepassword(url)])
1966 1973 with repo.lock(), repo.transaction(txnname) as tr:
1967 1974 op = bundle2.applybundle(repo, cg, tr, source, url)
1968 1975 r = bundle2.combinechangegroupresults(op)
1969 1976 else:
1970 1977 r = None
1971 1978 try:
1972 1979 def gettransaction():
1973 1980 if not lockandtr[2]:
1974 1981 lockandtr[0] = repo.wlock()
1975 1982 lockandtr[1] = repo.lock()
1976 1983 lockandtr[2] = repo.transaction(source)
1977 1984 lockandtr[2].hookargs['source'] = source
1978 1985 lockandtr[2].hookargs['url'] = url
1979 1986 lockandtr[2].hookargs['bundle2'] = '1'
1980 1987 return lockandtr[2]
1981 1988
1982 1989 # Do greedy locking by default until we're satisfied with lazy
1983 1990 # locking.
1984 1991 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1985 1992 gettransaction()
1986 1993
1987 1994 op = bundle2.bundleoperation(repo, gettransaction,
1988 1995 captureoutput=captureoutput)
1989 1996 try:
1990 1997 op = bundle2.processbundle(repo, cg, op=op)
1991 1998 finally:
1992 1999 r = op.reply
1993 2000 if captureoutput and r is not None:
1994 2001 repo.ui.pushbuffer(error=True, subproc=True)
1995 2002 def recordout(output):
1996 2003 r.newpart('output', data=output, mandatory=False)
1997 2004 if lockandtr[2] is not None:
1998 2005 lockandtr[2].close()
1999 2006 except BaseException as exc:
2000 2007 exc.duringunbundle2 = True
2001 2008 if captureoutput and r is not None:
2002 2009 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2003 2010 def recordout(output):
2004 2011 part = bundle2.bundlepart('output', data=output,
2005 2012 mandatory=False)
2006 2013 parts.append(part)
2007 2014 raise
2008 2015 finally:
2009 2016 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2010 2017 if recordout is not None:
2011 2018 recordout(repo.ui.popbuffer())
2012 2019 return r
2013 2020
2014 2021 def _maybeapplyclonebundle(pullop):
2015 2022 """Apply a clone bundle from a remote, if possible."""
2016 2023
2017 2024 repo = pullop.repo
2018 2025 remote = pullop.remote
2019 2026
2020 2027 if not repo.ui.configbool('ui', 'clonebundles'):
2021 2028 return
2022 2029
2023 2030 # Only run if local repo is empty.
2024 2031 if len(repo):
2025 2032 return
2026 2033
2027 2034 if pullop.heads:
2028 2035 return
2029 2036
2030 2037 if not remote.capable('clonebundles'):
2031 2038 return
2032 2039
2033 2040 res = remote._call('clonebundles')
2034 2041
2035 2042 # If we call the wire protocol command, that's good enough to record the
2036 2043 # attempt.
2037 2044 pullop.clonebundleattempted = True
2038 2045
2039 2046 entries = parseclonebundlesmanifest(repo, res)
2040 2047 if not entries:
2041 2048 repo.ui.note(_('no clone bundles available on remote; '
2042 2049 'falling back to regular clone\n'))
2043 2050 return
2044 2051
2045 2052 entries = filterclonebundleentries(
2046 2053 repo, entries, streamclonerequested=pullop.streamclonerequested)
2047 2054
2048 2055 if not entries:
2049 2056 # There is a thundering herd concern here. However, if a server
2050 2057 # operator doesn't advertise bundles appropriate for its clients,
2051 2058 # they deserve what's coming. Furthermore, from a client's
2052 2059 # perspective, no automatic fallback would mean not being able to
2053 2060 # clone!
2054 2061 repo.ui.warn(_('no compatible clone bundles available on server; '
2055 2062 'falling back to regular clone\n'))
2056 2063 repo.ui.warn(_('(you may want to report this to the server '
2057 2064 'operator)\n'))
2058 2065 return
2059 2066
2060 2067 entries = sortclonebundleentries(repo.ui, entries)
2061 2068
2062 2069 url = entries[0]['URL']
2063 2070 repo.ui.status(_('applying clone bundle from %s\n') % url)
2064 2071 if trypullbundlefromurl(repo.ui, repo, url):
2065 2072 repo.ui.status(_('finished applying clone bundle\n'))
2066 2073 # Bundle failed.
2067 2074 #
2068 2075 # We abort by default to avoid the thundering herd of
2069 2076 # clients flooding a server that was expecting expensive
2070 2077 # clone load to be offloaded.
2071 2078 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2072 2079 repo.ui.warn(_('falling back to normal clone\n'))
2073 2080 else:
2074 2081 raise error.Abort(_('error applying bundle'),
2075 2082 hint=_('if this error persists, consider contacting '
2076 2083 'the server operator or disable clone '
2077 2084 'bundles via '
2078 2085 '"--config ui.clonebundles=false"'))
2079 2086
2080 2087 def parseclonebundlesmanifest(repo, s):
2081 2088 """Parses the raw text of a clone bundles manifest.
2082 2089
2083 2090 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2084 2091 to the URL and other keys are the attributes for the entry.
2085 2092 """
2086 2093 m = []
2087 2094 for line in s.splitlines():
2088 2095 fields = line.split()
2089 2096 if not fields:
2090 2097 continue
2091 2098 attrs = {'URL': fields[0]}
2092 2099 for rawattr in fields[1:]:
2093 2100 key, value = rawattr.split('=', 1)
2094 2101 key = urlreq.unquote(key)
2095 2102 value = urlreq.unquote(value)
2096 2103 attrs[key] = value
2097 2104
2098 2105 # Parse BUNDLESPEC into components. This makes client-side
2099 2106 # preferences easier to specify since you can prefer a single
2100 2107 # component of the BUNDLESPEC.
2101 2108 if key == 'BUNDLESPEC':
2102 2109 try:
2103 2110 comp, version, params = parsebundlespec(repo, value,
2104 2111 externalnames=True)
2105 2112 attrs['COMPRESSION'] = comp
2106 2113 attrs['VERSION'] = version
2107 2114 except error.InvalidBundleSpecification:
2108 2115 pass
2109 2116 except error.UnsupportedBundleSpecification:
2110 2117 pass
2111 2118
2112 2119 m.append(attrs)
2113 2120
2114 2121 return m
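
# Example: a manifest line such as (URL hypothetical)
#
#     https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# parses to:
#
#     {'URL': 'https://example.com/full.hg', 'BUNDLESPEC': 'gzip-v2',
#      'COMPRESSION': 'gzip', 'VERSION': 'v2', 'REQUIRESNI': 'true'}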
2115 2122
2116 2123 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2117 2124 """Remove incompatible clone bundle manifest entries.
2118 2125
2119 2126 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2120 2127 and returns a new list consisting of only the entries that this client
2121 2128 should be able to apply.
2122 2129
2123 2130 There is no guarantee we'll be able to apply all returned entries because
2124 2131 the metadata we use to filter on may be missing or wrong.
2125 2132 """
2126 2133 newentries = []
2127 2134 for entry in entries:
2128 2135 spec = entry.get('BUNDLESPEC')
2129 2136 if spec:
2130 2137 try:
2131 2138 comp, version, params = parsebundlespec(repo, spec, strict=True)
2132 2139
2133 2140 # If a stream clone was requested, filter out non-streamclone
2134 2141 # entries.
2135 2142 if streamclonerequested and (comp != 'UN' or version != 's1'):
2136 2143 repo.ui.debug('filtering %s because not a stream clone\n' %
2137 2144 entry['URL'])
2138 2145 continue
2139 2146
2140 2147 except error.InvalidBundleSpecification as e:
2141 2148 repo.ui.debug(str(e) + '\n')
2142 2149 continue
2143 2150 except error.UnsupportedBundleSpecification as e:
2144 2151 repo.ui.debug('filtering %s because unsupported bundle '
2145 2152 'spec: %s\n' % (entry['URL'], str(e)))
2146 2153 continue
2147 2154 # If we don't have a spec and requested a stream clone, we don't know
2148 2155 # what the entry is so don't attempt to apply it.
2149 2156 elif streamclonerequested:
2150 2157 repo.ui.debug('filtering %s because cannot determine if a stream '
2151 2158 'clone bundle\n' % entry['URL'])
2152 2159 continue
2153 2160
2154 2161 if 'REQUIRESNI' in entry and not sslutil.hassni:
2155 2162 repo.ui.debug('filtering %s because SNI not supported\n' %
2156 2163 entry['URL'])
2157 2164 continue
2158 2165
2159 2166 newentries.append(entry)
2160 2167
2161 2168 return newentries
2162 2169
2163 2170 class clonebundleentry(object):
2164 2171 """Represents an item in a clone bundles manifest.
2165 2172
2166 2173 This rich class is needed to support sorting since sorted() in Python 3
2167 2174 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2168 2175 won't work.
2169 2176 """
2170 2177
2171 2178 def __init__(self, value, prefers):
2172 2179 self.value = value
2173 2180 self.prefers = prefers
2174 2181
2175 2182 def _cmp(self, other):
2176 2183 for prefkey, prefvalue in self.prefers:
2177 2184 avalue = self.value.get(prefkey)
2178 2185 bvalue = other.value.get(prefkey)
2179 2186
2180 2187 # Special case when b is missing the attribute and a matches exactly.
2181 2188 if avalue is not None and bvalue is None and avalue == prefvalue:
2182 2189 return -1
2183 2190
2184 2191 # Special case when a is missing the attribute and b matches exactly.
2185 2192 if bvalue is not None and avalue is None and bvalue == prefvalue:
2186 2193 return 1
2187 2194
2188 2195 # We can't compare unless the attribute is present on both.
2189 2196 if avalue is None or bvalue is None:
2190 2197 continue
2191 2198
2192 2199 # Same values should fall back to next attribute.
2193 2200 if avalue == bvalue:
2194 2201 continue
2195 2202
2196 2203 # Exact matches come first.
2197 2204 if avalue == prefvalue:
2198 2205 return -1
2199 2206 if bvalue == prefvalue:
2200 2207 return 1
2201 2208
2202 2209 # Fall back to next attribute.
2203 2210 continue
2204 2211
2205 2212 # If we got here we couldn't sort by attributes and prefers. Fall
2206 2213 # back to index order.
2207 2214 return 0
2208 2215
2209 2216 def __lt__(self, other):
2210 2217 return self._cmp(other) < 0
2211 2218
2212 2219 def __gt__(self, other):
2213 2220 return self._cmp(other) > 0
2214 2221
2215 2222 def __eq__(self, other):
2216 2223 return self._cmp(other) == 0
2217 2224
2218 2225 def __le__(self, other):
2219 2226 return self._cmp(other) <= 0
2220 2227
2221 2228 def __ge__(self, other):
2222 2229 return self._cmp(other) >= 0
2223 2230
2224 2231 def __ne__(self, other):
2225 2232 return self._cmp(other) != 0
2226 2233
2227 2234 def sortclonebundleentries(ui, entries):
2228 2235 prefers = ui.configlist('ui', 'clonebundleprefers')
2229 2236 if not prefers:
2230 2237 return list(entries)
2231 2238
2232 2239 prefers = [p.split('=', 1) for p in prefers]
2233 2240
2234 2241 items = sorted(clonebundleentry(v, prefers) for v in entries)
2235 2242 return [i.value for i in items]
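
# Example: with the following client configuration (illustrative)
#
#     [ui]
#     clonebundleprefers = VERSION=v2, COMPRESSION=gzip
#
# entries with VERSION=v2 sort first, ties are broken by COMPRESSION=gzip, and
# remaining ties keep their manifest order (the sort is stable).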
2236 2243
2237 2244 def trypullbundlefromurl(ui, repo, url):
2238 2245 """Attempt to apply a bundle from a URL."""
2239 2246 with repo.lock(), repo.transaction('bundleurl') as tr:
2240 2247 try:
2241 2248 fh = urlmod.open(ui, url)
2242 2249 cg = readbundle(ui, fh, 'stream')
2243 2250
2244 2251 if isinstance(cg, streamclone.streamcloneapplier):
2245 2252 cg.apply(repo)
2246 2253 else:
2247 2254 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2248 2255 return True
2249 2256 except urlerr.httperror as e:
2250 2257 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
2251 2258 except urlerr.urlerror as e:
2252 2259 ui.warn(_('error fetching bundle: %s\n') % e.reason)
2253 2260
2254 2261 return False