exchange: remove dead assignment of forcebundle1...
Martin von Zweigbergk
r36593:df7b7d50 default
@@ -1,2264 +1,2263 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 bin,
17 17 hex,
18 18 nullid,
19 19 )
20 20 from . import (
21 21 bookmarks as bookmod,
22 22 bundle2,
23 23 changegroup,
24 24 discovery,
25 25 error,
26 26 lock as lockmod,
27 27 logexchange,
28 28 obsolete,
29 29 phases,
30 30 pushkey,
31 31 pycompat,
32 32 scmutil,
33 33 sslutil,
34 34 streamclone,
35 35 url as urlmod,
36 36 util,
37 37 )
38 38
39 39 urlerr = util.urlerr
40 40 urlreq = util.urlreq
41 41
42 42 # Maps bundle version human names to changegroup versions.
43 43 _bundlespeccgversions = {'v1': '01',
44 44 'v2': '02',
45 45 'packed1': 's1',
46 46 'bundle2': '02', #legacy
47 47 }
48 48
49 49 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
50 50 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
51 51
52 52 def parsebundlespec(repo, spec, strict=True, externalnames=False):
53 53 """Parse a bundle string specification into parts.
54 54
55 55 Bundle specifications denote a well-defined bundle/exchange format.
56 56 The content of a given specification should not change over time in
57 57 order to ensure that bundles produced by a newer version of Mercurial are
58 58 readable from an older version.
59 59
60 60 The string currently has the form:
61 61
62 62 <compression>-<type>[;<parameter0>[;<parameter1>]]
63 63
64 64 Where <compression> is one of the supported compression formats
65 65 and <type> is (currently) a version string. A ";" can follow the type and
66 66 all text afterwards is interpreted as URI encoded, ";" delimited key=value
67 67 pairs.
68 68
69 69 If ``strict`` is True (the default) <compression> is required. Otherwise,
70 70 it is optional.
71 71
72 72 If ``externalnames`` is False (the default), the human-centric names will
73 73 be converted to their internal representation.
74 74
75 75 Returns a 3-tuple of (compression, version, parameters). Compression will
76 76 be ``None`` if not in strict mode and a compression isn't defined.
77 77
78 78 An ``InvalidBundleSpecification`` is raised when the specification is
79 79 not syntactically well formed.
80 80
81 81 An ``UnsupportedBundleSpecification`` is raised when the compression or
82 82 bundle type/version is not recognized.
83 83
84 84 Note: this function will likely eventually return a more complex data
85 85 structure, including bundle2 part information.
86 86 """
87 87 def parseparams(s):
88 88 if ';' not in s:
89 89 return s, {}
90 90
91 91 params = {}
92 92 version, paramstr = s.split(';', 1)
93 93
94 94 for p in paramstr.split(';'):
95 95 if '=' not in p:
96 96 raise error.InvalidBundleSpecification(
97 97 _('invalid bundle specification: '
98 98 'missing "=" in parameter: %s') % p)
99 99
100 100 key, value = p.split('=', 1)
101 101 key = urlreq.unquote(key)
102 102 value = urlreq.unquote(value)
103 103 params[key] = value
104 104
105 105 return version, params
106 106
107 107
108 108 if strict and '-' not in spec:
109 109 raise error.InvalidBundleSpecification(
110 110 _('invalid bundle specification; '
111 111 'must be prefixed with compression: %s') % spec)
112 112
113 113 if '-' in spec:
114 114 compression, version = spec.split('-', 1)
115 115
116 116 if compression not in util.compengines.supportedbundlenames:
117 117 raise error.UnsupportedBundleSpecification(
118 118 _('%s compression is not supported') % compression)
119 119
120 120 version, params = parseparams(version)
121 121
122 122 if version not in _bundlespeccgversions:
123 123 raise error.UnsupportedBundleSpecification(
124 124 _('%s is not a recognized bundle version') % version)
125 125 else:
126 126 # Value could be just the compression or just the version, in which
127 127 # case some defaults are assumed (but only when not in strict mode).
128 128 assert not strict
129 129
130 130 spec, params = parseparams(spec)
131 131
132 132 if spec in util.compengines.supportedbundlenames:
133 133 compression = spec
134 134 version = 'v1'
135 135 # Generaldelta repos require v2.
136 136 if 'generaldelta' in repo.requirements:
137 137 version = 'v2'
138 138 # Modern compression engines require v2.
139 139 if compression not in _bundlespecv1compengines:
140 140 version = 'v2'
141 141 elif spec in _bundlespeccgversions:
142 142 if spec == 'packed1':
143 143 compression = 'none'
144 144 else:
145 145 compression = 'bzip2'
146 146 version = spec
147 147 else:
148 148 raise error.UnsupportedBundleSpecification(
149 149 _('%s is not a recognized bundle specification') % spec)
150 150
151 151 # Bundle version 1 only supports a known set of compression engines.
152 152 if version == 'v1' and compression not in _bundlespecv1compengines:
153 153 raise error.UnsupportedBundleSpecification(
154 154 _('compression engine %s is not supported on v1 bundles') %
155 155 compression)
156 156
157 157 # The specification for packed1 can optionally declare the data formats
158 158 # required to apply it. If we see this metadata, compare against what the
159 159 # repo supports and error if the bundle isn't compatible.
160 160 if version == 'packed1' and 'requirements' in params:
161 161 requirements = set(params['requirements'].split(','))
162 162 missingreqs = requirements - repo.supportedformats
163 163 if missingreqs:
164 164 raise error.UnsupportedBundleSpecification(
165 165 _('missing support for repository features: %s') %
166 166 ', '.join(sorted(missingreqs)))
167 167
168 168 if not externalnames:
169 169 engine = util.compengines.forbundlename(compression)
170 170 compression = engine.bundletype()[1]
171 171 version = _bundlespeccgversions[version]
172 172 return compression, version, params
173 173
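# A minimal sketch of the grammar described in the docstring above (editor
# annotation, not part of the file; return values assume a repo with
# generaldelta enabled and the standard compression engines):
#
#   >>> parsebundlespec(repo, 'gzip-v2')
#   ('GZ', '02', {})
#   >>> parsebundlespec(repo, 'none-packed1;requirements=revlogv1')
#   ('UN', 's1', {'requirements': 'revlogv1'})
#   >>> parsebundlespec(repo, 'zstd', strict=False)   # bare compression name
#   ('ZS', '02', {})                                  # modern engine forces v2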
174 174 def readbundle(ui, fh, fname, vfs=None):
175 175 header = changegroup.readexactly(fh, 4)
176 176
177 177 alg = None
178 178 if not fname:
179 179 fname = "stream"
180 180 if not header.startswith('HG') and header.startswith('\0'):
181 181 fh = changegroup.headerlessfixup(fh, header)
182 182 header = "HG10"
183 183 alg = 'UN'
184 184 elif vfs:
185 185 fname = vfs.join(fname)
186 186
187 187 magic, version = header[0:2], header[2:4]
188 188
189 189 if magic != 'HG':
190 190 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
191 191 if version == '10':
192 192 if alg is None:
193 193 alg = changegroup.readexactly(fh, 2)
194 194 return changegroup.cg1unpacker(fh, alg)
195 195 elif version.startswith('2'):
196 196 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
197 197 elif version == 'S1':
198 198 return streamclone.streamcloneapplier(fh)
199 199 else:
200 200 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
201 201
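# For reference, the 4-byte headers dispatched above look like this
# (illustrative annotation, not part of the file):
#
#   'HG10' + 2-byte compression ('UN', 'GZ', 'BZ')   -> cg1unpacker
#   'HG20' (compression is a bundle2 parameter)      -> bundle2.getunbundler
#   'HGS1' + 'UN' (packed/stream clone)              -> streamcloneapplier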
202 202 def _formatrequirementsspec(requirements):
203 203 return urlreq.quote(','.join(sorted(requirements)))
204 204
205 205 def _formatrequirementsparams(requirements):
206 206 requirements = _formatrequirementsspec(requirements)
207 207 params = "%s%s" % (urlreq.quote("requirements="), requirements)
208 208 return params
209 209
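# Illustrative annotation (not part of the file): both helpers URI-encode,
# so ',' and '=' end up percent-escaped:
#
#   >>> _formatrequirementsspec({'generaldelta', 'revlogv1'})
#   'generaldelta%2Crevlogv1'
#   >>> _formatrequirementsparams({'generaldelta', 'revlogv1'})
#   'requirements%3Dgeneraldelta%2Crevlogv1'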
210 210 def getbundlespec(ui, fh):
211 211 """Infer the bundlespec from a bundle file handle.
212 212
213 213 The input file handle is seeked and the original seek position is not
214 214 restored.
215 215 """
216 216 def speccompression(alg):
217 217 try:
218 218 return util.compengines.forbundletype(alg).bundletype()[0]
219 219 except KeyError:
220 220 return None
221 221
222 222 b = readbundle(ui, fh, None)
223 223 if isinstance(b, changegroup.cg1unpacker):
224 224 alg = b._type
225 225 if alg == '_truncatedBZ':
226 226 alg = 'BZ'
227 227 comp = speccompression(alg)
228 228 if not comp:
229 229 raise error.Abort(_('unknown compression algorithm: %s') % alg)
230 230 return '%s-v1' % comp
231 231 elif isinstance(b, bundle2.unbundle20):
232 232 if 'Compression' in b.params:
233 233 comp = speccompression(b.params['Compression'])
234 234 if not comp:
235 235 raise error.Abort(_('unknown compression algorithm: %s') % comp)
236 236 else:
237 237 comp = 'none'
238 238
239 239 version = None
240 240 for part in b.iterparts():
241 241 if part.type == 'changegroup':
242 242 version = part.params['version']
243 243 if version in ('01', '02'):
244 244 version = 'v2'
245 245 else:
246 246 raise error.Abort(_('changegroup version %s does not have '
247 247 'a known bundlespec') % version,
248 248 hint=_('try upgrading your Mercurial '
249 249 'client'))
250 250
251 251 if not version:
252 252 raise error.Abort(_('could not identify changegroup version in '
253 253 'bundle'))
254 254
255 255 return '%s-%s' % (comp, version)
256 256 elif isinstance(b, streamclone.streamcloneapplier):
257 257 requirements = streamclone.readbundle1header(fh)[2]
258 258 return 'none-packed1;%s' % _formatrequirementsparams(requirements)
259 259 else:
260 260 raise error.Abort(_('unknown bundle type: %s') % b)
261 261
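# A hypothetical usage sketch of getbundlespec (file name and output are
# assumptions for illustration):
#
#   >>> with open('project.hg', 'rb') as fh:
#   ...     getbundlespec(ui, fh)
#   'bzip2-v1'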
262 262 def _computeoutgoing(repo, heads, common):
263 263 """Computes which revs are outgoing given a set of common
264 264 and a set of heads.
265 265
266 266 This is a separate function so extensions can have access to
267 267 the logic.
268 268
269 269 Returns a discovery.outgoing object.
270 270 """
271 271 cl = repo.changelog
272 272 if common:
273 273 hasnode = cl.hasnode
274 274 common = [n for n in common if hasnode(n)]
275 275 else:
276 276 common = [nullid]
277 277 if not heads:
278 278 heads = cl.heads()
279 279 return discovery.outgoing(repo, common, heads)
280 280
281 281 def _forcebundle1(op):
282 282 """return true if a pull/push must use bundle1
283 283
284 284 This function is used to allow testing of the older bundle version"""
285 285 ui = op.repo.ui
286 forcebundle1 = False
288 287 # The goal of this config is to allow developers to choose the bundle
289 288 # version used during exchange. This is especially handy during tests.
290 289 # Value is a list of bundle versions to pick from; the highest supported
291 290 # version should be used.
291 290 #
292 291 # developer config: devel.legacy.exchange
293 292 exchange = ui.configlist('devel', 'legacy.exchange')
294 293 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
295 294 return forcebundle1 or not op.remote.capable('bundle2')
296 295
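# Illustration of the developer config consulted above (hgrc syntax; this
# annotation is a sketch, not part of the file). Setting:
#
#   [devel]
#   legacy.exchange = bundle1
#
# forces bundle1 even against a bundle2-capable peer. Listing 'bundle2' as
# well lets bundle2 win, since forcebundle1 requires 'bundle2' to be absent
# from the list.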
297 296 class pushoperation(object):
298 297 """A object that represent a single push operation
299 298
300 299 Its purpose is to carry push related state and very common operations.
301 300
302 301 A new pushoperation should be created at the beginning of each push and
303 302 discarded afterward.
304 303 """
305 304
306 305 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
307 306 bookmarks=(), pushvars=None):
308 307 # repo we push from
309 308 self.repo = repo
310 309 self.ui = repo.ui
311 310 # repo we push to
312 311 self.remote = remote
313 312 # force option provided
314 313 self.force = force
315 314 # revs to be pushed (None is "all")
316 315 self.revs = revs
317 316 # bookmark explicitly pushed
318 317 self.bookmarks = bookmarks
319 318 # allow push of new branch
320 319 self.newbranch = newbranch
321 320 # steps already performed
322 321 # (used to check what steps have already been performed through bundle2)
323 322 self.stepsdone = set()
324 323 # Integer version of the changegroup push result
325 324 # - None means nothing to push
326 325 # - 0 means HTTP error
327 326 # - 1 means we pushed and remote head count is unchanged *or*
328 327 # we have outgoing changesets but refused to push
329 328 # - other values as described by addchangegroup()
330 329 self.cgresult = None
331 330 # Boolean value for the bookmark push
332 331 self.bkresult = None
333 332 # discover.outgoing object (contains common and outgoing data)
334 333 self.outgoing = None
335 334 # all remote topological heads before the push
336 335 self.remoteheads = None
337 336 # Details of the remote branch pre and post push
338 337 #
339 338 # mapping: {'branch': ([remoteheads],
340 339 # [newheads],
341 340 # [unsyncedheads],
342 341 # [discardedheads])}
343 342 # - branch: the branch name
344 343 # - remoteheads: the list of remote heads known locally
345 344 # None if the branch is new
346 345 # - newheads: the new remote heads (known locally) with outgoing pushed
347 346 # - unsyncedheads: the list of remote heads unknown locally.
348 347 # - discardedheads: the list of remote heads made obsolete by the push
349 348 self.pushbranchmap = None
350 349 # testable as a boolean indicating if any nodes are missing locally.
351 350 self.incoming = None
352 351 # summary of the remote phase situation
353 352 self.remotephases = None
354 353 # phase changes that must be pushed alongside the changesets
355 354 self.outdatedphases = None
356 355 # phase changes that must be pushed if the changeset push fails
357 356 self.fallbackoutdatedphases = None
358 357 # outgoing obsmarkers
359 358 self.outobsmarkers = set()
360 359 # outgoing bookmarks
361 360 self.outbookmarks = []
362 361 # transaction manager
363 362 self.trmanager = None
364 363 # map { pushkey partid -> callback handling failure}
365 364 # used to handle exception from mandatory pushkey part failure
366 365 self.pkfailcb = {}
367 366 # an iterable of pushvars or None
368 367 self.pushvars = pushvars
369 368
370 369 @util.propertycache
371 370 def futureheads(self):
372 371 """future remote heads if the changeset push succeeds"""
373 372 return self.outgoing.missingheads
374 373
375 374 @util.propertycache
376 375 def fallbackheads(self):
377 376 """future remote heads if the changeset push fails"""
378 377 if self.revs is None:
379 378 # no target to push, all common heads are relevant
380 379 return self.outgoing.commonheads
381 380 unfi = self.repo.unfiltered()
382 381 # I want cheads = heads(::missingheads and ::commonheads)
383 382 # (missingheads is revs with secret changeset filtered out)
384 383 #
385 384 # This can be expressed as:
386 385 # cheads = ( (missingheads and ::commonheads)
387 386 # + (commonheads and ::missingheads))"
388 387 # )
389 388 #
390 389 # while trying to push we already computed the following:
391 390 # common = (::commonheads)
392 391 # missing = ((commonheads::missingheads) - commonheads)
393 392 #
394 393 # We can pick:
395 394 # * missingheads part of common (::commonheads)
396 395 common = self.outgoing.common
397 396 nm = self.repo.changelog.nodemap
398 397 cheads = [node for node in self.revs if nm[node] in common]
399 398 # and
400 399 # * commonheads parents on missing
401 400 revset = unfi.set('%ln and parents(roots(%ln))',
402 401 self.outgoing.commonheads,
403 402 self.outgoing.missing)
404 403 cheads.extend(c.node() for c in revset)
405 404 return cheads
406 405
407 406 @property
408 407 def commonheads(self):
409 408 """set of all common heads after changeset bundle push"""
410 409 if self.cgresult:
411 410 return self.futureheads
412 411 else:
413 412 return self.fallbackheads
414 413
415 414 # mapping of messages used when pushing bookmarks
416 415 bookmsgmap = {'update': (_("updating bookmark %s\n"),
417 416 _('updating bookmark %s failed!\n')),
418 417 'export': (_("exporting bookmark %s\n"),
419 418 _('exporting bookmark %s failed!\n')),
420 419 'delete': (_("deleting remote bookmark %s\n"),
421 420 _('deleting remote bookmark %s failed!\n')),
422 421 }
423 422
424 423
425 424 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
426 425 opargs=None):
427 426 '''Push outgoing changesets (limited by revs) from a local
428 427 repository to remote. Return an integer:
429 428 - None means nothing to push
430 429 - 0 means HTTP error
431 430 - 1 means we pushed and remote head count is unchanged *or*
432 431 we have outgoing changesets but refused to push
433 432 - other values as described by addchangegroup()
434 433 '''
435 434 if opargs is None:
436 435 opargs = {}
437 436 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
438 437 **pycompat.strkwargs(opargs))
439 438 if pushop.remote.local():
440 439 missing = (set(pushop.repo.requirements)
441 440 - pushop.remote.local().supported)
442 441 if missing:
443 442 msg = _("required features are not"
444 443 " supported in the destination:"
445 444 " %s") % (', '.join(sorted(missing)))
446 445 raise error.Abort(msg)
447 446
448 447 if not pushop.remote.canpush():
449 448 raise error.Abort(_("destination does not support push"))
450 449
451 450 if not pushop.remote.capable('unbundle'):
452 451 raise error.Abort(_('cannot push: destination does not support the '
453 452 'unbundle wire protocol command'))
454 453
455 454 # get lock as we might write phase data
456 455 wlock = lock = None
457 456 try:
458 457 # bundle2 push may receive a reply bundle touching bookmarks or other
459 458 # things requiring the wlock. Take it now to ensure proper ordering.
460 459 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
461 460 if (not _forcebundle1(pushop)) and maypushback:
462 461 wlock = pushop.repo.wlock()
463 462 lock = pushop.repo.lock()
464 463 pushop.trmanager = transactionmanager(pushop.repo,
465 464 'push-response',
466 465 pushop.remote.url())
467 466 except IOError as err:
468 467 if err.errno != errno.EACCES:
469 468 raise
470 469 # source repo cannot be locked.
471 470 # We do not abort the push, but just disable the local phase
472 471 # synchronisation.
473 472 msg = 'cannot lock source repository: %s\n' % err
474 473 pushop.ui.debug(msg)
475 474
476 475 with wlock or util.nullcontextmanager(), \
477 476 lock or util.nullcontextmanager(), \
478 477 pushop.trmanager or util.nullcontextmanager():
479 478 pushop.repo.checkpush(pushop)
480 479 _pushdiscovery(pushop)
481 480 if not _forcebundle1(pushop):
482 481 _pushbundle2(pushop)
483 482 _pushchangeset(pushop)
484 483 _pushsyncphase(pushop)
485 484 _pushobsolete(pushop)
486 485 _pushbookmark(pushop)
487 486
488 487 return pushop
489 488
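# A hedged usage sketch of push() (peer construction via mercurial.hg is an
# assumption, not shown in this file):
#
#   >>> from mercurial import hg
#   >>> other = hg.peer(repo.ui, {}, 'ssh://hg@example.com/repo')
#   >>> pushop = push(repo, other, revs=[repo['tip'].node()])
#   >>> pushop.cgresult   # None / 0 / 1 / ... as documented above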
490 489 # list of steps to perform discovery before push
491 490 pushdiscoveryorder = []
492 491
493 492 # Mapping between step name and function
494 493 #
495 494 # This exists to help extensions wrap steps if necessary
496 495 pushdiscoverymapping = {}
497 496
498 497 def pushdiscovery(stepname):
499 498 """decorator for function performing discovery before push
500 499
501 500 The function is added to the step -> function mapping and appended to the
502 501 list of steps. Beware that decorated functions will be added in order (this
503 502 may matter).
504 503
505 504 You can only use this decorator for a new step; if you want to wrap a step
506 505 from an extension, change the pushdiscoverymapping dictionary directly."""
507 506 def dec(func):
508 507 assert stepname not in pushdiscoverymapping
509 508 pushdiscoverymapping[stepname] = func
510 509 pushdiscoveryorder.append(stepname)
511 510 return func
512 511 return dec
513 512
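# A hypothetical extension-side use of the decorator above; the step name
# and function are invented for illustration:
#
#   @pushdiscovery('example-step')
#   def _pushdiscoveryexample(pushop):
#       pushop.ui.debug('example discovery step ran\n')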
514 513 def _pushdiscovery(pushop):
515 514 """Run all discovery steps"""
516 515 for stepname in pushdiscoveryorder:
517 516 step = pushdiscoverymapping[stepname]
518 517 step(pushop)
519 518
520 519 @pushdiscovery('changeset')
521 520 def _pushdiscoverychangeset(pushop):
522 521 """discover the changeset that need to be pushed"""
523 522 fci = discovery.findcommonincoming
524 523 if pushop.revs:
525 524 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
526 525 ancestorsof=pushop.revs)
527 526 else:
528 527 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
529 528 common, inc, remoteheads = commoninc
530 529 fco = discovery.findcommonoutgoing
531 530 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
532 531 commoninc=commoninc, force=pushop.force)
533 532 pushop.outgoing = outgoing
534 533 pushop.remoteheads = remoteheads
535 534 pushop.incoming = inc
536 535
537 536 @pushdiscovery('phase')
538 537 def _pushdiscoveryphase(pushop):
539 538 """discover the phase that needs to be pushed
540 539
541 540 (computed for both success and failure case for changesets push)"""
542 541 outgoing = pushop.outgoing
543 542 unfi = pushop.repo.unfiltered()
544 543 remotephases = pushop.remote.listkeys('phases')
545 544 if (pushop.ui.configbool('ui', '_usedassubrepo')
546 545 and remotephases # server supports phases
547 546 and not pushop.outgoing.missing # no changesets to be pushed
548 547 and remotephases.get('publishing', False)):
549 548 # When:
550 549 # - this is a subrepo push
551 550 # - and the remote supports phases
552 551 # - and no changesets are to be pushed
553 552 # - and the remote is publishing
554 553 # We may be in the issue 3871 case!
555 554 # We drop the phase synchronisation that would otherwise be done as
556 555 # a courtesy and that could publish, on the remote, changesets that
557 556 # are possibly still draft locally.
558 557 pushop.outdatedphases = []
559 558 pushop.fallbackoutdatedphases = []
560 559 return
561 560
562 561 pushop.remotephases = phases.remotephasessummary(pushop.repo,
563 562 pushop.fallbackheads,
564 563 remotephases)
565 564 droots = pushop.remotephases.draftroots
566 565
567 566 extracond = ''
568 567 if not pushop.remotephases.publishing:
569 568 extracond = ' and public()'
570 569 revset = 'heads((%%ln::%%ln) %s)' % extracond
571 570 # Get the list of all revs that are draft on the remote but public here.
572 571 # XXX Beware that the revset breaks if droots is not strictly a set of
573 572 # XXX roots; we may want to ensure it is, but that is costly.
574 573 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
575 574 if not outgoing.missing:
576 575 future = fallback
577 576 else:
578 577 # add the changesets we are going to push as draft
579 578 #
580 579 # should not be necessary for a publishing server, but because of an
581 580 # issue fixed in xxxxx we have to do it anyway.
582 581 fdroots = list(unfi.set('roots(%ln + %ln::)',
583 582 outgoing.missing, droots))
584 583 fdroots = [f.node() for f in fdroots]
585 584 future = list(unfi.set(revset, fdroots, pushop.futureheads))
586 585 pushop.outdatedphases = future
587 586 pushop.fallbackoutdatedphases = fallback
588 587
589 588 @pushdiscovery('obsmarker')
590 589 def _pushdiscoveryobsmarkers(pushop):
591 590 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
592 591 and pushop.repo.obsstore
593 592 and 'obsolete' in pushop.remote.listkeys('namespaces')):
594 593 repo = pushop.repo
595 594 # very naive computation that can be quite expensive on big repos.
596 595 # However, evolution is currently slow on them anyway.
597 596 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
598 597 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
599 598
600 599 @pushdiscovery('bookmarks')
601 600 def _pushdiscoverybookmarks(pushop):
602 601 ui = pushop.ui
603 602 repo = pushop.repo.unfiltered()
604 603 remote = pushop.remote
605 604 ui.debug("checking for updated bookmarks\n")
606 605 ancestors = ()
607 606 if pushop.revs:
608 607 revnums = map(repo.changelog.rev, pushop.revs)
609 608 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
610 609 remotebookmark = remote.listkeys('bookmarks')
611 610
612 611 explicit = set([repo._bookmarks.expandname(bookmark)
613 612 for bookmark in pushop.bookmarks])
614 613
615 614 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
616 615 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
617 616
618 617 def safehex(x):
619 618 if x is None:
620 619 return x
621 620 return hex(x)
622 621
623 622 def hexifycompbookmarks(bookmarks):
624 623 for b, scid, dcid in bookmarks:
625 624 yield b, safehex(scid), safehex(dcid)
626 625
627 626 comp = [hexifycompbookmarks(marks) for marks in comp]
628 627 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
629 628
630 629 for b, scid, dcid in advsrc:
631 630 if b in explicit:
632 631 explicit.remove(b)
633 632 if not ancestors or repo[scid].rev() in ancestors:
634 633 pushop.outbookmarks.append((b, dcid, scid))
635 634 # search added bookmark
636 635 for b, scid, dcid in addsrc:
637 636 if b in explicit:
638 637 explicit.remove(b)
639 638 pushop.outbookmarks.append((b, '', scid))
640 639 # search for overwritten bookmark
641 640 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
642 641 if b in explicit:
643 642 explicit.remove(b)
644 643 pushop.outbookmarks.append((b, dcid, scid))
645 644 # search for bookmark to delete
646 645 for b, scid, dcid in adddst:
647 646 if b in explicit:
648 647 explicit.remove(b)
649 648 # treat as "deleted locally"
650 649 pushop.outbookmarks.append((b, dcid, ''))
651 650 # identical bookmarks shouldn't get reported
652 651 for b, scid, dcid in same:
653 652 if b in explicit:
654 653 explicit.remove(b)
655 654
656 655 if explicit:
657 656 explicit = sorted(explicit)
658 657 # we should probably list all of them
659 658 ui.warn(_('bookmark %s does not exist on the local '
660 659 'or remote repository!\n') % explicit[0])
661 660 pushop.bkresult = 2
662 661
663 662 pushop.outbookmarks.sort()
664 663
665 664 def _pushcheckoutgoing(pushop):
666 665 outgoing = pushop.outgoing
667 666 unfi = pushop.repo.unfiltered()
668 667 if not outgoing.missing:
669 668 # nothing to push
670 669 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
671 670 return False
672 671 # something to push
673 672 if not pushop.force:
674 673 # if repo.obsstore == False --> no obsolete
675 674 # then, save the iteration
676 675 if unfi.obsstore:
677 676 # these messages are defined here to stay within the 80-char line limit
678 677 mso = _("push includes obsolete changeset: %s!")
679 678 mspd = _("push includes phase-divergent changeset: %s!")
680 679 mscd = _("push includes content-divergent changeset: %s!")
681 680 mst = {"orphan": _("push includes orphan changeset: %s!"),
682 681 "phase-divergent": mspd,
683 682 "content-divergent": mscd}
684 683 # If there is at least one obsolete or unstable
685 684 # changeset in missing, at least one of the missing
686 685 # heads will be obsolete or unstable. So checking
687 686 # heads only is ok.
688 687 for node in outgoing.missingheads:
689 688 ctx = unfi[node]
690 689 if ctx.obsolete():
691 690 raise error.Abort(mso % ctx)
692 691 elif ctx.isunstable():
693 692 # TODO print more than one instability in the abort
694 693 # message
695 694 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
696 695
697 696 discovery.checkheads(pushop)
698 697 return True
699 698
700 699 # List of names of steps to perform for an outgoing bundle2, order matters.
701 700 b2partsgenorder = []
702 701
703 702 # Mapping between step name and function
704 703 #
705 704 # This exists to help extensions wrap steps if necessary
706 705 b2partsgenmapping = {}
707 706
708 707 def b2partsgenerator(stepname, idx=None):
709 708 """decorator for function generating bundle2 part
710 709
711 710 The function is added to the step -> function mapping and appended to the
712 711 list of steps. Beware that decorated functions will be added in order
713 712 (this may matter).
714 713
715 714 You can only use this decorator for new steps; if you want to wrap a step
716 715 from an extension, change the b2partsgenmapping dictionary directly."""
717 716 def dec(func):
718 717 assert stepname not in b2partsgenmapping
719 718 b2partsgenmapping[stepname] = func
720 719 if idx is None:
721 720 b2partsgenorder.append(stepname)
722 721 else:
723 722 b2partsgenorder.insert(idx, stepname)
724 723 return func
725 724 return dec
726 725
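# A hypothetical part generator registered with the decorator above; using
# idx=0 makes it run before the built-in parts (names invented for
# illustration):
#
#   @b2partsgenerator('example-part', idx=0)
#   def _pushb2example(pushop, bundler):
#       if 'example-part' in pushop.stepsdone:
#           return
#       pushop.stepsdone.add('example-part')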
727 726 def _pushb2ctxcheckheads(pushop, bundler):
728 727 """Generate race condition checking parts
729 728
730 729 Exists as an independent function to aid extensions
731 730 """
732 731 # * 'force' does not check for push races,
733 732 # * if we don't push anything, there is nothing to check.
734 733 if not pushop.force and pushop.outgoing.missingheads:
735 734 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
736 735 emptyremote = pushop.pushbranchmap is None
737 736 if not allowunrelated or emptyremote:
738 737 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
739 738 else:
740 739 affected = set()
741 740 for branch, heads in pushop.pushbranchmap.iteritems():
742 741 remoteheads, newheads, unsyncedheads, discardedheads = heads
743 742 if remoteheads is not None:
744 743 remote = set(remoteheads)
745 744 affected |= set(discardedheads) & remote
746 745 affected |= remote - set(newheads)
747 746 if affected:
748 747 data = iter(sorted(affected))
749 748 bundler.newpart('check:updated-heads', data=data)
750 749
751 750 def _pushing(pushop):
752 751 """return True if we are pushing anything"""
753 752 return bool(pushop.outgoing.missing
754 753 or pushop.outdatedphases
755 754 or pushop.outobsmarkers
756 755 or pushop.outbookmarks)
757 756
758 757 @b2partsgenerator('check-bookmarks')
759 758 def _pushb2checkbookmarks(pushop, bundler):
760 759 """insert bookmark move checking"""
761 760 if not _pushing(pushop) or pushop.force:
762 761 return
763 762 b2caps = bundle2.bundle2caps(pushop.remote)
764 763 hasbookmarkcheck = 'bookmarks' in b2caps
765 764 if not (pushop.outbookmarks and hasbookmarkcheck):
766 765 return
767 766 data = []
768 767 for book, old, new in pushop.outbookmarks:
769 768 old = bin(old)
770 769 data.append((book, old))
771 770 checkdata = bookmod.binaryencode(data)
772 771 bundler.newpart('check:bookmarks', data=checkdata)
773 772
774 773 @b2partsgenerator('check-phases')
775 774 def _pushb2checkphases(pushop, bundler):
776 775 """insert phase move checking"""
777 776 if not _pushing(pushop) or pushop.force:
778 777 return
779 778 b2caps = bundle2.bundle2caps(pushop.remote)
780 779 hasphaseheads = 'heads' in b2caps.get('phases', ())
781 780 if pushop.remotephases is not None and hasphaseheads:
782 781 # check that the remote phase has not changed
783 782 checks = [[] for p in phases.allphases]
784 783 checks[phases.public].extend(pushop.remotephases.publicheads)
785 784 checks[phases.draft].extend(pushop.remotephases.draftroots)
786 785 if any(checks):
787 786 for nodes in checks:
788 787 nodes.sort()
789 788 checkdata = phases.binaryencode(checks)
790 789 bundler.newpart('check:phases', data=checkdata)
791 790
792 791 @b2partsgenerator('changeset')
793 792 def _pushb2ctx(pushop, bundler):
794 793 """handle changegroup push through bundle2
795 794
796 795 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
797 796 """
798 797 if 'changesets' in pushop.stepsdone:
799 798 return
800 799 pushop.stepsdone.add('changesets')
801 800 # Send known heads to the server for race detection.
802 801 if not _pushcheckoutgoing(pushop):
803 802 return
804 803 pushop.repo.prepushoutgoinghooks(pushop)
805 804
806 805 _pushb2ctxcheckheads(pushop, bundler)
807 806
808 807 b2caps = bundle2.bundle2caps(pushop.remote)
809 808 version = '01'
810 809 cgversions = b2caps.get('changegroup')
811 810 if cgversions: # 3.1 and 3.2 ship with an empty value
812 811 cgversions = [v for v in cgversions
813 812 if v in changegroup.supportedoutgoingversions(
814 813 pushop.repo)]
815 814 if not cgversions:
816 815 raise ValueError(_('no common changegroup version'))
817 816 version = max(cgversions)
818 817 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
819 818 'push')
820 819 cgpart = bundler.newpart('changegroup', data=cgstream)
821 820 if cgversions:
822 821 cgpart.addparam('version', version)
823 822 if 'treemanifest' in pushop.repo.requirements:
824 823 cgpart.addparam('treemanifest', '1')
825 824 def handlereply(op):
826 825 """extract addchangegroup returns from server reply"""
827 826 cgreplies = op.records.getreplies(cgpart.id)
828 827 assert len(cgreplies['changegroup']) == 1
829 828 pushop.cgresult = cgreplies['changegroup'][0]['return']
830 829 return handlereply
831 830
832 831 @b2partsgenerator('phase')
833 832 def _pushb2phases(pushop, bundler):
834 833 """handle phase push through bundle2"""
835 834 if 'phases' in pushop.stepsdone:
836 835 return
837 836 b2caps = bundle2.bundle2caps(pushop.remote)
838 837 ui = pushop.repo.ui
839 838
840 839 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
841 840 haspushkey = 'pushkey' in b2caps
842 841 hasphaseheads = 'heads' in b2caps.get('phases', ())
843 842
844 843 if hasphaseheads and not legacyphase:
845 844 return _pushb2phaseheads(pushop, bundler)
846 845 elif haspushkey:
847 846 return _pushb2phasespushkey(pushop, bundler)
848 847
849 848 def _pushb2phaseheads(pushop, bundler):
850 849 """push phase information through a bundle2 - binary part"""
851 850 pushop.stepsdone.add('phases')
852 851 if pushop.outdatedphases:
853 852 updates = [[] for p in phases.allphases]
854 853 updates[0].extend(h.node() for h in pushop.outdatedphases)
855 854 phasedata = phases.binaryencode(updates)
856 855 bundler.newpart('phase-heads', data=phasedata)
857 856
858 857 def _pushb2phasespushkey(pushop, bundler):
859 858 """push phase information through a bundle2 - pushkey part"""
860 859 pushop.stepsdone.add('phases')
861 860 part2node = []
862 861
863 862 def handlefailure(pushop, exc):
864 863 targetid = int(exc.partid)
865 864 for partid, node in part2node:
866 865 if partid == targetid:
867 866 raise error.Abort(_('updating %s to public failed') % node)
868 867
869 868 enc = pushkey.encode
870 869 for newremotehead in pushop.outdatedphases:
871 870 part = bundler.newpart('pushkey')
872 871 part.addparam('namespace', enc('phases'))
873 872 part.addparam('key', enc(newremotehead.hex()))
874 873 part.addparam('old', enc('%d' % phases.draft))
875 874 part.addparam('new', enc('%d' % phases.public))
876 875 part2node.append((part.id, newremotehead))
877 876 pushop.pkfailcb[part.id] = handlefailure
878 877
879 878 def handlereply(op):
880 879 for partid, node in part2node:
881 880 partrep = op.records.getreplies(partid)
882 881 results = partrep['pushkey']
883 882 assert len(results) <= 1
884 883 msg = None
885 884 if not results:
886 885 msg = _('server ignored update of %s to public!\n') % node
887 886 elif not int(results[0]['return']):
888 887 msg = _('updating %s to public failed!\n') % node
889 888 if msg is not None:
890 889 pushop.ui.warn(msg)
891 890 return handlereply
892 891
893 892 @b2partsgenerator('obsmarkers')
894 893 def _pushb2obsmarkers(pushop, bundler):
895 894 if 'obsmarkers' in pushop.stepsdone:
896 895 return
897 896 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
898 897 if obsolete.commonversion(remoteversions) is None:
899 898 return
900 899 pushop.stepsdone.add('obsmarkers')
901 900 if pushop.outobsmarkers:
902 901 markers = sorted(pushop.outobsmarkers)
903 902 bundle2.buildobsmarkerspart(bundler, markers)
904 903
905 904 @b2partsgenerator('bookmarks')
906 905 def _pushb2bookmarks(pushop, bundler):
907 906 """handle bookmark push through bundle2"""
908 907 if 'bookmarks' in pushop.stepsdone:
909 908 return
910 909 b2caps = bundle2.bundle2caps(pushop.remote)
911 910
912 911 legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
913 912 legacybooks = 'bookmarks' in legacy
914 913
915 914 if not legacybooks and 'bookmarks' in b2caps:
916 915 return _pushb2bookmarkspart(pushop, bundler)
917 916 elif 'pushkey' in b2caps:
918 917 return _pushb2bookmarkspushkey(pushop, bundler)
919 918
920 919 def _bmaction(old, new):
921 920 """small utility for bookmark pushing"""
922 921 if not old:
923 922 return 'export'
924 923 elif not new:
925 924 return 'delete'
926 925 return 'update'
927 926
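# The mapping implemented above, spelled out (old/new are hex node strings,
# '' meaning absent; annotation only):
#
#   _bmaction('', 'abc123')        -> 'export'   (bookmark is new remotely)
#   _bmaction('abc123', '')        -> 'delete'
#   _bmaction('abc123', 'def456')  -> 'update'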
928 927 def _pushb2bookmarkspart(pushop, bundler):
929 928 pushop.stepsdone.add('bookmarks')
930 929 if not pushop.outbookmarks:
931 930 return
932 931
933 932 allactions = []
934 933 data = []
935 934 for book, old, new in pushop.outbookmarks:
936 935 new = bin(new)
937 936 data.append((book, new))
938 937 allactions.append((book, _bmaction(old, new)))
939 938 checkdata = bookmod.binaryencode(data)
940 939 bundler.newpart('bookmarks', data=checkdata)
941 940
942 941 def handlereply(op):
943 942 ui = pushop.ui
944 943 # if success
945 944 for book, action in allactions:
946 945 ui.status(bookmsgmap[action][0] % book)
947 946
948 947 return handlereply
949 948
950 949 def _pushb2bookmarkspushkey(pushop, bundler):
951 950 pushop.stepsdone.add('bookmarks')
952 951 part2book = []
953 952 enc = pushkey.encode
954 953
955 954 def handlefailure(pushop, exc):
956 955 targetid = int(exc.partid)
957 956 for partid, book, action in part2book:
958 957 if partid == targetid:
959 958 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
960 959 # we should not be called for a part we did not generate
961 960 assert False
962 961
963 962 for book, old, new in pushop.outbookmarks:
964 963 part = bundler.newpart('pushkey')
965 964 part.addparam('namespace', enc('bookmarks'))
966 965 part.addparam('key', enc(book))
967 966 part.addparam('old', enc(old))
968 967 part.addparam('new', enc(new))
969 968 action = 'update'
970 969 if not old:
971 970 action = 'export'
972 971 elif not new:
973 972 action = 'delete'
974 973 part2book.append((part.id, book, action))
975 974 pushop.pkfailcb[part.id] = handlefailure
976 975
977 976 def handlereply(op):
978 977 ui = pushop.ui
979 978 for partid, book, action in part2book:
980 979 partrep = op.records.getreplies(partid)
981 980 results = partrep['pushkey']
982 981 assert len(results) <= 1
983 982 if not results:
984 983 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
985 984 else:
986 985 ret = int(results[0]['return'])
987 986 if ret:
988 987 ui.status(bookmsgmap[action][0] % book)
989 988 else:
990 989 ui.warn(bookmsgmap[action][1] % book)
991 990 if pushop.bkresult is not None:
992 991 pushop.bkresult = 1
993 992 return handlereply
994 993
995 994 @b2partsgenerator('pushvars', idx=0)
996 995 def _getbundlesendvars(pushop, bundler):
997 996 '''send shellvars via bundle2'''
998 997 pushvars = pushop.pushvars
999 998 if pushvars:
1000 999 shellvars = {}
1001 1000 for raw in pushvars:
1002 1001 if '=' not in raw:
1003 1002 msg = ("unable to parse variable '%s', should follow "
1004 1003 "'KEY=VALUE' or 'KEY=' format")
1005 1004 raise error.Abort(msg % raw)
1006 1005 k, v = raw.split('=', 1)
1007 1006 shellvars[k] = v
1008 1007
1009 1008 part = bundler.newpart('pushvars')
1010 1009
1011 1010 for key, value in shellvars.iteritems():
1012 1011 part.addparam(key, value, mandatory=False)
1013 1012
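# These variables typically come from the command line; a hedged example
# (the server-side variable name is an assumption based on the pushvars
# feature):
#
#   $ hg push --pushvars "DEBUG=1" --pushvars "BYPASS_REVIEW="
#
# Each KEY=VALUE pair is sent as an advisory part parameter that
# server-side hooks can observe (e.g. as HG_USERVAR_DEBUG).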
1014 1013 def _pushbundle2(pushop):
1015 1014 """push data to the remote using bundle2
1016 1015
1017 1016 The only currently supported type of data is changegroup but this will
1018 1017 evolve in the future."""
1019 1018 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1020 1019 pushback = (pushop.trmanager
1021 1020 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
1022 1021
1023 1022 # create reply capability
1024 1023 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
1025 1024 allowpushback=pushback,
1026 1025 role='client'))
1027 1026 bundler.newpart('replycaps', data=capsblob)
1028 1027 replyhandlers = []
1029 1028 for partgenname in b2partsgenorder:
1030 1029 partgen = b2partsgenmapping[partgenname]
1031 1030 ret = partgen(pushop, bundler)
1032 1031 if callable(ret):
1033 1032 replyhandlers.append(ret)
1034 1033 # do not push if nothing to push
1035 1034 if bundler.nbparts <= 1:
1036 1035 return
1037 1036 stream = util.chunkbuffer(bundler.getchunks())
1038 1037 try:
1039 1038 try:
1040 1039 reply = pushop.remote.unbundle(
1041 1040 stream, ['force'], pushop.remote.url())
1042 1041 except error.BundleValueError as exc:
1043 1042 raise error.Abort(_('missing support for %s') % exc)
1044 1043 try:
1045 1044 trgetter = None
1046 1045 if pushback:
1047 1046 trgetter = pushop.trmanager.transaction
1048 1047 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1049 1048 except error.BundleValueError as exc:
1050 1049 raise error.Abort(_('missing support for %s') % exc)
1051 1050 except bundle2.AbortFromPart as exc:
1052 1051 pushop.ui.status(_('remote: %s\n') % exc)
1053 1052 if exc.hint is not None:
1054 1053 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
1055 1054 raise error.Abort(_('push failed on remote'))
1056 1055 except error.PushkeyFailed as exc:
1057 1056 partid = int(exc.partid)
1058 1057 if partid not in pushop.pkfailcb:
1059 1058 raise
1060 1059 pushop.pkfailcb[partid](pushop, exc)
1061 1060 for rephand in replyhandlers:
1062 1061 rephand(op)
1063 1062
1064 1063 def _pushchangeset(pushop):
1065 1064 """Make the actual push of changeset bundle to remote repo"""
1066 1065 if 'changesets' in pushop.stepsdone:
1067 1066 return
1068 1067 pushop.stepsdone.add('changesets')
1069 1068 if not _pushcheckoutgoing(pushop):
1070 1069 return
1071 1070
1072 1071 # Should have verified this in push().
1073 1072 assert pushop.remote.capable('unbundle')
1074 1073
1075 1074 pushop.repo.prepushoutgoinghooks(pushop)
1076 1075 outgoing = pushop.outgoing
1077 1076 # TODO: get bundlecaps from remote
1078 1077 bundlecaps = None
1079 1078 # create a changegroup from local
1080 1079 if pushop.revs is None and not (outgoing.excluded
1081 1080 or pushop.repo.changelog.filteredrevs):
1082 1081 # push everything,
1083 1082 # use the fast path, no race possible on push
1084 1083 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1085 1084 fastpath=True, bundlecaps=bundlecaps)
1086 1085 else:
1087 1086 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1088 1087 'push', bundlecaps=bundlecaps)
1089 1088
1090 1089 # apply changegroup to remote
1091 1090 # local repo finds heads on server, finds out what
1092 1091 # revs it must push. once revs transferred, if server
1093 1092 # finds it has different heads (someone else won
1094 1093 # commit/push race), server aborts.
1095 1094 if pushop.force:
1096 1095 remoteheads = ['force']
1097 1096 else:
1098 1097 remoteheads = pushop.remoteheads
1099 1098 # ssh: return remote's addchangegroup()
1100 1099 # http: return remote's addchangegroup() or 0 for error
1101 1100 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1102 1101 pushop.repo.url())
1103 1102
1104 1103 def _pushsyncphase(pushop):
1105 1104 """synchronise phase information locally and remotely"""
1106 1105 cheads = pushop.commonheads
1107 1106 # even when we don't push, exchanging phase data is useful
1108 1107 remotephases = pushop.remote.listkeys('phases')
1109 1108 if (pushop.ui.configbool('ui', '_usedassubrepo')
1110 1109 and remotephases # server supports phases
1111 1110 and pushop.cgresult is None # nothing was pushed
1112 1111 and remotephases.get('publishing', False)):
1113 1112 # When:
1114 1113 # - this is a subrepo push
1115 1114 # - and the remote supports phases
1116 1115 # - and no changeset was pushed
1117 1116 # - and the remote is publishing
1118 1117 # We may be in the issue 3871 case!
1119 1118 # We drop the phase synchronisation that would otherwise be done as
1120 1119 # a courtesy and that could publish, on the remote, changesets that
1121 1120 # are possibly still draft locally.
1122 1121 remotephases = {'publishing': 'True'}
1123 1122 if not remotephases: # old server or public only reply from non-publishing
1124 1123 _localphasemove(pushop, cheads)
1125 1124 # don't push any phase data as there is nothing to push
1126 1125 else:
1127 1126 ana = phases.analyzeremotephases(pushop.repo, cheads,
1128 1127 remotephases)
1129 1128 pheads, droots = ana
1130 1129 ### Apply remote phase on local
1131 1130 if remotephases.get('publishing', False):
1132 1131 _localphasemove(pushop, cheads)
1133 1132 else: # publish = False
1134 1133 _localphasemove(pushop, pheads)
1135 1134 _localphasemove(pushop, cheads, phases.draft)
1136 1135 ### Apply local phase on remote
1137 1136
1138 1137 if pushop.cgresult:
1139 1138 if 'phases' in pushop.stepsdone:
1140 1139 # phases already pushed through bundle2
1141 1140 return
1142 1141 outdated = pushop.outdatedphases
1143 1142 else:
1144 1143 outdated = pushop.fallbackoutdatedphases
1145 1144
1146 1145 pushop.stepsdone.add('phases')
1147 1146
1148 1147 # filter heads already turned public by the push
1149 1148 outdated = [c for c in outdated if c.node() not in pheads]
1150 1149 # fallback to independent pushkey command
1151 1150 for newremotehead in outdated:
1152 1151 r = pushop.remote.pushkey('phases',
1153 1152 newremotehead.hex(),
1154 1153 ('%d' % phases.draft),
1155 1154 ('%d' % phases.public))
1156 1155 if not r:
1157 1156 pushop.ui.warn(_('updating %s to public failed!\n')
1158 1157 % newremotehead)
1159 1158
1160 1159 def _localphasemove(pushop, nodes, phase=phases.public):
1161 1160 """move <nodes> to <phase> in the local source repo"""
1162 1161 if pushop.trmanager:
1163 1162 phases.advanceboundary(pushop.repo,
1164 1163 pushop.trmanager.transaction(),
1165 1164 phase,
1166 1165 nodes)
1167 1166 else:
1168 1167 # repo is not locked, do not change any phases!
1169 1168 # Informs the user that phases should have been moved when
1170 1169 # applicable.
1171 1170 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1172 1171 phasestr = phases.phasenames[phase]
1173 1172 if actualmoves:
1174 1173 pushop.ui.status(_('cannot lock source repo, skipping '
1175 1174 'local %s phase update\n') % phasestr)
1176 1175
1177 1176 def _pushobsolete(pushop):
1178 1177 """utility function to push obsolete markers to a remote"""
1179 1178 if 'obsmarkers' in pushop.stepsdone:
1180 1179 return
1181 1180 repo = pushop.repo
1182 1181 remote = pushop.remote
1183 1182 pushop.stepsdone.add('obsmarkers')
1184 1183 if pushop.outobsmarkers:
1185 1184 pushop.ui.debug('try to push obsolete markers to remote\n')
1186 1185 rslts = []
1187 1186 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1188 1187 for key in sorted(remotedata, reverse=True):
1189 1188 # reverse sort to ensure we end with dump0
1190 1189 data = remotedata[key]
1191 1190 rslts.append(remote.pushkey('obsolete', key, '', data))
1192 1191 if [r for r in rslts if not r]:
1193 1192 msg = _('failed to push some obsolete markers!\n')
1194 1193 repo.ui.warn(msg)
1195 1194
1196 1195 def _pushbookmark(pushop):
1197 1196 """Update bookmark position on remote"""
1198 1197 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1199 1198 return
1200 1199 pushop.stepsdone.add('bookmarks')
1201 1200 ui = pushop.ui
1202 1201 remote = pushop.remote
1203 1202
1204 1203 for b, old, new in pushop.outbookmarks:
1205 1204 action = 'update'
1206 1205 if not old:
1207 1206 action = 'export'
1208 1207 elif not new:
1209 1208 action = 'delete'
1210 1209 if remote.pushkey('bookmarks', b, old, new):
1211 1210 ui.status(bookmsgmap[action][0] % b)
1212 1211 else:
1213 1212 ui.warn(bookmsgmap[action][1] % b)
1214 1213 # discovery can have set the value from an invalid entry
1215 1214 if pushop.bkresult is not None:
1216 1215 pushop.bkresult = 1
1217 1216
1218 1217 class pulloperation(object):
1219 1218 """A object that represent a single pull operation
1220 1219
1221 1220 It purpose is to carry pull related state and very common operation.
1222 1221
1223 1222 A new should be created at the beginning of each pull and discarded
1224 1223 afterward.
1225 1224 """
1226 1225
1227 1226 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1228 1227 remotebookmarks=None, streamclonerequested=None):
1229 1228 # repo we pull into
1230 1229 self.repo = repo
1231 1230 # repo we pull from
1232 1231 self.remote = remote
1233 1232 # revision we try to pull (None is "all")
1234 1233 self.heads = heads
1235 1234 # bookmarks pulled explicitly
1236 1235 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1237 1236 for bookmark in bookmarks]
1238 1237 # do we force pull?
1239 1238 self.force = force
1240 1239 # whether a streaming clone was requested
1241 1240 self.streamclonerequested = streamclonerequested
1242 1241 # transaction manager
1243 1242 self.trmanager = None
1244 1243 # set of common changeset between local and remote before pull
1245 1244 self.common = None
1246 1245 # set of pulled heads
1247 1246 self.rheads = None
1248 1247 # list of missing changesets to fetch remotely
1249 1248 self.fetch = None
1250 1249 # remote bookmarks data
1251 1250 self.remotebookmarks = remotebookmarks
1252 1251 # result of changegroup pulling (used as return code by pull)
1253 1252 self.cgresult = None
1254 1253 # list of steps already done
1255 1254 self.stepsdone = set()
1256 1255 # Whether we attempted a clone from pre-generated bundles.
1257 1256 self.clonebundleattempted = False
1258 1257
1259 1258 @util.propertycache
1260 1259 def pulledsubset(self):
1261 1260 """heads of the set of changeset target by the pull"""
1262 1261 # compute target subset
1263 1262 if self.heads is None:
1264 1263 # We pulled everything possible
1265 1264 # sync on everything common
1266 1265 c = set(self.common)
1267 1266 ret = list(self.common)
1268 1267 for n in self.rheads:
1269 1268 if n not in c:
1270 1269 ret.append(n)
1271 1270 return ret
1272 1271 else:
1273 1272 # We pulled a specific subset
1274 1273 # sync on this subset
1275 1274 return self.heads
1276 1275
1277 1276 @util.propertycache
1278 1277 def canusebundle2(self):
1279 1278 return not _forcebundle1(self)
1280 1279
1281 1280 @util.propertycache
1282 1281 def remotebundle2caps(self):
1283 1282 return bundle2.bundle2caps(self.remote)
1284 1283
1285 1284 def gettransaction(self):
1286 1285 # deprecated; talk to trmanager directly
1287 1286 return self.trmanager.transaction()
1288 1287
1289 1288 class transactionmanager(util.transactional):
1290 1289 """An object to manage the life cycle of a transaction
1291 1290
1292 1291 It creates the transaction on demand and calls the appropriate hooks when
1293 1292 closing the transaction."""
1294 1293 def __init__(self, repo, source, url):
1295 1294 self.repo = repo
1296 1295 self.source = source
1297 1296 self.url = url
1298 1297 self._tr = None
1299 1298
1300 1299 def transaction(self):
1301 1300 """Return an open transaction object, constructing if necessary"""
1302 1301 if not self._tr:
1303 1302 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1304 1303 self._tr = self.repo.transaction(trname)
1305 1304 self._tr.hookargs['source'] = self.source
1306 1305 self._tr.hookargs['url'] = self.url
1307 1306 return self._tr
1308 1307
1309 1308 def close(self):
1310 1309 """close transaction if created"""
1311 1310 if self._tr is not None:
1312 1311 self._tr.close()
1313 1312
1314 1313 def release(self):
1315 1314 """release transaction if created"""
1316 1315 if self._tr is not None:
1317 1316 self._tr.release()
1318 1317
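# A minimal sketch of how this class is used elsewhere in this file (see
# push() and pull()): the transaction is created lazily, and the context
# manager closes or releases it as appropriate:
#
#   trmanager = transactionmanager(repo, 'pull', remote.url())
#   with trmanager:
#       tr = trmanager.transaction()  # hooks see 'source' and 'url'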
1319 1318 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1320 1319 streamclonerequested=None):
1321 1320 """Fetch repository data from a remote.
1322 1321
1323 1322 This is the main function used to retrieve data from a remote repository.
1324 1323
1325 1324 ``repo`` is the local repository to clone into.
1326 1325 ``remote`` is a peer instance.
1327 1326 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1328 1327 default) means to pull everything from the remote.
1329 1328 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1330 1329 default, all remote bookmarks are pulled.
1331 1330 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1332 1331 initialization.
1333 1332 ``streamclonerequested`` is a boolean indicating whether a "streaming
1334 1333 clone" is requested. A "streaming clone" is essentially a raw file copy
1335 1334 of revlogs from the server. This only works when the local repository is
1336 1335 empty. The default value of ``None`` means to respect the server
1337 1336 configuration for preferring stream clones.
1338 1337
1339 1338 Returns the ``pulloperation`` created for this pull.
1340 1339 """
1341 1340 if opargs is None:
1342 1341 opargs = {}
1343 1342 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1344 1343 streamclonerequested=streamclonerequested,
1345 1344 **pycompat.strkwargs(opargs))
1346 1345
1347 1346 peerlocal = pullop.remote.local()
1348 1347 if peerlocal:
1349 1348 missing = set(peerlocal.requirements) - pullop.repo.supported
1350 1349 if missing:
1351 1350 msg = _("required features are not"
1352 1351 " supported in the destination:"
1353 1352 " %s") % (', '.join(sorted(missing)))
1354 1353 raise error.Abort(msg)
1355 1354
1356 1355 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1357 1356 with repo.wlock(), repo.lock(), pullop.trmanager:
1358 1357 # This should ideally be in _pullbundle2(). However, it needs to run
1359 1358 # before discovery to avoid extra work.
1360 1359 _maybeapplyclonebundle(pullop)
1361 1360 streamclone.maybeperformlegacystreamclone(pullop)
1362 1361 _pulldiscovery(pullop)
1363 1362 if pullop.canusebundle2:
1364 1363 _pullbundle2(pullop)
1365 1364 _pullchangeset(pullop)
1366 1365 _pullphase(pullop)
1367 1366 _pullbookmarks(pullop)
1368 1367 _pullobsolete(pullop)
1369 1368
1370 1369 # storing remotenames
1371 1370 if repo.ui.configbool('experimental', 'remotenames'):
1372 1371 logexchange.pullremotenames(repo, remote)
1373 1372
1374 1373 return pullop
1375 1374
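# A hedged usage sketch mirroring the docstring above (peer construction is
# an assumption, not shown in this file):
#
#   >>> from mercurial import hg
#   >>> other = hg.peer(repo.ui, {}, 'https://example.com/repo')
#   >>> pullop = pull(repo, other)    # pull everything
#   >>> pullop.cgresult               # changegroup result code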
1376 1375 # list of steps to perform discovery before pull
1377 1376 pulldiscoveryorder = []
1378 1377
1379 1378 # Mapping between step name and function
1380 1379 #
1381 1380 # This exists to help extensions wrap steps if necessary
1382 1381 pulldiscoverymapping = {}
1383 1382
1384 1383 def pulldiscovery(stepname):
1385 1384 """decorator for function performing discovery before pull
1386 1385
1387 1386 The function is added to the step -> function mapping and appended to the
1388 1387 list of steps. Beware that decorated function will be added in order (this
1389 1388 may matter).
1390 1389
1391 1390 You can only use this decorator for a new step, if you want to wrap a step
1392 1391 from an extension, change the pulldiscovery dictionary directly."""
1393 1392 def dec(func):
1394 1393 assert stepname not in pulldiscoverymapping
1395 1394 pulldiscoverymapping[stepname] = func
1396 1395 pulldiscoveryorder.append(stepname)
1397 1396 return func
1398 1397 return dec
1399 1398
1400 1399 def _pulldiscovery(pullop):
1401 1400 """Run all discovery steps"""
1402 1401 for stepname in pulldiscoveryorder:
1403 1402 step = pulldiscoverymapping[stepname]
1404 1403 step(pullop)
1405 1404
1406 1405 @pulldiscovery('b1:bookmarks')
1407 1406 def _pullbookmarkbundle1(pullop):
1408 1407 """fetch bookmark data in bundle1 case
1409 1408
1410 1409 If not using bundle2, we have to fetch bookmarks before changeset
1411 1410 discovery to reduce the chance and impact of race conditions."""
1412 1411 if pullop.remotebookmarks is not None:
1413 1412 return
1414 1413 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1415 1414 # all known bundle2 servers now support listkeys, but let's be nice to
1416 1415 # new implementations.
1417 1416 return
1418 1417 books = pullop.remote.listkeys('bookmarks')
1419 1418 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1420 1419
1421 1420
1422 1421 @pulldiscovery('changegroup')
1423 1422 def _pulldiscoverychangegroup(pullop):
1424 1423 """discovery phase for the pull
1425 1424
1426 1425 Currently handles changeset discovery only; will change to handle all
1427 1426 discovery at some point."""
1428 1427 tmp = discovery.findcommonincoming(pullop.repo,
1429 1428 pullop.remote,
1430 1429 heads=pullop.heads,
1431 1430 force=pullop.force)
1432 1431 common, fetch, rheads = tmp
1433 1432 nm = pullop.repo.unfiltered().changelog.nodemap
1434 1433 if fetch and rheads:
1435 1434 # If a remote head is filtered locally, put it back in common.
1436 1435 #
1437 1436 # This is a hackish solution to catch most "common but locally
1438 1437 # hidden" situations. We do not perform discovery on the unfiltered
1439 1438 # repository because it ends up doing a pathological amount of round
1440 1439 # trips for a huge amount of changesets we do not care about.
1441 1440 #
1442 1441 # If a set of such "common but filtered" changesets exists on the server
1443 1442 # but does not include a remote head, we'll not be able to detect it.
1444 1443 scommon = set(common)
1445 1444 for n in rheads:
1446 1445 if n in nm:
1447 1446 if n not in scommon:
1448 1447 common.append(n)
1449 1448 if set(rheads).issubset(set(common)):
1450 1449 fetch = []
1451 1450 pullop.common = common
1452 1451 pullop.fetch = fetch
1453 1452 pullop.rheads = rheads
1454 1453
1455 1454 def _pullbundle2(pullop):
1456 1455 """pull data using bundle2
1457 1456
1458 1457 For now, the only supported data is the changegroup."""
1459 1458 kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
1460 1459
1461 1460 # make ui easier to access
1462 1461 ui = pullop.repo.ui
1463 1462
1464 1463 # At the moment we don't do stream clones over bundle2. If that is
1465 1464 # implemented then here's where the check for that will go.
1466 1465 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1467 1466
1468 1467 # declare pull perimeters
1469 1468 kwargs['common'] = pullop.common
1470 1469 kwargs['heads'] = pullop.heads or pullop.rheads

    if streaming:
        kwargs['cg'] = False
        kwargs['stream'] = True
        pullop.stepsdone.add('changegroup')
        pullop.stepsdone.add('phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add('changegroup')

        kwargs['cg'] = pullop.fetch

        legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
        hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
        if (not legacyphase and hasbinaryphase):
            kwargs['phases'] = True
            pullop.stepsdone.add('phases')

        if 'listkeys' in pullop.remotebundle2caps:
            if 'phases' not in pullop.stepsdone:
                kwargs['listkeys'] = ['phases']

    bookmarksrequested = False
    legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
    hasbinarybook = 'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add('request-bookmarks')

    if ('request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark and hasbinarybook):
        kwargs['bookmarks'] = True
        bookmarksrequested = True

    if 'listkeys' in pullop.remotebundle2caps:
        if 'request-bookmarks' not in pullop.stepsdone:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add('request-bookmarks')
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
    try:
        op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction)
        op.modes['bookmarks'] = 'records'
        bundle2.processbundle(pullop.repo, bundle, op=op)
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
        raise error.Abort(_('pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records['bookmarks']:
            books[record['bookmark']] = record['node']
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records['listkeys']:
            if namespace == 'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)

def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""

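# Example (editor's sketch, not part of upstream Mercurial): extensions
# normally use extensions.wrapfunction() on this hook to add their own
# getbundle arguments; the argument name 'example' here is hypothetical.
#
#     from mercurial import exchange, extensions
#
#     def extraprepare(orig, pullop, kwargs):
#         kwargs['example'] = True
#         return orig(pullop, kwargs)
#
#     extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
#                             extraprepare)
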
def _pullchangeset(pullop):
    """pull changesets from the remote into the local repo"""
    # We delay opening the transaction as long as possible so we don't open
    # a transaction for nothing and don't break a future, useful rollback
    # call.
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)

def _pullphase(pullop):
    # Get phases data from the remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr

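# Example (editor's sketch): the legacy pushkey transport above returns
# markers as base85-encoded blobs under 'dump0', 'dump1', ... keys, so a
# single batch could be decoded by hand like this:
#
#     remoteobs = remote.listkeys('obsolete')
#     data = util.b85decode(remoteobs['dump0'])
#     version, markers = obsolete._readmarkers(data)
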
def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {'HG20'}
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps

# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a
    step from an extension, change the getbundle2partsmapping dictionary
    directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec
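
# Example (editor's sketch, not part of upstream Mercurial): an extension
# could register its own part generator; the step name 'example' and the
# part type 'example:data' are hypothetical. Passing idx=0 would make the
# step run before all previously registered ones.
#
#     @getbundle2partsgenerator('example')
#     def _getbundleexamplepart(bundler, repo, source, bundlecaps=None,
#                               b2caps=None, **kwargs):
#         if 'example' in (b2caps or {}):
#             bundler.newpart('example:data', data=b'payload')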

def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False
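
# Example (editor's sketch): the caps produced by caps20to10() above are
# exactly what makes bundle2requested() answer True, e.g.:
#
#     caps = caps20to10(repo, role='client')
#     # caps is roughly {'HG20', 'bundle2=<url-quoted capabilities blob>'}
#     assert bundle2requested(caps)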

def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        info['bundleversion'] = 1
        return info, changegroup.makestream(repo, outgoing, '01', source,
                                            bundlecaps=bundlecaps)

    # bundle20 case
    info['bundleversion'] = 2
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    info['prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()

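# Example (editor's sketch): a server-side caller consumes the result as a
# metadata dict plus an iterator of raw chunks; `write` stands in for a
# hypothetical transport:
#
#     info, gen = getbundlechunks(repo, 'serve', heads=heads, common=common,
#                                 bundlecaps=bundlecaps)
#     for chunk in gen:
#         write(chunk)
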
@getbundle2partsgenerator('stream2')
def _getbundlestream2(bundler, repo, source, bundlecaps=None,
                      b2caps=None, heads=None, common=None, **kwargs):
    if not kwargs.get('stream', False):
        return

    if not streamclone.allowservergeneration(repo):
        raise error.Abort(_('stream data requested but server does not allow '
                            'this feature'),
                          hint=_('well-behaved clients should not be '
                                 'requesting stream data from servers not '
                                 'advertising it; the client may be buggy'))

    # Stream clones don't compress well. And compression undermines a
    # goal of stream clones, which is to be fast. Communicate the desire
    # to avoid compression to consumers of the bundle.
    bundler.prefercompressed = False

    filecount, bytecount, it = streamclone.generatev2(repo)
    requirements = _formatrequirementsspec(repo.requirements)
    part = bundler.newpart('stream2', data=it)
    part.addparam('bytecount', '%d' % bytecount, mandatory=True)
    part.addparam('filecount', '%d' % filecount, mandatory=True)
    part.addparam('requirements', requirements, mandatory=True)

@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cgstream = None
    if kwargs.get(r'cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        if outgoing.missing:
            cgstream = changegroup.makestream(repo, outgoing, version, source,
                                              bundlecaps=bundlecaps)

    if cgstream:
        part = bundler.newpart('changegroup', data=cgstream)
        if cgversions:
            part.addparam('version', version)
        part.addparam('nbchanges', '%d' % len(outgoing.missing),
                      mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

@getbundle2partsgenerator('bookmarks')
def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                           b2caps=None, **kwargs):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get(r'bookmarks', False):
        return
    if 'bookmarks' not in b2caps:
        raise ValueError(_('no common bookmarks exchange method'))
    books = bookmod.listbinbookmarks(repo)
    data = bookmod.binaryencode(books)
    if data:
        bundler.newpart('bookmarks', data=data)

@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get(r'listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get(r'obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        bundle2.buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle"""
    if kwargs.get(r'phases', False):
        if 'heads' not in b2caps.get('phases'):
            raise ValueError(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phases for now.
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

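# Example (editor's sketch): a pushing client builds the 'hashed' form the
# same way, from the sorted binary heads it saw during discovery, so the
# server can detect a race without receiving the full head list:
#
#     expected = ['hashed',
#                 hashlib.sha1(''.join(sorted(remoteheads))).digest()]
#     check_heads(repo, expected, 'uploading changes')
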
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

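# Example (editor's sketch): a manifest line such as
#
#     https://example.com/full.hg BUNDLESPEC=gzip-v2
#
# parses into {'URL': 'https://example.com/full.hg',
# 'BUNDLESPEC': 'gzip-v2', 'COMPRESSION': 'gzip', 'VERSION': 'v2'}, with
# COMPRESSION and VERSION filled in from the BUNDLESPEC when it is valid.
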
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                comp, version, params = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and (comp != 'UN' or version != 's1'):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (
                                  entry['URL'], util.forcebytestr(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case: b is missing the attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case: a is missing the attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless the attribute is present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to the next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to the next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]
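
# Example (editor's sketch): with
#
#     [ui]
#     clonebundleprefers = COMPRESSION=zstd, COMPRESSION=gzip
#
# entries whose COMPRESSION is 'zstd' sort before 'gzip' ones, and the
# manifest order breaks any remaining ties (the _cmp() fallback above).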

def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') %
                    util.forcebytestr(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') %
                    util.forcebytestr(e.reason))

        return False