push-discovery: extract the bookmark comparison logic in its own function...
Boris Feld
r36956:8fd9b56e default
@@ -1,2263 +1,2272 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 bin,
17 17 hex,
18 18 nullid,
19 19 )
20 20 from . import (
21 21 bookmarks as bookmod,
22 22 bundle2,
23 23 changegroup,
24 24 discovery,
25 25 error,
26 26 lock as lockmod,
27 27 logexchange,
28 28 obsolete,
29 29 phases,
30 30 pushkey,
31 31 pycompat,
32 32 scmutil,
33 33 sslutil,
34 34 streamclone,
35 35 url as urlmod,
36 36 util,
37 37 )
38 38
39 39 urlerr = util.urlerr
40 40 urlreq = util.urlreq
41 41
42 42 # Maps bundle version human names to changegroup versions.
43 43 _bundlespeccgversions = {'v1': '01',
44 44 'v2': '02',
45 45 'packed1': 's1',
46 46 'bundle2': '02', #legacy
47 47 }
48 48
49 49 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
50 50 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
51 51
52 52 def parsebundlespec(repo, spec, strict=True, externalnames=False):
53 53 """Parse a bundle string specification into parts.
54 54
55 55 Bundle specifications denote a well-defined bundle/exchange format.
56 56 The content of a given specification should not change over time in
57 57 order to ensure that bundles produced by a newer version of Mercurial are
58 58 readable from an older version.
59 59
60 60 The string currently has the form:
61 61
62 62 <compression>-<type>[;<parameter0>[;<parameter1>]]
63 63
64 64 Where <compression> is one of the supported compression formats
65 65 and <type> is (currently) a version string. A ";" can follow the type and
66 66 all text afterwards is interpreted as URI encoded, ";" delimited key=value
67 67 pairs.
68 68
69 69 If ``strict`` is True (the default) <compression> is required. Otherwise,
70 70 it is optional.
71 71
72 72 If ``externalnames`` is False (the default), the human-centric names will
73 73 be converted to their internal representation.
74 74
75 75 Returns a 3-tuple of (compression, version, parameters). Compression will
76 76 be ``None`` if not in strict mode and a compression isn't defined.
77 77
78 78 An ``InvalidBundleSpecification`` is raised when the specification is
79 79 not syntactically well formed.
80 80
81 81 An ``UnsupportedBundleSpecification`` is raised when the compression or
82 82 bundle type/version is not recognized.
83 83
84 84 Note: this function will likely eventually return a more complex data
85 85 structure, including bundle2 part information.
86 86 """
87 87 def parseparams(s):
88 88 if ';' not in s:
89 89 return s, {}
90 90
91 91 params = {}
92 92 version, paramstr = s.split(';', 1)
93 93
94 94 for p in paramstr.split(';'):
95 95 if '=' not in p:
96 96 raise error.InvalidBundleSpecification(
97 97 _('invalid bundle specification: '
98 98 'missing "=" in parameter: %s') % p)
99 99
100 100 key, value = p.split('=', 1)
101 101 key = urlreq.unquote(key)
102 102 value = urlreq.unquote(value)
103 103 params[key] = value
104 104
105 105 return version, params
106 106
107 107
108 108 if strict and '-' not in spec:
109 109 raise error.InvalidBundleSpecification(
110 110 _('invalid bundle specification; '
111 111 'must be prefixed with compression: %s') % spec)
112 112
113 113 if '-' in spec:
114 114 compression, version = spec.split('-', 1)
115 115
116 116 if compression not in util.compengines.supportedbundlenames:
117 117 raise error.UnsupportedBundleSpecification(
118 118 _('%s compression is not supported') % compression)
119 119
120 120 version, params = parseparams(version)
121 121
122 122 if version not in _bundlespeccgversions:
123 123 raise error.UnsupportedBundleSpecification(
124 124 _('%s is not a recognized bundle version') % version)
125 125 else:
126 126 # Value could be just the compression or just the version, in which
127 127 # case some defaults are assumed (but only when not in strict mode).
128 128 assert not strict
129 129
130 130 spec, params = parseparams(spec)
131 131
132 132 if spec in util.compengines.supportedbundlenames:
133 133 compression = spec
134 134 version = 'v1'
135 135 # Generaldelta repos require v2.
136 136 if 'generaldelta' in repo.requirements:
137 137 version = 'v2'
138 138 # Modern compression engines require v2.
139 139 if compression not in _bundlespecv1compengines:
140 140 version = 'v2'
141 141 elif spec in _bundlespeccgversions:
142 142 if spec == 'packed1':
143 143 compression = 'none'
144 144 else:
145 145 compression = 'bzip2'
146 146 version = spec
147 147 else:
148 148 raise error.UnsupportedBundleSpecification(
149 149 _('%s is not a recognized bundle specification') % spec)
150 150
151 151 # Bundle version 1 only supports a known set of compression engines.
152 152 if version == 'v1' and compression not in _bundlespecv1compengines:
153 153 raise error.UnsupportedBundleSpecification(
154 154 _('compression engine %s is not supported on v1 bundles') %
155 155 compression)
156 156
157 157 # The specification for packed1 can optionally declare the data formats
158 158 # required to apply it. If we see this metadata, compare against what the
159 159 # repo supports and error if the bundle isn't compatible.
160 160 if version == 'packed1' and 'requirements' in params:
161 161 requirements = set(params['requirements'].split(','))
162 162 missingreqs = requirements - repo.supportedformats
163 163 if missingreqs:
164 164 raise error.UnsupportedBundleSpecification(
165 165 _('missing support for repository features: %s') %
166 166 ', '.join(sorted(missingreqs)))
167 167
168 168 if not externalnames:
169 169 engine = util.compengines.forbundlename(compression)
170 170 compression = engine.bundletype()[1]
171 171 version = _bundlespeccgversions[version]
172 172 return compression, version, params
173 173
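To make the grammar above concrete, here is a hypothetical usage sketch (it assumes a `repo` object is in scope and is not part of the file itself):

    # sketch: parse a full spec with a parameter
    comp, version, params = parsebundlespec(
        repo, 'gzip-v2;requirements=generaldelta')
    # comp    -> 'GZ' (internal name for gzip)
    # version -> '02' (changegroup version behind "v2")
    # params  -> {'requirements': 'generaldelta'}

    # sketch: in non-strict mode the compression may be omitted
    comp, version, params = parsebundlespec(repo, 'v2', strict=False)
    # compression defaults to bzip2, so comp -> 'BZ'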
174 174 def readbundle(ui, fh, fname, vfs=None):
175 175 header = changegroup.readexactly(fh, 4)
176 176
177 177 alg = None
178 178 if not fname:
179 179 fname = "stream"
180 180 if not header.startswith('HG') and header.startswith('\0'):
181 181 fh = changegroup.headerlessfixup(fh, header)
182 182 header = "HG10"
183 183 alg = 'UN'
184 184 elif vfs:
185 185 fname = vfs.join(fname)
186 186
187 187 magic, version = header[0:2], header[2:4]
188 188
189 189 if magic != 'HG':
190 190 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
191 191 if version == '10':
192 192 if alg is None:
193 193 alg = changegroup.readexactly(fh, 2)
194 194 return changegroup.cg1unpacker(fh, alg)
195 195 elif version.startswith('2'):
196 196 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
197 197 elif version == 'S1':
198 198 return streamclone.streamcloneapplier(fh)
199 199 else:
200 200 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
201 201
202 202 def _formatrequirementsspec(requirements):
203 203 return urlreq.quote(','.join(sorted(requirements)))
204 204
205 205 def _formatrequirementsparams(requirements):
206 206 requirements = _formatrequirementsspec(requirements)
207 207 params = "%s%s" % (urlreq.quote("requirements="), requirements)
208 208 return params
209 209
210 210 def getbundlespec(ui, fh):
211 211 """Infer the bundlespec from a bundle file handle.
212 212
213 213 The input file handle is seeked and the original seek position is not
214 214 restored.
215 215 """
216 216 def speccompression(alg):
217 217 try:
218 218 return util.compengines.forbundletype(alg).bundletype()[0]
219 219 except KeyError:
220 220 return None
221 221
222 222 b = readbundle(ui, fh, None)
223 223 if isinstance(b, changegroup.cg1unpacker):
224 224 alg = b._type
225 225 if alg == '_truncatedBZ':
226 226 alg = 'BZ'
227 227 comp = speccompression(alg)
228 228 if not comp:
229 229 raise error.Abort(_('unknown compression algorithm: %s') % alg)
230 230 return '%s-v1' % comp
231 231 elif isinstance(b, bundle2.unbundle20):
232 232 if 'Compression' in b.params:
233 233 comp = speccompression(b.params['Compression'])
234 234 if not comp:
235 235 raise error.Abort(_('unknown compression algorithm: %s') % comp)
236 236 else:
237 237 comp = 'none'
238 238
239 239 version = None
240 240 for part in b.iterparts():
241 241 if part.type == 'changegroup':
242 242 version = part.params['version']
243 243 if version in ('01', '02'):
244 244 version = 'v2'
245 245 else:
246 246 raise error.Abort(_('changegroup version %s does not have '
247 247 'a known bundlespec') % version,
248 248 hint=_('try upgrading your Mercurial '
249 249 'client'))
250 250
251 251 if not version:
252 252 raise error.Abort(_('could not identify changegroup version in '
253 253 'bundle'))
254 254
255 255 return '%s-%s' % (comp, version)
256 256 elif isinstance(b, streamclone.streamcloneapplier):
257 257 requirements = streamclone.readbundle1header(fh)[2]
258 258 return 'none-packed1;%s' % _formatrequirementsparams(requirements)
259 259 else:
260 260 raise error.Abort(_('unknown bundle type: %s') % b)
261 261
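As a hedged usage sketch (the file name and `ui` object are assumptions, not from the original), the function lets a caller recover the spec of an on-disk bundle:

    # sketch: infer the bundlespec of a bundle produced by `hg bundle`;
    # the handle is consumed, so use a throwaway one
    with open('changesets.hg', 'rb') as fh:
        spec = getbundlespec(ui, fh)
    # e.g. 'bzip2-v1', 'gzip-v2', or 'none-packed1;requirements=...'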
262 262 def _computeoutgoing(repo, heads, common):
263 263 """Computes which revs are outgoing given a set of common
264 264 and a set of heads.
265 265
266 266 This is a separate function so extensions can have access to
267 267 the logic.
268 268
269 269 Returns a discovery.outgoing object.
270 270 """
271 271 cl = repo.changelog
272 272 if common:
273 273 hasnode = cl.hasnode
274 274 common = [n for n in common if hasnode(n)]
275 275 else:
276 276 common = [nullid]
277 277 if not heads:
278 278 heads = cl.heads()
279 279 return discovery.outgoing(repo, common, heads)
280 280
281 281 def _forcebundle1(op):
282 282 """return true if a pull/push must use bundle1
283 283
284 284 This function is used to allow testing of the older bundle version"""
285 285 ui = op.repo.ui
286 286 # The goal of this config is to allow developers to choose the bundle
287 287 # version used during exchange. This is especially handy during tests.
288 288 # Value is a list of bundle versions to pick from; the highest version
289 289 # should be used.
290 290 #
291 291 # developer config: devel.legacy.exchange
292 292 exchange = ui.configlist('devel', 'legacy.exchange')
293 293 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
294 294 return forcebundle1 or not op.remote.capable('bundle2')
295 295
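For reference, the developer configuration checked above would be set like this in an hgrc (a sketch of the documented `devel.legacy.exchange` knob):

    [devel]
    # prefer the bundle1 code path even against bundle2-capable peers
    legacy.exchange = bundle1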
296 296 class pushoperation(object):
297 297 """A object that represent a single push operation
298 298
299 299 Its purpose is to carry push related state and very common operations.
300 300
301 301 A new pushoperation should be created at the beginning of each push and
302 302 discarded afterward.
303 303 """
304 304
305 305 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
306 306 bookmarks=(), pushvars=None):
307 307 # repo we push from
308 308 self.repo = repo
309 309 self.ui = repo.ui
310 310 # repo we push to
311 311 self.remote = remote
312 312 # force option provided
313 313 self.force = force
314 314 # revs to be pushed (None is "all")
315 315 self.revs = revs
317 317 # bookmarks explicitly pushed
318 318 self.bookmarks = bookmarks
319 319 # allow push of new branches
320 320 self.newbranch = newbranch
321 321 # steps already performed
322 322 # (used to check which steps have already been performed through bundle2)
322 322 self.stepsdone = set()
323 323 # Integer version of the changegroup push result
324 324 # - None means nothing to push
325 325 # - 0 means HTTP error
326 326 # - 1 means we pushed and remote head count is unchanged *or*
327 327 # we have outgoing changesets but refused to push
328 328 # - other values as described by addchangegroup()
329 329 self.cgresult = None
330 330 # Boolean value for the bookmark push
331 331 self.bkresult = None
332 332 # discovery.outgoing object (contains common and outgoing data)
333 333 self.outgoing = None
334 334 # all remote topological heads before the push
335 335 self.remoteheads = None
336 336 # Details of the remote branch pre and post push
337 337 #
338 338 # mapping: {'branch': ([remoteheads],
339 339 # [newheads],
340 340 # [unsyncedheads],
341 341 # [discardedheads])}
342 342 # - branch: the branch name
343 343 # - remoteheads: the list of remote heads known locally
344 344 # None if the branch is new
345 345 # - newheads: the new remote heads (known locally) with outgoing pushed
346 346 # - unsyncedheads: the list of remote heads unknown locally.
347 347 # - discardedheads: the list of remote heads made obsolete by the push
348 348 self.pushbranchmap = None
349 349 # testable as a boolean indicating if any nodes are missing locally.
350 350 self.incoming = None
351 351 # summary of the remote phase situation
352 352 self.remotephases = None
353 353 # phase changes that must be pushed alongside the changesets
354 354 self.outdatedphases = None
355 355 # phase changes that must be pushed if the changeset push fails
356 356 self.fallbackoutdatedphases = None
357 357 # outgoing obsmarkers
358 358 self.outobsmarkers = set()
359 359 # outgoing bookmarks
360 360 self.outbookmarks = []
361 361 # transaction manager
362 362 self.trmanager = None
363 363 # map { pushkey partid -> callback handling failure}
364 364 # used to handle exceptions from mandatory pushkey part failures
365 365 self.pkfailcb = {}
366 366 # an iterable of pushvars or None
367 367 self.pushvars = pushvars
368 368
369 369 @util.propertycache
370 370 def futureheads(self):
371 371 """future remote heads if the changeset push succeeds"""
372 372 return self.outgoing.missingheads
373 373
374 374 @util.propertycache
375 375 def fallbackheads(self):
376 376 """future remote heads if the changeset push fails"""
377 377 if self.revs is None:
378 378 # no target to push, all common heads are relevant
379 379 return self.outgoing.commonheads
380 380 unfi = self.repo.unfiltered()
381 381 # I want cheads = heads(::missingheads and ::commonheads)
382 382 # (missingheads is revs with secret changeset filtered out)
383 383 #
384 384 # This can be expressed as:
385 385 # cheads = ( (missingheads and ::commonheads)
386 386 # + (commonheads and ::missingheads)
387 387 # )
388 388 #
389 389 # while trying to push we already computed the following:
390 390 # common = (::commonheads)
391 391 # missing = ((commonheads::missingheads) - commonheads)
392 392 #
393 393 # We can pick:
394 394 # * missingheads part of common (::commonheads)
395 395 common = self.outgoing.common
396 396 nm = self.repo.changelog.nodemap
397 397 cheads = [node for node in self.revs if nm[node] in common]
398 398 # and
399 399 # * commonheads parents on missing
400 400 revset = unfi.set('%ln and parents(roots(%ln))',
401 401 self.outgoing.commonheads,
402 402 self.outgoing.missing)
403 403 cheads.extend(c.node() for c in revset)
404 404 return cheads
405 405
406 406 @property
407 407 def commonheads(self):
408 408 """set of all common heads after changeset bundle push"""
409 409 if self.cgresult:
410 410 return self.futureheads
411 411 else:
412 412 return self.fallbackheads
413 413
414 414 # mapping of messages used when pushing bookmarks
415 415 bookmsgmap = {'update': (_("updating bookmark %s\n"),
416 416 _('updating bookmark %s failed!\n')),
417 417 'export': (_("exporting bookmark %s\n"),
418 418 _('exporting bookmark %s failed!\n')),
419 419 'delete': (_("deleting remote bookmark %s\n"),
420 420 _('deleting remote bookmark %s failed!\n')),
421 421 }
422 422
423 423
424 424 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
425 425 opargs=None):
426 426 '''Push outgoing changesets (limited by revs) from a local
427 427 repository to remote. Return an integer:
428 428 - None means nothing to push
429 429 - 0 means HTTP error
430 430 - 1 means we pushed and remote head count is unchanged *or*
431 431 we have outgoing changesets but refused to push
432 432 - other values as described by addchangegroup()
433 433 '''
434 434 if opargs is None:
435 435 opargs = {}
436 436 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
437 437 **pycompat.strkwargs(opargs))
438 438 if pushop.remote.local():
439 439 missing = (set(pushop.repo.requirements)
440 440 - pushop.remote.local().supported)
441 441 if missing:
442 442 msg = _("required features are not"
443 443 " supported in the destination:"
444 444 " %s") % (', '.join(sorted(missing)))
445 445 raise error.Abort(msg)
446 446
447 447 if not pushop.remote.canpush():
448 448 raise error.Abort(_("destination does not support push"))
449 449
450 450 if not pushop.remote.capable('unbundle'):
451 451 raise error.Abort(_('cannot push: destination does not support the '
452 452 'unbundle wire protocol command'))
453 453
454 454 # get lock as we might write phase data
455 455 wlock = lock = None
456 456 try:
457 457 # bundle2 push may receive a reply bundle touching bookmarks or other
458 458 # things requiring the wlock. Take it now to ensure proper ordering.
459 459 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
460 460 if (not _forcebundle1(pushop)) and maypushback:
461 461 wlock = pushop.repo.wlock()
462 462 lock = pushop.repo.lock()
463 463 pushop.trmanager = transactionmanager(pushop.repo,
464 464 'push-response',
465 465 pushop.remote.url())
466 466 except IOError as err:
467 467 if err.errno != errno.EACCES:
468 468 raise
469 469 # source repo cannot be locked.
470 470 # We do not abort the push, but just disable the local phase
471 471 # synchronisation.
472 472 msg = 'cannot lock source repository: %s\n' % err
473 473 pushop.ui.debug(msg)
474 474
475 475 with wlock or util.nullcontextmanager(), \
476 476 lock or util.nullcontextmanager(), \
477 477 pushop.trmanager or util.nullcontextmanager():
478 478 pushop.repo.checkpush(pushop)
479 479 _pushdiscovery(pushop)
480 480 if not _forcebundle1(pushop):
481 481 _pushbundle2(pushop)
482 482 _pushchangeset(pushop)
483 483 _pushsyncphase(pushop)
484 484 _pushobsolete(pushop)
485 485 _pushbookmark(pushop)
486 486
487 487 return pushop
488 488
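A minimal caller sketch for this entry point (assuming `repo` and a `remote` peer are already available; the bookmark name is hypothetical):

    # sketch: push one head plus an explicit bookmark, then inspect the
    # result codes documented in the docstring above
    pushop = push(repo, remote, revs=[repo['tip'].node()],
                  bookmarks=('feature-bookmark',))
    if pushop.cgresult == 0:
        repo.ui.warn('push failed\n')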
489 489 # list of steps to perform discovery before push
490 490 pushdiscoveryorder = []
491 491
492 492 # Mapping between step name and function
493 493 #
494 494 # This exists to help extensions wrap steps if necessary
495 495 pushdiscoverymapping = {}
496 496
497 497 def pushdiscovery(stepname):
498 498 """decorator for function performing discovery before push
499 499
500 500 The function is added to the step -> function mapping and appended to the
501 501 list of steps. Beware that decorated functions will be added in order (this
502 502 may matter).
503 503
504 504 You can only use this decorator for a new step; if you want to wrap a step
505 505 from an extension, change the pushdiscoverymapping dictionary directly."""
506 506 def dec(func):
507 507 assert stepname not in pushdiscoverymapping
508 508 pushdiscoverymapping[stepname] = func
509 509 pushdiscoveryorder.append(stepname)
510 510 return func
511 511 return dec
512 512
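For example, an extension could register an extra step with this decorator (a hypothetical sketch; the step name and debug message are made up). Because steps run in registration order, it executes after the built-in ones and can read the state they populated:

    @pushdiscovery('debug-summary')
    def _pushdiscoverydebugsummary(pushop):
        # purely informational: report what the 'changeset' step found
        pushop.ui.debug('discovery found %d outgoing changesets\n'
                        % len(pushop.outgoing.missing))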
513 513 def _pushdiscovery(pushop):
514 514 """Run all discovery steps"""
515 515 for stepname in pushdiscoveryorder:
516 516 step = pushdiscoverymapping[stepname]
517 517 step(pushop)
518 518
519 519 @pushdiscovery('changeset')
520 520 def _pushdiscoverychangeset(pushop):
521 521 """discover the changeset that need to be pushed"""
522 522 fci = discovery.findcommonincoming
523 523 if pushop.revs:
524 524 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
525 525 ancestorsof=pushop.revs)
526 526 else:
527 527 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
528 528 common, inc, remoteheads = commoninc
529 529 fco = discovery.findcommonoutgoing
530 530 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
531 531 commoninc=commoninc, force=pushop.force)
532 532 pushop.outgoing = outgoing
533 533 pushop.remoteheads = remoteheads
534 534 pushop.incoming = inc
535 535
536 536 @pushdiscovery('phase')
537 537 def _pushdiscoveryphase(pushop):
538 538 """discover the phase that needs to be pushed
539 539
540 540 (computed for both success and failure case for changesets push)"""
541 541 outgoing = pushop.outgoing
542 542 unfi = pushop.repo.unfiltered()
543 543 remotephases = pushop.remote.listkeys('phases')
544 544 if (pushop.ui.configbool('ui', '_usedassubrepo')
545 545 and remotephases # server supports phases
546 546 and not pushop.outgoing.missing # no changesets to be pushed
547 547 and remotephases.get('publishing', False)):
548 548 # When:
549 549 # - this is a subrepo push
550 550 # - and the remote supports phases
551 551 # - and no changesets are to be pushed
552 552 # - and the remote is publishing
553 553 # We may be in the issue 3781 case!
554 554 # We drop the phase synchronisation that would otherwise be done as
555 555 # a courtesy to publish, on the remote, changesets that are possibly
556 556 # still draft locally.
557 557 pushop.outdatedphases = []
558 558 pushop.fallbackoutdatedphases = []
559 559 return
560 560
561 561 pushop.remotephases = phases.remotephasessummary(pushop.repo,
562 562 pushop.fallbackheads,
563 563 remotephases)
564 564 droots = pushop.remotephases.draftroots
565 565
566 566 extracond = ''
567 567 if not pushop.remotephases.publishing:
568 568 extracond = ' and public()'
569 569 revset = 'heads((%%ln::%%ln) %s)' % extracond
570 570 # Get the list of all revs draft on remote but public here.
571 571 # XXX Beware that the revset breaks if droots is not strictly
572 572 # XXX roots; we may want to ensure it is, but that is costly
573 573 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
574 574 if not outgoing.missing:
575 575 future = fallback
576 576 else:
577 577 # add the changesets we are going to push as draft
578 578 #
579 579 # should not be necessary for a publishing server, but because of an
580 580 # issue fixed in xxxxx we have to do it anyway.
581 581 fdroots = list(unfi.set('roots(%ln + %ln::)',
582 582 outgoing.missing, droots))
583 583 fdroots = [f.node() for f in fdroots]
584 584 future = list(unfi.set(revset, fdroots, pushop.futureheads))
585 585 pushop.outdatedphases = future
586 586 pushop.fallbackoutdatedphases = fallback
587 587
588 588 @pushdiscovery('obsmarker')
589 589 def _pushdiscoveryobsmarkers(pushop):
590 590 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
591 591 and pushop.repo.obsstore
592 592 and 'obsolete' in pushop.remote.listkeys('namespaces')):
593 593 repo = pushop.repo
594 594 # a very naive computation that can be quite expensive on big repos;
595 595 # however, evolution is currently slow on them anyway.
596 596 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
597 597 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
598 598
599 599 @pushdiscovery('bookmarks')
600 600 def _pushdiscoverybookmarks(pushop):
601 601 ui = pushop.ui
602 602 repo = pushop.repo.unfiltered()
603 603 remote = pushop.remote
604 604 ui.debug("checking for updated bookmarks\n")
605 605 ancestors = ()
606 606 if pushop.revs:
607 607 revnums = map(repo.changelog.rev, pushop.revs)
608 608 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
609 609 remotebookmark = remote.listkeys('bookmarks')
610 610
611 611 explicit = set([repo._bookmarks.expandname(bookmark)
612 612 for bookmark in pushop.bookmarks])
613 613
614 614 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
615 615 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
616 616
617 617 def safehex(x):
618 618 if x is None:
619 619 return x
620 620 return hex(x)
621 621
622 622 def hexifycompbookmarks(bookmarks):
623 623 return [(b, safehex(scid), safehex(dcid))
624 624 for (b, scid, dcid) in bookmarks]
625 625
626 626 comp = [hexifycompbookmarks(marks) for marks in comp]
627 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
628
629 def _processcompared(pushop, pushed, explicit, remotebms, comp):
630 """take decision on bookmark to pull from the remote bookmark
631
632 Exist to help extensions who want to alter this behavior.
633 """
627 634 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
628 635
636 repo = pushop.repo
637
629 638 for b, scid, dcid in advsrc:
630 639 if b in explicit:
631 640 explicit.remove(b)
632 if not ancestors or repo[scid].rev() in ancestors:
641 if not pushed or repo[scid].rev() in pushed:
633 642 pushop.outbookmarks.append((b, dcid, scid))
634 643 # search added bookmark
635 644 for b, scid, dcid in addsrc:
636 645 if b in explicit:
637 646 explicit.remove(b)
638 647 pushop.outbookmarks.append((b, '', scid))
639 648 # search for overwritten bookmark
640 649 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
641 650 if b in explicit:
642 651 explicit.remove(b)
643 652 pushop.outbookmarks.append((b, dcid, scid))
644 653 # search for bookmark to delete
645 654 for b, scid, dcid in adddst:
646 655 if b in explicit:
647 656 explicit.remove(b)
648 657 # treat as "deleted locally"
649 658 pushop.outbookmarks.append((b, dcid, ''))
650 659 # identical bookmarks shouldn't get reported
651 660 for b, scid, dcid in same:
652 661 if b in explicit:
653 662 explicit.remove(b)
654 663
655 664 if explicit:
656 665 explicit = sorted(explicit)
657 666 # we should probably list all of them
658 ui.warn(_('bookmark %s does not exist on the local '
659 'or remote repository!\n') % explicit[0])
667 pushop.ui.warn(_('bookmark %s does not exist on the local '
668 'or remote repository!\n') % explicit[0])
660 669 pushop.bkresult = 2
661 670
662 671 pushop.outbookmarks.sort()
663 672
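Extracting the comparison handling into `_processcompared` is what gives extensions a single hook point, per the commit message. A hypothetical sketch of an extension vetoing bookmark deletions by wrapping it:

    from mercurial import error, exchange, extensions

    def _nodeletions(orig, pushop, pushed, explicit, remotebms, comp):
        orig(pushop, pushed, explicit, remotebms, comp)
        for book, old, new in pushop.outbookmarks:
            if not new:  # '' means "delete this bookmark on the remote"
                raise error.Abort('deleting remote bookmarks is forbidden')

    def extsetup(ui):
        extensions.wrapfunction(exchange, '_processcompared', _nodeletions)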
664 673 def _pushcheckoutgoing(pushop):
665 674 outgoing = pushop.outgoing
666 675 unfi = pushop.repo.unfiltered()
667 676 if not outgoing.missing:
668 677 # nothing to push
669 678 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
670 679 return False
671 680 # something to push
672 681 if not pushop.force:
673 682 # if repo.obsstore == False --> no obsolete
674 683 # then, save the iteration
675 684 if unfi.obsstore:
677 686 # these messages are here for 80-char limit reasons
677 686 mso = _("push includes obsolete changeset: %s!")
678 687 mspd = _("push includes phase-divergent changeset: %s!")
679 688 mscd = _("push includes content-divergent changeset: %s!")
680 689 mst = {"orphan": _("push includes orphan changeset: %s!"),
681 690 "phase-divergent": mspd,
682 691 "content-divergent": mscd}
683 692 # If we are about to push and there is at least one
684 693 # obsolete or unstable changeset in missing, then at
685 694 # least one of the missingheads will be obsolete or
686 695 # unstable. So checking heads only is ok
687 696 for node in outgoing.missingheads:
688 697 ctx = unfi[node]
689 698 if ctx.obsolete():
690 699 raise error.Abort(mso % ctx)
691 700 elif ctx.isunstable():
692 701 # TODO print more than one instability in the abort
693 702 # message
694 703 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
695 704
696 705 discovery.checkheads(pushop)
697 706 return True
698 707
699 708 # List of names of steps to perform for an outgoing bundle2, order matters.
700 709 b2partsgenorder = []
701 710
702 711 # Mapping between step name and function
703 712 #
704 713 # This exists to help extensions wrap steps if necessary
705 714 b2partsgenmapping = {}
706 715
707 716 def b2partsgenerator(stepname, idx=None):
708 717 """decorator for function generating bundle2 part
709 718
710 719 The function is added to the step -> function mapping and appended to the
711 720 list of steps. Beware that decorated functions will be added in order
712 721 (this may matter).
713 722
715 724 You can only use this decorator for new steps; if you want to wrap a step
716 725 from an extension, change the b2partsgenmapping dictionary directly."""
716 725 def dec(func):
717 726 assert stepname not in b2partsgenmapping
718 727 b2partsgenmapping[stepname] = func
719 728 if idx is None:
720 729 b2partsgenorder.append(stepname)
721 730 else:
722 731 b2partsgenorder.insert(idx, stepname)
723 732 return func
724 733 return dec
725 734
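A hypothetical part generator registered through the decorator; passing `idx=0` inserts the step at the front of `b2partsgenorder` instead of appending it (the part name and payload are made up):

    @b2partsgenerator('my-extension-data', idx=0)
    def _pushb2mydata(pushop, bundler):
        if 'my-extension-data' in pushop.stepsdone:
            return
        pushop.stepsdone.add('my-extension-data')
        # advisory part: servers that do not know it can skip it
        bundler.newpart('my-extension-data', data='payload', mandatory=False)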
726 735 def _pushb2ctxcheckheads(pushop, bundler):
727 736 """Generate race condition checking parts
728 737
729 738 Exists as an independent function to aid extensions
730 739 """
731 740 # * 'force' does not check for push races,
732 741 # * if we don't push anything, there is nothing to check.
733 742 if not pushop.force and pushop.outgoing.missingheads:
734 743 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
735 744 emptyremote = pushop.pushbranchmap is None
736 745 if not allowunrelated or emptyremote:
737 746 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
738 747 else:
739 748 affected = set()
740 749 for branch, heads in pushop.pushbranchmap.iteritems():
741 750 remoteheads, newheads, unsyncedheads, discardedheads = heads
742 751 if remoteheads is not None:
743 752 remote = set(remoteheads)
744 753 affected |= set(discardedheads) & remote
745 754 affected |= remote - set(newheads)
746 755 if affected:
747 756 data = iter(sorted(affected))
748 757 bundler.newpart('check:updated-heads', data=data)
749 758
750 759 def _pushing(pushop):
751 760 """return True if we are pushing anything"""
752 761 return bool(pushop.outgoing.missing
753 762 or pushop.outdatedphases
754 763 or pushop.outobsmarkers
755 764 or pushop.outbookmarks)
756 765
757 766 @b2partsgenerator('check-bookmarks')
758 767 def _pushb2checkbookmarks(pushop, bundler):
759 768 """insert bookmark move checking"""
760 769 if not _pushing(pushop) or pushop.force:
761 770 return
762 771 b2caps = bundle2.bundle2caps(pushop.remote)
763 772 hasbookmarkcheck = 'bookmarks' in b2caps
764 773 if not (pushop.outbookmarks and hasbookmarkcheck):
765 774 return
766 775 data = []
767 776 for book, old, new in pushop.outbookmarks:
768 777 old = bin(old)
769 778 data.append((book, old))
770 779 checkdata = bookmod.binaryencode(data)
771 780 bundler.newpart('check:bookmarks', data=checkdata)
772 781
773 782 @b2partsgenerator('check-phases')
774 783 def _pushb2checkphases(pushop, bundler):
775 784 """insert phase move checking"""
776 785 if not _pushing(pushop) or pushop.force:
777 786 return
778 787 b2caps = bundle2.bundle2caps(pushop.remote)
779 788 hasphaseheads = 'heads' in b2caps.get('phases', ())
780 789 if pushop.remotephases is not None and hasphaseheads:
781 790 # check that the remote phase has not changed
782 791 checks = [[] for p in phases.allphases]
783 792 checks[phases.public].extend(pushop.remotephases.publicheads)
784 793 checks[phases.draft].extend(pushop.remotephases.draftroots)
785 794 if any(checks):
786 795 for nodes in checks:
787 796 nodes.sort()
788 797 checkdata = phases.binaryencode(checks)
789 798 bundler.newpart('check:phases', data=checkdata)
790 799
791 800 @b2partsgenerator('changeset')
792 801 def _pushb2ctx(pushop, bundler):
793 802 """handle changegroup push through bundle2
794 803
795 804 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
796 805 """
797 806 if 'changesets' in pushop.stepsdone:
798 807 return
799 808 pushop.stepsdone.add('changesets')
800 809 # Send known heads to the server for race detection.
801 810 if not _pushcheckoutgoing(pushop):
802 811 return
803 812 pushop.repo.prepushoutgoinghooks(pushop)
804 813
805 814 _pushb2ctxcheckheads(pushop, bundler)
806 815
807 816 b2caps = bundle2.bundle2caps(pushop.remote)
808 817 version = '01'
809 818 cgversions = b2caps.get('changegroup')
810 819 if cgversions: # 3.1 and 3.2 ship with an empty value
811 820 cgversions = [v for v in cgversions
812 821 if v in changegroup.supportedoutgoingversions(
813 822 pushop.repo)]
814 823 if not cgversions:
815 824 raise ValueError(_('no common changegroup version'))
816 825 version = max(cgversions)
817 826 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
818 827 'push')
819 828 cgpart = bundler.newpart('changegroup', data=cgstream)
820 829 if cgversions:
821 830 cgpart.addparam('version', version)
822 831 if 'treemanifest' in pushop.repo.requirements:
823 832 cgpart.addparam('treemanifest', '1')
824 833 def handlereply(op):
825 834 """extract addchangegroup returns from server reply"""
826 835 cgreplies = op.records.getreplies(cgpart.id)
827 836 assert len(cgreplies['changegroup']) == 1
828 837 pushop.cgresult = cgreplies['changegroup'][0]['return']
829 838 return handlereply
830 839
831 840 @b2partsgenerator('phase')
832 841 def _pushb2phases(pushop, bundler):
833 842 """handle phase push through bundle2"""
834 843 if 'phases' in pushop.stepsdone:
835 844 return
836 845 b2caps = bundle2.bundle2caps(pushop.remote)
837 846 ui = pushop.repo.ui
838 847
839 848 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
840 849 haspushkey = 'pushkey' in b2caps
841 850 hasphaseheads = 'heads' in b2caps.get('phases', ())
842 851
843 852 if hasphaseheads and not legacyphase:
844 853 return _pushb2phaseheads(pushop, bundler)
845 854 elif haspushkey:
846 855 return _pushb2phasespushkey(pushop, bundler)
847 856
848 857 def _pushb2phaseheads(pushop, bundler):
849 858 """push phase information through a bundle2 - binary part"""
850 859 pushop.stepsdone.add('phases')
851 860 if pushop.outdatedphases:
852 861 updates = [[] for p in phases.allphases]
853 862 updates[0].extend(h.node() for h in pushop.outdatedphases)
854 863 phasedata = phases.binaryencode(updates)
855 864 bundler.newpart('phase-heads', data=phasedata)
856 865
857 866 def _pushb2phasespushkey(pushop, bundler):
858 867 """push phase information through a bundle2 - pushkey part"""
859 868 pushop.stepsdone.add('phases')
860 869 part2node = []
861 870
862 871 def handlefailure(pushop, exc):
863 872 targetid = int(exc.partid)
864 873 for partid, node in part2node:
865 874 if partid == targetid:
866 875 raise error.Abort(_('updating %s to public failed') % node)
867 876
868 877 enc = pushkey.encode
869 878 for newremotehead in pushop.outdatedphases:
870 879 part = bundler.newpart('pushkey')
871 880 part.addparam('namespace', enc('phases'))
872 881 part.addparam('key', enc(newremotehead.hex()))
873 882 part.addparam('old', enc('%d' % phases.draft))
874 883 part.addparam('new', enc('%d' % phases.public))
875 884 part2node.append((part.id, newremotehead))
876 885 pushop.pkfailcb[part.id] = handlefailure
877 886
878 887 def handlereply(op):
879 888 for partid, node in part2node:
880 889 partrep = op.records.getreplies(partid)
881 890 results = partrep['pushkey']
882 891 assert len(results) <= 1
883 892 msg = None
884 893 if not results:
885 894 msg = _('server ignored update of %s to public!\n') % node
886 895 elif not int(results[0]['return']):
887 896 msg = _('updating %s to public failed!\n') % node
888 897 if msg is not None:
889 898 pushop.ui.warn(msg)
890 899 return handlereply
891 900
892 901 @b2partsgenerator('obsmarkers')
893 902 def _pushb2obsmarkers(pushop, bundler):
894 903 if 'obsmarkers' in pushop.stepsdone:
895 904 return
896 905 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
897 906 if obsolete.commonversion(remoteversions) is None:
898 907 return
899 908 pushop.stepsdone.add('obsmarkers')
900 909 if pushop.outobsmarkers:
901 910 markers = sorted(pushop.outobsmarkers)
902 911 bundle2.buildobsmarkerspart(bundler, markers)
903 912
904 913 @b2partsgenerator('bookmarks')
905 914 def _pushb2bookmarks(pushop, bundler):
906 915 """handle bookmark push through bundle2"""
907 916 if 'bookmarks' in pushop.stepsdone:
908 917 return
909 918 b2caps = bundle2.bundle2caps(pushop.remote)
910 919
911 920 legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
912 921 legacybooks = 'bookmarks' in legacy
913 922
914 923 if not legacybooks and 'bookmarks' in b2caps:
915 924 return _pushb2bookmarkspart(pushop, bundler)
916 925 elif 'pushkey' in b2caps:
917 926 return _pushb2bookmarkspushkey(pushop, bundler)
918 927
919 928 def _bmaction(old, new):
920 929 """small utility for bookmark pushing"""
921 930 if not old:
922 931 return 'export'
923 932 elif not new:
924 933 return 'delete'
925 934 return 'update'
926 935
927 936 def _pushb2bookmarkspart(pushop, bundler):
928 937 pushop.stepsdone.add('bookmarks')
929 938 if not pushop.outbookmarks:
930 939 return
931 940
932 941 allactions = []
933 942 data = []
934 943 for book, old, new in pushop.outbookmarks:
935 944 new = bin(new)
936 945 data.append((book, new))
937 946 allactions.append((book, _bmaction(old, new)))
938 947 checkdata = bookmod.binaryencode(data)
939 948 bundler.newpart('bookmarks', data=checkdata)
940 949
941 950 def handlereply(op):
942 951 ui = pushop.ui
943 952 # if success
944 953 for book, action in allactions:
945 954 ui.status(bookmsgmap[action][0] % book)
946 955
947 956 return handlereply
948 957
949 958 def _pushb2bookmarkspushkey(pushop, bundler):
950 959 pushop.stepsdone.add('bookmarks')
951 960 part2book = []
952 961 enc = pushkey.encode
953 962
954 963 def handlefailure(pushop, exc):
955 964 targetid = int(exc.partid)
956 965 for partid, book, action in part2book:
957 966 if partid == targetid:
958 967 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
959 968 # we should not be called for parts we did not generate
960 969 assert False
961 970
962 971 for book, old, new in pushop.outbookmarks:
963 972 part = bundler.newpart('pushkey')
964 973 part.addparam('namespace', enc('bookmarks'))
965 974 part.addparam('key', enc(book))
966 975 part.addparam('old', enc(old))
967 976 part.addparam('new', enc(new))
968 977 action = 'update'
969 978 if not old:
970 979 action = 'export'
971 980 elif not new:
972 981 action = 'delete'
973 982 part2book.append((part.id, book, action))
974 983 pushop.pkfailcb[part.id] = handlefailure
975 984
976 985 def handlereply(op):
977 986 ui = pushop.ui
978 987 for partid, book, action in part2book:
979 988 partrep = op.records.getreplies(partid)
980 989 results = partrep['pushkey']
981 990 assert len(results) <= 1
982 991 if not results:
983 992 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
984 993 else:
985 994 ret = int(results[0]['return'])
986 995 if ret:
987 996 ui.status(bookmsgmap[action][0] % book)
988 997 else:
989 998 ui.warn(bookmsgmap[action][1] % book)
990 999 if pushop.bkresult is not None:
991 1000 pushop.bkresult = 1
992 1001 return handlereply
993 1002
994 1003 @b2partsgenerator('pushvars', idx=0)
995 1004 def _getbundlesendvars(pushop, bundler):
996 1005 '''send shellvars via bundle2'''
997 1006 pushvars = pushop.pushvars
998 1007 if pushvars:
999 1008 shellvars = {}
1000 1009 for raw in pushvars:
1001 1010 if '=' not in raw:
1002 1011 msg = ("unable to parse variable '%s', should follow "
1003 1012 "'KEY=VALUE' or 'KEY=' format")
1004 1013 raise error.Abort(msg % raw)
1005 1014 k, v = raw.split('=', 1)
1006 1015 shellvars[k] = v
1007 1016
1008 1017 part = bundler.newpart('pushvars')
1009 1018
1010 1019 for key, value in shellvars.iteritems():
1011 1020 part.addparam(key, value, mandatory=False)
1012 1021
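On the wire these variables travel as advisory parameters of a `pushvars` part; server-side hooks then see them as `HG_USERVAR_*` environment variables. A hedged CLI sketch (the server-side config name is quoted from memory and should be treated as an assumption):

    $ hg push --pushvars "DEBUG=1" --pushvars "BYPASS_REVIEW="
    # with push.pushvars.server=true on the server, its hooks see
    # HG_USERVAR_DEBUG=1 and an empty HG_USERVAR_BYPASS_REVIEW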
1013 1022 def _pushbundle2(pushop):
1014 1023 """push data to the remote using bundle2
1015 1024
1016 1025 The only currently supported type of data is changegroup but this will
1017 1026 evolve in the future."""
1018 1027 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1019 1028 pushback = (pushop.trmanager
1020 1029 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
1021 1030
1022 1031 # create reply capability
1023 1032 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
1024 1033 allowpushback=pushback,
1025 1034 role='client'))
1026 1035 bundler.newpart('replycaps', data=capsblob)
1027 1036 replyhandlers = []
1028 1037 for partgenname in b2partsgenorder:
1029 1038 partgen = b2partsgenmapping[partgenname]
1030 1039 ret = partgen(pushop, bundler)
1031 1040 if callable(ret):
1032 1041 replyhandlers.append(ret)
1033 1042 # do not push if nothing to push
1034 1043 if bundler.nbparts <= 1:
1035 1044 return
1036 1045 stream = util.chunkbuffer(bundler.getchunks())
1037 1046 try:
1038 1047 try:
1039 1048 reply = pushop.remote.unbundle(
1040 1049 stream, ['force'], pushop.remote.url())
1041 1050 except error.BundleValueError as exc:
1042 1051 raise error.Abort(_('missing support for %s') % exc)
1043 1052 try:
1044 1053 trgetter = None
1045 1054 if pushback:
1046 1055 trgetter = pushop.trmanager.transaction
1047 1056 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1048 1057 except error.BundleValueError as exc:
1049 1058 raise error.Abort(_('missing support for %s') % exc)
1050 1059 except bundle2.AbortFromPart as exc:
1051 1060 pushop.ui.status(_('remote: %s\n') % exc)
1052 1061 if exc.hint is not None:
1053 1062 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
1054 1063 raise error.Abort(_('push failed on remote'))
1055 1064 except error.PushkeyFailed as exc:
1056 1065 partid = int(exc.partid)
1057 1066 if partid not in pushop.pkfailcb:
1058 1067 raise
1059 1068 pushop.pkfailcb[partid](pushop, exc)
1060 1069 for rephand in replyhandlers:
1061 1070 rephand(op)
1062 1071
1063 1072 def _pushchangeset(pushop):
1064 1073 """Make the actual push of changeset bundle to remote repo"""
1065 1074 if 'changesets' in pushop.stepsdone:
1066 1075 return
1067 1076 pushop.stepsdone.add('changesets')
1068 1077 if not _pushcheckoutgoing(pushop):
1069 1078 return
1070 1079
1071 1080 # Should have verified this in push().
1072 1081 assert pushop.remote.capable('unbundle')
1073 1082
1074 1083 pushop.repo.prepushoutgoinghooks(pushop)
1075 1084 outgoing = pushop.outgoing
1076 1085 # TODO: get bundlecaps from remote
1077 1086 bundlecaps = None
1078 1087 # create a changegroup from local
1079 1088 if pushop.revs is None and not (outgoing.excluded
1080 1089 or pushop.repo.changelog.filteredrevs):
1081 1090 # push everything,
1082 1091 # use the fast path, no race possible on push
1083 1092 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1084 1093 fastpath=True, bundlecaps=bundlecaps)
1085 1094 else:
1086 1095 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1087 1096 'push', bundlecaps=bundlecaps)
1088 1097
1089 1098 # apply changegroup to remote
1090 1099 # local repo finds heads on server, finds out what
1091 1100 # revs it must push. once revs transferred, if server
1092 1101 # finds it has different heads (someone else won
1093 1102 # commit/push race), server aborts.
1094 1103 if pushop.force:
1095 1104 remoteheads = ['force']
1096 1105 else:
1097 1106 remoteheads = pushop.remoteheads
1098 1107 # ssh: return remote's addchangegroup()
1099 1108 # http: return remote's addchangegroup() or 0 for error
1100 1109 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1101 1110 pushop.repo.url())
1102 1111
1103 1112 def _pushsyncphase(pushop):
1104 1113 """synchronise phase information locally and remotely"""
1105 1114 cheads = pushop.commonheads
1106 1115 # even when we don't push, exchanging phase data is useful
1107 1116 remotephases = pushop.remote.listkeys('phases')
1108 1117 if (pushop.ui.configbool('ui', '_usedassubrepo')
1109 1118 and remotephases # server supports phases
1110 1119 and pushop.cgresult is None # nothing was pushed
1111 1120 and remotephases.get('publishing', False)):
1112 1121 # When:
1113 1122 # - this is a subrepo push
1114 1123 # - and the remote supports phases
1115 1124 # - and no changesets were pushed
1116 1125 # - and the remote is publishing
1117 1126 # We may be in the issue 3871 case!
1118 1127 # We drop the phase synchronisation that would otherwise be done as
1119 1128 # a courtesy to publish, on the remote, changesets that are possibly
1120 1129 # still draft locally.
1121 1130 remotephases = {'publishing': 'True'}
1122 1131 if not remotephases: # old server or public-only reply from non-publishing
1123 1132 _localphasemove(pushop, cheads)
1124 1133 # don't push any phase data as there is nothing to push
1125 1134 else:
1126 1135 ana = phases.analyzeremotephases(pushop.repo, cheads,
1127 1136 remotephases)
1128 1137 pheads, droots = ana
1129 1138 ### Apply remote phase on local
1130 1139 if remotephases.get('publishing', False):
1131 1140 _localphasemove(pushop, cheads)
1132 1141 else: # publish = False
1133 1142 _localphasemove(pushop, pheads)
1134 1143 _localphasemove(pushop, cheads, phases.draft)
1135 1144 ### Apply local phase on remote
1136 1145
1137 1146 if pushop.cgresult:
1138 1147 if 'phases' in pushop.stepsdone:
1139 1148 # phases already pushed through bundle2
1140 1149 return
1141 1150 outdated = pushop.outdatedphases
1142 1151 else:
1143 1152 outdated = pushop.fallbackoutdatedphases
1144 1153
1145 1154 pushop.stepsdone.add('phases')
1146 1155
1147 1156 # filter heads already turned public by the push
1148 1157 outdated = [c for c in outdated if c.node() not in pheads]
1149 1158 # fallback to independent pushkey command
1150 1159 for newremotehead in outdated:
1151 1160 r = pushop.remote.pushkey('phases',
1152 1161 newremotehead.hex(),
1153 1162 ('%d' % phases.draft),
1154 1163 ('%d' % phases.public))
1155 1164 if not r:
1156 1165 pushop.ui.warn(_('updating %s to public failed!\n')
1157 1166 % newremotehead)
1158 1167
1159 1168 def _localphasemove(pushop, nodes, phase=phases.public):
1160 1169 """move <nodes> to <phase> in the local source repo"""
1161 1170 if pushop.trmanager:
1162 1171 phases.advanceboundary(pushop.repo,
1163 1172 pushop.trmanager.transaction(),
1164 1173 phase,
1165 1174 nodes)
1166 1175 else:
1167 1176 # repo is not locked, do not change any phases!
1168 1177 # Inform the user that phases should have been moved when
1169 1178 # applicable.
1170 1179 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1171 1180 phasestr = phases.phasenames[phase]
1172 1181 if actualmoves:
1173 1182 pushop.ui.status(_('cannot lock source repo, skipping '
1174 1183 'local %s phase update\n') % phasestr)
1175 1184
1176 1185 def _pushobsolete(pushop):
1177 1186 """utility function to push obsolete markers to a remote"""
1178 1187 if 'obsmarkers' in pushop.stepsdone:
1179 1188 return
1180 1189 repo = pushop.repo
1181 1190 remote = pushop.remote
1182 1191 pushop.stepsdone.add('obsmarkers')
1183 1192 if pushop.outobsmarkers:
1184 1193 pushop.ui.debug('try to push obsolete markers to remote\n')
1185 1194 rslts = []
1186 1195 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1187 1196 for key in sorted(remotedata, reverse=True):
1188 1197 # reverse sort to ensure we end with dump0
1189 1198 data = remotedata[key]
1190 1199 rslts.append(remote.pushkey('obsolete', key, '', data))
1191 1200 if [r for r in rslts if not r]:
1192 1201 msg = _('failed to push some obsolete markers!\n')
1193 1202 repo.ui.warn(msg)
1194 1203
1195 1204 def _pushbookmark(pushop):
1196 1205 """Update bookmark position on remote"""
1197 1206 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1198 1207 return
1199 1208 pushop.stepsdone.add('bookmarks')
1200 1209 ui = pushop.ui
1201 1210 remote = pushop.remote
1202 1211
1203 1212 for b, old, new in pushop.outbookmarks:
1204 1213 action = 'update'
1205 1214 if not old:
1206 1215 action = 'export'
1207 1216 elif not new:
1208 1217 action = 'delete'
1209 1218 if remote.pushkey('bookmarks', b, old, new):
1210 1219 ui.status(bookmsgmap[action][0] % b)
1211 1220 else:
1212 1221 ui.warn(bookmsgmap[action][1] % b)
1213 1222 # discovery may have set the value from an invalid entry
1214 1223 if pushop.bkresult is not None:
1215 1224 pushop.bkresult = 1
1216 1225
1217 1226 class pulloperation(object):
1218 1227 """A object that represent a single pull operation
1219 1228
1220 1229 It purpose is to carry pull related state and very common operation.
1221 1230
1222 1231 A new should be created at the beginning of each pull and discarded
1223 1232 afterward.
1224 1233 """
1225 1234
1226 1235 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1227 1236 remotebookmarks=None, streamclonerequested=None):
1228 1237 # repo we pull into
1229 1238 self.repo = repo
1230 1239 # repo we pull from
1231 1240 self.remote = remote
1232 1241 # revisions we try to pull (None means "all")
1233 1242 self.heads = heads
1234 1243 # bookmarks pulled explicitly
1235 1244 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1236 1245 for bookmark in bookmarks]
1237 1246 # do we force pull?
1238 1247 self.force = force
1239 1248 # whether a streaming clone was requested
1240 1249 self.streamclonerequested = streamclonerequested
1241 1250 # transaction manager
1242 1251 self.trmanager = None
1243 1252 # set of common changesets between local and remote before pull
1244 1253 self.common = None
1245 1254 # set of pulled heads
1246 1255 self.rheads = None
1247 1256 # list of missing changesets to fetch remotely
1248 1257 self.fetch = None
1249 1258 # remote bookmarks data
1250 1259 self.remotebookmarks = remotebookmarks
1251 1260 # result of changegroup pulling (used as return code by pull)
1252 1261 self.cgresult = None
1253 1262 # list of steps already done
1254 1263 self.stepsdone = set()
1255 1264 # Whether we attempted a clone from pre-generated bundles.
1256 1265 self.clonebundleattempted = False
1257 1266
1258 1267 @util.propertycache
1259 1268 def pulledsubset(self):
1260 1269 """heads of the set of changeset target by the pull"""
1261 1270 # compute target subset
1262 1271 if self.heads is None:
1263 1272 # We pulled everything possible
1264 1273 # sync on everything common
1265 1274 c = set(self.common)
1266 1275 ret = list(self.common)
1267 1276 for n in self.rheads:
1268 1277 if n not in c:
1269 1278 ret.append(n)
1270 1279 return ret
1271 1280 else:
1272 1281 # We pulled a specific subset
1273 1282 # sync on this subset
1274 1283 return self.heads
1275 1284
1276 1285 @util.propertycache
1277 1286 def canusebundle2(self):
1278 1287 return not _forcebundle1(self)
1279 1288
1280 1289 @util.propertycache
1281 1290 def remotebundle2caps(self):
1282 1291 return bundle2.bundle2caps(self.remote)
1283 1292
1284 1293 def gettransaction(self):
1285 1294 # deprecated; talk to trmanager directly
1286 1295 return self.trmanager.transaction()
1287 1296
1288 1297 class transactionmanager(util.transactional):
1289 1298 """An object to manage the life cycle of a transaction
1290 1299
1291 1300 It creates the transaction on demand and calls the appropriate hooks when
1292 1301 closing the transaction."""
1293 1302 def __init__(self, repo, source, url):
1294 1303 self.repo = repo
1295 1304 self.source = source
1296 1305 self.url = url
1297 1306 self._tr = None
1298 1307
1299 1308 def transaction(self):
1300 1309 """Return an open transaction object, constructing if necessary"""
1301 1310 if not self._tr:
1302 1311 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1303 1312 self._tr = self.repo.transaction(trname)
1304 1313 self._tr.hookargs['source'] = self.source
1305 1314 self._tr.hookargs['url'] = self.url
1306 1315 return self._tr
1307 1316
1308 1317 def close(self):
1309 1318 """close transaction if created"""
1310 1319 if self._tr is not None:
1311 1320 self._tr.close()
1312 1321
1313 1322 def release(self):
1314 1323 """release transaction if created"""
1315 1324 if self._tr is not None:
1316 1325 self._tr.release()
1317 1326
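Because the class derives from `util.transactional`, it can drive a with-block, closing on success and releasing otherwise; `pull()` below uses it exactly that way. A minimal sketch (assuming `repo` and `remote` exist):

    # sketch: the transaction is created lazily, only when a step asks
    trmanager = transactionmanager(repo, 'pull', remote.url())
    with trmanager:
        tr = trmanager.transaction()  # constructed on first call
        # ... apply incoming data under tr ...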
1318 1327 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1319 1328 streamclonerequested=None):
1320 1329 """Fetch repository data from a remote.
1321 1330
1322 1331 This is the main function used to retrieve data from a remote repository.
1323 1332
1324 1333 ``repo`` is the local repository to clone into.
1325 1334 ``remote`` is a peer instance.
1326 1335 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1327 1336 default) means to pull everything from the remote.
1328 1337 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1329 1338 default, all remote bookmarks are pulled.
1330 1339 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1331 1340 initialization.
1332 1341 ``streamclonerequested`` is a boolean indicating whether a "streaming
1333 1342 clone" is requested. A "streaming clone" is essentially a raw file copy
1334 1343 of revlogs from the server. This only works when the local repository is
1335 1344 empty. The default value of ``None`` means to respect the server
1336 1345 configuration for preferring stream clones.
1337 1346
1338 1347 Returns the ``pulloperation`` created for this pull.
1339 1348 """
1340 1349 if opargs is None:
1341 1350 opargs = {}
1342 1351 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1343 1352 streamclonerequested=streamclonerequested,
1344 1353 **pycompat.strkwargs(opargs))
1345 1354
1346 1355 peerlocal = pullop.remote.local()
1347 1356 if peerlocal:
1348 1357 missing = set(peerlocal.requirements) - pullop.repo.supported
1349 1358 if missing:
1350 1359 msg = _("required features are not"
1351 1360 " supported in the destination:"
1352 1361 " %s") % (', '.join(sorted(missing)))
1353 1362 raise error.Abort(msg)
1354 1363
1355 1364 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1356 1365 with repo.wlock(), repo.lock(), pullop.trmanager:
1357 1366 # This should ideally be in _pullbundle2(). However, it needs to run
1358 1367 # before discovery to avoid extra work.
1359 1368 _maybeapplyclonebundle(pullop)
1360 1369 streamclone.maybeperformlegacystreamclone(pullop)
1361 1370 _pulldiscovery(pullop)
1362 1371 if pullop.canusebundle2:
1363 1372 _pullbundle2(pullop)
1364 1373 _pullchangeset(pullop)
1365 1374 _pullphase(pullop)
1366 1375 _pullbookmarks(pullop)
1367 1376 _pullobsolete(pullop)
1368 1377
1369 1378 # storing remotenames
1370 1379 if repo.ui.configbool('experimental', 'remotenames'):
1371 1380 logexchange.pullremotenames(repo, remote)
1372 1381
1373 1382 return pullop
1374 1383
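And a matching caller sketch (assuming `repo` and `remote` exist; the head nodes and bookmark name are hypothetical):

    pullop = pull(repo, remote,
                  heads=[node1, node2],      # hypothetical node ids
                  bookmarks=('stable',))
    if pullop.cgresult == 0:
        repo.ui.status('no changes found\n')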
1375 1384 # list of steps to perform discovery before pull
1376 1385 pulldiscoveryorder = []
1377 1386
1378 1387 # Mapping between step name and function
1379 1388 #
1380 1389 # This exists to help extensions wrap steps if necessary
1381 1390 pulldiscoverymapping = {}
1382 1391
1383 1392 def pulldiscovery(stepname):
1384 1393 """decorator for function performing discovery before pull
1385 1394
1386 1395 The function is added to the step -> function mapping and appended to the
1387 1396 list of steps. Beware that decorated functions will be added in order (this
1388 1397 may matter).
1389 1398
1390 1399 You can only use this decorator for a new step; if you want to wrap a step
1391 1400 from an extension, change the pulldiscoverymapping dictionary directly."""
1392 1401 def dec(func):
1393 1402 assert stepname not in pulldiscoverymapping
1394 1403 pulldiscoverymapping[stepname] = func
1395 1404 pulldiscoveryorder.append(stepname)
1396 1405 return func
1397 1406 return dec
1398 1407
1399 1408 def _pulldiscovery(pullop):
1400 1409 """Run all discovery steps"""
1401 1410 for stepname in pulldiscoveryorder:
1402 1411 step = pulldiscoverymapping[stepname]
1403 1412 step(pullop)
1404 1413
1405 1414 @pulldiscovery('b1:bookmarks')
1406 1415 def _pullbookmarkbundle1(pullop):
1407 1416 """fetch bookmark data in bundle1 case
1408 1417
1409 1418 If not using bundle2, we have to fetch bookmarks before changeset
1410 1419 discovery to reduce the chance and impact of race conditions."""
1411 1420 if pullop.remotebookmarks is not None:
1412 1421 return
1413 1422 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1414 1423 # all known bundle2 servers now support listkeys, but let's be nice to
1415 1424 # new implementations.
1416 1425 return
1417 1426 books = pullop.remote.listkeys('bookmarks')
1418 1427 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1419 1428
1420 1429
1421 1430 @pulldiscovery('changegroup')
1422 1431 def _pulldiscoverychangegroup(pullop):
1423 1432 """discovery phase for the pull
1424 1433
1425 1434 Currently handles changeset discovery only; will change to handle all
1426 1435 discovery at some point."""
1427 1436 tmp = discovery.findcommonincoming(pullop.repo,
1428 1437 pullop.remote,
1429 1438 heads=pullop.heads,
1430 1439 force=pullop.force)
1431 1440 common, fetch, rheads = tmp
1432 1441 nm = pullop.repo.unfiltered().changelog.nodemap
1433 1442 if fetch and rheads:
1434 1443 # If a remote head is filtered locally, put it back in common.
1435 1444 #
1436 1445 # This is a hackish solution to catch most of the "common but locally
1437 1446 # hidden" situations. We do not perform discovery on the unfiltered
1438 1447 # repository because it ends up doing a pathological amount of round
1439 1448 # trips for a huge amount of changesets we do not care about.
1440 1449 #
1441 1450 # If a set of such "common but filtered" changesets exists on the server
1442 1451 # but does not include a remote head, we'll not be able to detect it.
1443 1452 scommon = set(common)
1444 1453 for n in rheads:
1445 1454 if n in nm:
1446 1455 if n not in scommon:
1447 1456 common.append(n)
1448 1457 if set(rheads).issubset(set(common)):
1449 1458 fetch = []
1450 1459 pullop.common = common
1451 1460 pullop.fetch = fetch
1452 1461 pullop.rheads = rheads

def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data is the changegroup."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull parameters
    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads

    if streaming:
        kwargs['cg'] = False
        kwargs['stream'] = True
        pullop.stepsdone.add('changegroup')
        pullop.stepsdone.add('phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add('changegroup')

        kwargs['cg'] = pullop.fetch

        legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
        hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
        if (not legacyphase and hasbinaryphase):
            kwargs['phases'] = True
            pullop.stepsdone.add('phases')

        if 'listkeys' in pullop.remotebundle2caps:
            if 'phases' not in pullop.stepsdone:
                kwargs['listkeys'] = ['phases']

    bookmarksrequested = False
    legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
    hasbinarybook = 'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add('request-bookmarks')

    if ('request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark and hasbinarybook):
        kwargs['bookmarks'] = True
        bookmarksrequested = True

    if 'listkeys' in pullop.remotebundle2caps:
        if 'request-bookmarks' not in pullop.stepsdone:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add('request-bookmarks')
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
    try:
        op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction)
        op.modes['bookmarks'] = 'records'
        bundle2.processbundle(pullop.repo, bundle, op=op)
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
        raise error.Abort(_('pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # process phase changes
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # process bookmark updates
    if bookmarksrequested:
        books = {}
        for record in op.records['bookmarks']:
            books[record['bookmark']] = record['node']
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records['listkeys']:
            if namespace == 'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data was either already there or was pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)

def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""
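
# Example (illustrative only): an extension would typically extend the
# getbundle call by wrapping the hook above. The 'example' argument added
# below is hypothetical and would need matching server-side support.
#
#   from mercurial import exchange, extensions
#
#   def _extraprepare(orig, pullop, kwargs):
#       orig(pullop, kwargs)
#       kwargs['example'] = True
#
#   def extsetup(ui):
#       extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
#                               _extraprepare)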

def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so that we don't
    # open a transaction for nothing and don't break future useful rollback
    # calls.
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)

def _pullphase(pullop):
    # Get phases data from the remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr

def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {'HG20'}
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps

# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for a function generating a bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec
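
# Example (illustrative only): an extension could register an extra part
# generator with the decorator above. The part name below is hypothetical
# and would need a matching bundle2 part handler on the receiving side.
#
#   @getbundle2partsgenerator('example-extra')
#   def _getbundleexamplepart(bundler, repo, source, bundlecaps=None,
#                             b2caps=None, **kwargs):
#       if 'example-extra' in b2caps:
#           bundler.newpart('example-extra', data='payload', mandatory=False)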

def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False

def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        info['bundleversion'] = 1
        return info, changegroup.makestream(repo, outgoing, '01', source,
                                            bundlecaps=bundlecaps)

    # bundle20 case
    info['bundleversion'] = 2
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    info['prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()
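
# Example (illustrative only): a caller streaming a bundle's raw data to a
# file object. 'fh', 'heads', 'common' and 'bundlecaps' stand for values a
# server or command would already have at hand.
#
#   info, chunks = getbundlechunks(repo, 'serve', heads=heads, common=common,
#                                  bundlecaps=bundlecaps)
#   for chunk in chunks:
#       fh.write(chunk)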

@getbundle2partsgenerator('stream2')
def _getbundlestream2(bundler, repo, source, bundlecaps=None,
                      b2caps=None, heads=None, common=None, **kwargs):
    if not kwargs.get('stream', False):
        return

    if not streamclone.allowservergeneration(repo):
        raise error.Abort(_('stream data requested but server does not allow '
                            'this feature'),
                          hint=_('well-behaved clients should not be '
                                 'requesting stream data from servers not '
                                 'advertising it; the client may be buggy'))

    # Stream clones don't compress well. And compression undermines a
    # goal of stream clones, which is to be fast. Communicate the desire
    # to avoid compression to consumers of the bundle.
    bundler.prefercompressed = False

    filecount, bytecount, it = streamclone.generatev2(repo)
    requirements = _formatrequirementsspec(repo.requirements)
    part = bundler.newpart('stream2', data=it)
    part.addparam('bytecount', '%d' % bytecount, mandatory=True)
    part.addparam('filecount', '%d' % filecount, mandatory=True)
    part.addparam('requirements', requirements, mandatory=True)

@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cgstream = None
    if kwargs.get(r'cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        if outgoing.missing:
            cgstream = changegroup.makestream(repo, outgoing, version, source,
                                              bundlecaps=bundlecaps)

    if cgstream:
        part = bundler.newpart('changegroup', data=cgstream)
        if cgversions:
            part.addparam('version', version)
        part.addparam('nbchanges', '%d' % len(outgoing.missing),
                      mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

@getbundle2partsgenerator('bookmarks')
def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                           b2caps=None, **kwargs):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get(r'bookmarks', False):
        return
    if 'bookmarks' not in b2caps:
        raise ValueError(_('no common bookmarks exchange method'))
    books = bookmod.listbinbookmarks(repo)
    data = bookmod.binaryencode(books)
    if data:
        bundler.newpart('bookmarks', data=data)

@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get(r'listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get(r'obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        bundle2.buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle"""
    if kwargs.get(r'phases', False):
        if 'heads' not in b2caps.get('phases'):
            raise ValueError(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phases for now.
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data into a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)
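
# Example (illustrative only): for a repository whose only heads are a public
# head P and a draft head D, the phasemapping built above would be
# [[P], [D], []] -- one sorted list of binary head nodes per entry of
# phases.allphases (public, draft, secret).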

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
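
# Example (illustrative only): how a client would build the 'hashed' form
# accepted above; 'observedheads' is a hypothetical name for the binary head
# nodes the client observed before pushing.
#
#   their_heads = ['hashed',
#                  hashlib.sha1(''.join(sorted(observedheads))).digest()]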

def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m
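
# Example (illustrative only): a hypothetical manifest line such as
#
#   https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# would parse into the following entry (assuming 'gzip-v2' is a recognized
# bundle specification):
#
#   [{'URL': 'https://example.com/full.hg',
#     'BUNDLESPEC': 'gzip-v2',
#     'COMPRESSION': 'gzip',
#     'VERSION': 'v2',
#     'REQUIRESNI': 'true'}]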

def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                comp, version, params = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and (comp != 'UN' or version != 's1'):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (
                                  entry['URL'], util.forcebytestr(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]
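
# Example (illustrative only): with a configuration such as
#
#   [ui]
#   clonebundleprefers = VERSION=v2, COMPRESSION=gzip
#
# entries whose VERSION attribute is 'v2' sort first, COMPRESSION=gzip is
# used as a tie breaker, and entries the preferences cannot distinguish keep
# their manifest order.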

def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') %
                    util.forcebytestr(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') %
                    util.forcebytestr(e.reason))

        return False