exchange: use cg?unpacker.apply() instead of changegroup.addchangegroup()
Augie Fackler
r26700:dbc3d945 default
@@ -1,1801 +1,1800
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib, urllib2
11 11 import util, scmutil, changegroup, base85, error
12 12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 13 import lock as lockmod
14 14 import streamclone
15 15 import sslutil
16 16 import tags
17 17 import url as urlmod
18 18
19 19 # Maps bundle compression human names to internal representation.
20 20 _bundlespeccompressions = {'none': None,
21 21 'bzip2': 'BZ',
22 22 'gzip': 'GZ',
23 23 }
24 24
25 25 # Maps bundle version human names to changegroup versions.
26 26 _bundlespeccgversions = {'v1': '01',
27 27 'v2': '02',
28 28 'bundle2': '02', #legacy
29 29 }
30 30
31 31 def parsebundlespec(repo, spec, strict=True, externalnames=False):
32 32 """Parse a bundle string specification into parts.
33 33
34 34 Bundle specifications denote a well-defined bundle/exchange format.
35 35 The content of a given specification should not change over time in
36 36 order to ensure that bundles produced by a newer version of Mercurial are
37 37 readable from an older version.
38 38
39 39 The string currently has the form:
40 40
41 41 <compression>-<type>
42 42
43 43 Where <compression> is one of the supported compression formats
44 44 and <type> is (currently) a version string.
45 45
46 46 If ``strict`` is True (the default) <compression> is required. Otherwise,
47 47 it is optional.
48 48
49 49 If ``externalnames`` is False (the default), the human-centric names will
50 50 be converted to their internal representation.
51 51
52 52 Returns a 2-tuple of (compression, version). Compression will be ``None``
53 53 if not in strict mode and a compression isn't defined.
54 54
55 55 An ``InvalidBundleSpecification`` is raised when the specification is
56 56 not syntactically well formed.
57 57
58 58 An ``UnsupportedBundleSpecification`` is raised when the compression or
59 59 bundle type/version is not recognized.
60 60
61 61 Note: this function will likely eventually return a more complex data
62 62 structure, including bundle2 part information.
63 63 """
64 64 if strict and '-' not in spec:
65 65 raise error.InvalidBundleSpecification(
66 66 _('invalid bundle specification; '
67 67 'must be prefixed with compression: %s') % spec)
68 68
69 69 if '-' in spec:
70 70 compression, version = spec.split('-', 1)
71 71
72 72 if compression not in _bundlespeccompressions:
73 73 raise error.UnsupportedBundleSpecification(
74 74 _('%s compression is not supported') % compression)
75 75
76 76 if version not in _bundlespeccgversions:
77 77 raise error.UnsupportedBundleSpecification(
78 78 _('%s is not a recognized bundle version') % version)
79 79 else:
80 80 # Value could be just the compression or just the version, in which
81 81 # case some defaults are assumed (but only when not in strict mode).
82 82 assert not strict
83 83
84 84 if spec in _bundlespeccompressions:
85 85 compression = spec
86 86 version = 'v1'
87 87 if 'generaldelta' in repo.requirements:
88 88 version = 'v2'
89 89 elif spec in _bundlespeccgversions:
90 90 compression = 'bzip2'
91 91 version = spec
92 92 else:
93 93 raise error.UnsupportedBundleSpecification(
94 94 _('%s is not a recognized bundle specification') % spec)
95 95
96 96 if not externalnames:
97 97 compression = _bundlespeccompressions[compression]
98 98 version = _bundlespeccgversions[version]
99 99 return compression, version
100 100
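An illustrative sketch of the parsing rules above (hypothetical call sites; the repo object is assumed to exist and, in the non-strict cases, to lack the 'generaldelta' requirement unless noted):

# full spec, mapped to internal names
parsebundlespec(repo, 'bzip2-v2')                     # -> ('BZ', '02')
# non-strict mode fills in defaults
parsebundlespec(repo, 'gzip', strict=False)           # -> ('GZ', '01');
                                                      #    '02' with generaldelta
parsebundlespec(repo, 'v2', strict=False)             # -> ('BZ', '02')
# keep the human-readable names
parsebundlespec(repo, 'none-v1', externalnames=True)  # -> ('none', 'v1')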
101 101 def readbundle(ui, fh, fname, vfs=None):
102 102 header = changegroup.readexactly(fh, 4)
103 103
104 104 alg = None
105 105 if not fname:
106 106 fname = "stream"
107 107 if not header.startswith('HG') and header.startswith('\0'):
108 108 fh = changegroup.headerlessfixup(fh, header)
109 109 header = "HG10"
110 110 alg = 'UN'
111 111 elif vfs:
112 112 fname = vfs.join(fname)
113 113
114 114 magic, version = header[0:2], header[2:4]
115 115
116 116 if magic != 'HG':
117 117 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
118 118 if version == '10':
119 119 if alg is None:
120 120 alg = changegroup.readexactly(fh, 2)
121 121 return changegroup.cg1unpacker(fh, alg)
122 122 elif version.startswith('2'):
123 123 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
124 124 else:
125 125 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
126 126
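As a reading aid, a sketch of the header dispatch implemented above (the file name, stream and ui object are hypothetical):

# the first 4 bytes select the unbundler:
#   'HG10' + 2-byte compression code ('BZ', 'GZ' or 'UN')  -> cg1unpacker
#   'HG2x'                                                 -> bundle2 unbundler
#   a leading '\0' byte -> headerless changegroup, fixed up as 'HG10'/'UN'
fh = open('bundle.hg', 'rb')
unbundler = readbundle(ui, fh, 'bundle.hg')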
127 127 def buildobsmarkerspart(bundler, markers):
128 128 """add an obsmarker part to the bundler with <markers>
129 129
130 130 No part is created if markers is empty.
131 131 Raises ValueError if the bundler doesn't support any known obsmarker format.
132 132 """
133 133 if markers:
134 134 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
135 135 version = obsolete.commonversion(remoteversions)
136 136 if version is None:
137 137 raise ValueError('bundler does not support common obsmarker format')
138 138 stream = obsolete.encodemarkers(markers, True, version=version)
139 139 return bundler.newpart('obsmarkers', data=stream)
140 140 return None
141 141
142 142 def _canusebundle2(op):
143 143 """return true if a pull/push can use bundle2
144 144
145 145 Feel free to nuke this function when we drop the experimental option"""
146 146 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
147 147 and op.remote.capable('bundle2'))
148 148
149 149
150 150 class pushoperation(object):
151 151 """A object that represent a single push operation
152 152
153 153 It purpose is to carry push related state and very common operation.
154 154
155 155 A new should be created at the beginning of each push and discarded
156 156 afterward.
157 157 """
158 158
159 159 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
160 160 bookmarks=()):
161 161 # repo we push from
162 162 self.repo = repo
163 163 self.ui = repo.ui
164 164 # repo we push to
165 165 self.remote = remote
166 166 # force option provided
167 167 self.force = force
168 168 # revs to be pushed (None is "all")
169 169 self.revs = revs
170 170 # bookmark explicitly pushed
171 171 self.bookmarks = bookmarks
172 172 # allow push of new branch
173 173 self.newbranch = newbranch
174 174 # did a local lock get acquired?
175 175 self.locallocked = None
176 176 # steps already performed
177 177 # (used to check what steps have already been performed through bundle2)
178 178 self.stepsdone = set()
179 179 # Integer version of the changegroup push result
180 180 # - None means nothing to push
181 181 # - 0 means HTTP error
182 182 # - 1 means we pushed and remote head count is unchanged *or*
183 183 # we have outgoing changesets but refused to push
184 184 # - other values as described by addchangegroup()
185 185 self.cgresult = None
186 186 # Boolean value for the bookmark push
187 187 self.bkresult = None
188 188 # discover.outgoing object (contains common and outgoing data)
189 189 self.outgoing = None
190 190 # all remote heads before the push
191 191 self.remoteheads = None
192 192 # testable as a boolean indicating if any nodes are missing locally.
193 193 self.incoming = None
194 194 # phase changes that must be pushed alongside the changesets
195 195 self.outdatedphases = None
196 196 # phase changes that must be pushed if the changeset push fails
197 197 self.fallbackoutdatedphases = None
198 198 # outgoing obsmarkers
199 199 self.outobsmarkers = set()
200 200 # outgoing bookmarks
201 201 self.outbookmarks = []
202 202 # transaction manager
203 203 self.trmanager = None
204 204 # map { pushkey partid -> callback handling failure}
205 205 # used to handle exception from mandatory pushkey part failure
206 206 self.pkfailcb = {}
207 207
208 208 @util.propertycache
209 209 def futureheads(self):
210 210 """future remote heads if the changeset push succeeds"""
211 211 return self.outgoing.missingheads
212 212
213 213 @util.propertycache
214 214 def fallbackheads(self):
215 215 """future remote heads if the changeset push fails"""
216 216 if self.revs is None:
217 217 # no target to push, all common heads are relevant
218 218 return self.outgoing.commonheads
219 219 unfi = self.repo.unfiltered()
220 220 # I want cheads = heads(::missingheads and ::commonheads)
221 221 # (missingheads is revs with secret changeset filtered out)
222 222 #
223 223 # This can be expressed as:
224 224 # cheads = ( (missingheads and ::commonheads)
225 225 # + (commonheads and ::missingheads))
226 226 # )
227 227 #
228 228 # while trying to push we already computed the following:
229 229 # common = (::commonheads)
230 230 # missing = ((commonheads::missingheads) - commonheads)
231 231 #
232 232 # We can pick:
233 233 # * missingheads part of common (::commonheads)
234 234 common = self.outgoing.common
235 235 nm = self.repo.changelog.nodemap
236 236 cheads = [node for node in self.revs if nm[node] in common]
237 237 # and
238 238 # * commonheads parents on missing
239 239 revset = unfi.set('%ln and parents(roots(%ln))',
240 240 self.outgoing.commonheads,
241 241 self.outgoing.missing)
242 242 cheads.extend(c.node() for c in revset)
243 243 return cheads
244 244
245 245 @property
246 246 def commonheads(self):
247 247 """set of all common heads after changeset bundle push"""
248 248 if self.cgresult:
249 249 return self.futureheads
250 250 else:
251 251 return self.fallbackheads
252 252
253 253 # mapping of messages used when pushing bookmarks
254 254 bookmsgmap = {'update': (_("updating bookmark %s\n"),
255 255 _('updating bookmark %s failed!\n')),
256 256 'export': (_("exporting bookmark %s\n"),
257 257 _('exporting bookmark %s failed!\n')),
258 258 'delete': (_("deleting remote bookmark %s\n"),
259 259 _('deleting remote bookmark %s failed!\n')),
260 260 }
261 261
262 262
263 263 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
264 264 '''Push outgoing changesets (limited by revs) from a local
265 265 repository to remote. Return an integer:
266 266 - None means nothing to push
267 267 - 0 means HTTP error
268 268 - 1 means we pushed and remote head count is unchanged *or*
269 269 we have outgoing changesets but refused to push
270 270 - other values as described by addchangegroup()
271 271 '''
272 272 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
273 273 if pushop.remote.local():
274 274 missing = (set(pushop.repo.requirements)
275 275 - pushop.remote.local().supported)
276 276 if missing:
277 277 msg = _("required features are not"
278 278 " supported in the destination:"
279 279 " %s") % (', '.join(sorted(missing)))
280 280 raise error.Abort(msg)
281 281
282 282 # there are two ways to push to remote repo:
283 283 #
284 284 # addchangegroup assumes local user can lock remote
285 285 # repo (local filesystem, old ssh servers).
286 286 #
287 287 # unbundle assumes local user cannot lock remote repo (new ssh
288 288 # servers, http servers).
289 289
290 290 if not pushop.remote.canpush():
291 291 raise error.Abort(_("destination does not support push"))
292 292 # get local lock as we might write phase data
293 293 localwlock = locallock = None
294 294 try:
295 295 # bundle2 push may receive a reply bundle touching bookmarks or other
296 296 # things requiring the wlock. Take it now to ensure proper ordering.
297 297 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
298 298 if _canusebundle2(pushop) and maypushback:
299 299 localwlock = pushop.repo.wlock()
300 300 locallock = pushop.repo.lock()
301 301 pushop.locallocked = True
302 302 except IOError as err:
303 303 pushop.locallocked = False
304 304 if err.errno != errno.EACCES:
305 305 raise
306 306 # source repo cannot be locked.
307 307 # We do not abort the push, but just disable the local phase
308 308 # synchronisation.
309 309 msg = 'cannot lock source repository: %s\n' % err
310 310 pushop.ui.debug(msg)
311 311 try:
312 312 if pushop.locallocked:
313 313 pushop.trmanager = transactionmanager(pushop.repo,
314 314 'push-response',
315 315 pushop.remote.url())
316 316 pushop.repo.checkpush(pushop)
317 317 lock = None
318 318 unbundle = pushop.remote.capable('unbundle')
319 319 if not unbundle:
320 320 lock = pushop.remote.lock()
321 321 try:
322 322 _pushdiscovery(pushop)
323 323 if _canusebundle2(pushop):
324 324 _pushbundle2(pushop)
325 325 _pushchangeset(pushop)
326 326 _pushsyncphase(pushop)
327 327 _pushobsolete(pushop)
328 328 _pushbookmark(pushop)
329 329 finally:
330 330 if lock is not None:
331 331 lock.release()
332 332 if pushop.trmanager:
333 333 pushop.trmanager.close()
334 334 finally:
335 335 if pushop.trmanager:
336 336 pushop.trmanager.release()
337 337 if locallock is not None:
338 338 locallock.release()
339 339 if localwlock is not None:
340 340 localwlock.release()
341 341
342 342 return pushop
343 343
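A sketch of driver code using the return-code convention documented above (repo, remote and nodes are assumed to exist):

pushop = push(repo, remote, revs=nodes, newbranch=True)
if pushop.cgresult is None:
    repo.ui.status('nothing to push\n')
elif pushop.cgresult == 0:
    repo.ui.warn('push failed (HTTP error)\n')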
344 344 # list of steps to perform discovery before push
345 345 pushdiscoveryorder = []
346 346
347 347 # Mapping between step name and function
348 348 #
349 349 # This exists to help extensions wrap steps if necessary
350 350 pushdiscoverymapping = {}
351 351
352 352 def pushdiscovery(stepname):
353 353 """decorator for function performing discovery before push
354 354
355 355 The function is added to the step -> function mapping and appended to the
356 356 list of steps. Beware that decorated functions will be added in order (this
357 357 may matter).
358 358
359 359 You can only use this decorator for a new step; if you want to wrap a step
360 360 from an extension, change the pushdiscovery dictionary directly."""
361 361 def dec(func):
362 362 assert stepname not in pushdiscoverymapping
363 363 pushdiscoverymapping[stepname] = func
364 364 pushdiscoveryorder.append(stepname)
365 365 return func
366 366 return dec
367 367
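Following the docstring's advice, a minimal sketch of both usages: registering a brand-new step with the decorator, and wrapping an existing step by editing the mapping (the step name and debug messages are hypothetical):

@pushdiscovery('mystep')
def _pushdiscoverymystep(pushop):
    pushop.ui.debug('running my discovery step\n')

# an extension wrapping an existing step:
origstep = pushdiscoverymapping['changeset']
def wrappedstep(pushop):
    pushop.ui.debug('before changeset discovery\n')
    origstep(pushop)
pushdiscoverymapping['changeset'] = wrappedstep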
368 368 def _pushdiscovery(pushop):
369 369 """Run all discovery steps"""
370 370 for stepname in pushdiscoveryorder:
371 371 step = pushdiscoverymapping[stepname]
372 372 step(pushop)
373 373
374 374 @pushdiscovery('changeset')
375 375 def _pushdiscoverychangeset(pushop):
376 376 """discover the changeset that need to be pushed"""
377 377 fci = discovery.findcommonincoming
378 378 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
379 379 common, inc, remoteheads = commoninc
380 380 fco = discovery.findcommonoutgoing
381 381 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
382 382 commoninc=commoninc, force=pushop.force)
383 383 pushop.outgoing = outgoing
384 384 pushop.remoteheads = remoteheads
385 385 pushop.incoming = inc
386 386
387 387 @pushdiscovery('phase')
388 388 def _pushdiscoveryphase(pushop):
389 389 """discover the phase that needs to be pushed
390 390
391 391 (computed for both success and failure case for changesets push)"""
392 392 outgoing = pushop.outgoing
393 393 unfi = pushop.repo.unfiltered()
394 394 remotephases = pushop.remote.listkeys('phases')
395 395 publishing = remotephases.get('publishing', False)
396 396 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
397 397 and remotephases # server supports phases
398 398 and not pushop.outgoing.missing # no changesets to be pushed
399 399 and publishing):
400 400 # When:
401 401 # - this is a subrepo push
402 402 # - and the remote supports phases
403 403 # - and no changesets are to be pushed
404 404 # - and the remote is publishing
405 405 # We may be in the issue 3871 case!
406 406 # We drop the phase synchronisation we would otherwise do as a
407 407 # courtesy, to publish changesets that are possibly draft locally
408 408 # on the remote.
409 409 remotephases = {'publishing': 'True'}
410 410 ana = phases.analyzeremotephases(pushop.repo,
411 411 pushop.fallbackheads,
412 412 remotephases)
413 413 pheads, droots = ana
414 414 extracond = ''
415 415 if not publishing:
416 416 extracond = ' and public()'
417 417 revset = 'heads((%%ln::%%ln) %s)' % extracond
418 418 # Get the list of all revs that are draft on the remote but public here.
419 419 # XXX Beware that the revset breaks if droots is not strictly
420 420 # XXX roots; we may want to ensure it is, but that is costly
421 421 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
422 422 if not outgoing.missing:
423 423 future = fallback
424 424 else:
425 425 # add the changesets we are going to push as draft
426 426 #
427 427 # should not be necessary for a publishing server, but because of an
428 428 # issue fixed in xxxxx we have to do it anyway.
429 429 fdroots = list(unfi.set('roots(%ln + %ln::)',
430 430 outgoing.missing, droots))
431 431 fdroots = [f.node() for f in fdroots]
432 432 future = list(unfi.set(revset, fdroots, pushop.futureheads))
433 433 pushop.outdatedphases = future
434 434 pushop.fallbackoutdatedphases = fallback
435 435
436 436 @pushdiscovery('obsmarker')
437 437 def _pushdiscoveryobsmarkers(pushop):
438 438 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
439 439 and pushop.repo.obsstore
440 440 and 'obsolete' in pushop.remote.listkeys('namespaces')):
441 441 repo = pushop.repo
442 442 # very naive computation that can be quite expensive on big repos.
443 443 # However: evolution is currently slow on them anyway.
444 444 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
445 445 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
446 446
447 447 @pushdiscovery('bookmarks')
448 448 def _pushdiscoverybookmarks(pushop):
449 449 ui = pushop.ui
450 450 repo = pushop.repo.unfiltered()
451 451 remote = pushop.remote
452 452 ui.debug("checking for updated bookmarks\n")
453 453 ancestors = ()
454 454 if pushop.revs:
455 455 revnums = map(repo.changelog.rev, pushop.revs)
456 456 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
457 457 remotebookmark = remote.listkeys('bookmarks')
458 458
459 459 explicit = set(pushop.bookmarks)
460 460
461 461 comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
462 462 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
463 463 for b, scid, dcid in advsrc:
464 464 if b in explicit:
465 465 explicit.remove(b)
466 466 if not ancestors or repo[scid].rev() in ancestors:
467 467 pushop.outbookmarks.append((b, dcid, scid))
468 468 # search for added bookmarks
469 469 for b, scid, dcid in addsrc:
470 470 if b in explicit:
471 471 explicit.remove(b)
472 472 pushop.outbookmarks.append((b, '', scid))
473 473 # search for overwritten bookmarks
474 474 for b, scid, dcid in advdst + diverge + differ:
475 475 if b in explicit:
476 476 explicit.remove(b)
477 477 pushop.outbookmarks.append((b, dcid, scid))
478 478 # search for bookmarks to delete
479 479 for b, scid, dcid in adddst:
480 480 if b in explicit:
481 481 explicit.remove(b)
482 482 # treat as "deleted locally"
483 483 pushop.outbookmarks.append((b, dcid, ''))
484 484 # identical bookmarks shouldn't get reported
485 485 for b, scid, dcid in same:
486 486 if b in explicit:
487 487 explicit.remove(b)
488 488
489 489 if explicit:
490 490 explicit = sorted(explicit)
491 491 # we should probably list all of them
492 492 ui.warn(_('bookmark %s does not exist on the local '
493 493 'or remote repository!\n') % explicit[0])
494 494 pushop.bkresult = 2
495 495
496 496 pushop.outbookmarks.sort()
497 497
498 498 def _pushcheckoutgoing(pushop):
499 499 outgoing = pushop.outgoing
500 500 unfi = pushop.repo.unfiltered()
501 501 if not outgoing.missing:
502 502 # nothing to push
503 503 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
504 504 return False
505 505 # something to push
506 506 if not pushop.force:
507 507 # if repo.obsstore == False --> no obsolete
508 508 # then, save the iteration
509 509 if unfi.obsstore:
510 510 # these messages are here for 80-char limit reasons
511 511 mso = _("push includes obsolete changeset: %s!")
512 512 mst = {"unstable": _("push includes unstable changeset: %s!"),
513 513 "bumped": _("push includes bumped changeset: %s!"),
514 514 "divergent": _("push includes divergent changeset: %s!")}
515 515 # If we are to push and there is at least one
516 516 # obsolete or unstable changeset in missing, at
517 517 # least one of the missingheads will be obsolete or
518 518 # unstable. So checking heads only is ok
519 519 for node in outgoing.missingheads:
520 520 ctx = unfi[node]
521 521 if ctx.obsolete():
522 522 raise error.Abort(mso % ctx)
523 523 elif ctx.troubled():
524 524 raise error.Abort(mst[ctx.troubles()[0]] % ctx)
525 525
526 526 # internal config: bookmarks.pushing
527 527 newbm = pushop.ui.configlist('bookmarks', 'pushing')
528 528 discovery.checkheads(unfi, pushop.remote, outgoing,
529 529 pushop.remoteheads,
530 530 pushop.newbranch,
531 531 bool(pushop.incoming),
532 532 newbm)
533 533 return True
534 534
535 535 # List of names of steps to perform for an outgoing bundle2, order matters.
536 536 b2partsgenorder = []
537 537
538 538 # Mapping between step name and function
539 539 #
540 540 # This exists to help extensions wrap steps if necessary
541 541 b2partsgenmapping = {}
542 542
543 543 def b2partsgenerator(stepname, idx=None):
544 544 """decorator for function generating bundle2 part
545 545
546 546 The function is added to the step -> function mapping and appended to the
547 547 list of steps. Beware that decorated functions will be added in order
548 548 (this may matter).
549 549
550 550 You can only use this decorator for new steps; if you want to wrap a step
551 551 from an extension, change the b2partsgenmapping dictionary directly."""
552 552 def dec(func):
553 553 assert stepname not in b2partsgenmapping
554 554 b2partsgenmapping[stepname] = func
555 555 if idx is None:
556 556 b2partsgenorder.append(stepname)
557 557 else:
558 558 b2partsgenorder.insert(idx, stepname)
559 559 return func
560 560 return dec
561 561
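A sketch of the ``idx`` knob: without it the new generator is appended to b2partsgenorder, with it the generator is spliced in at that position (the part name is hypothetical):

@b2partsgenerator('mypart', idx=0)   # run before all other part generators
def _pushb2mypart(pushop, bundler):
    if 'mypart' in pushop.stepsdone:
        return
    pushop.stepsdone.add('mypart')
    # may return a callable to handle the bundle2 reply, or None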
562 562 def _pushb2ctxcheckheads(pushop, bundler):
563 563 """Generate race condition checking parts
564 564
565 565 Exists as an independent function to aid extensions
566 566 """
567 567 if not pushop.force:
568 568 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
569 569
570 570 @b2partsgenerator('changeset')
571 571 def _pushb2ctx(pushop, bundler):
572 572 """handle changegroup push through bundle2
573 573
574 574 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
575 575 """
576 576 if 'changesets' in pushop.stepsdone:
577 577 return
578 578 pushop.stepsdone.add('changesets')
579 579 # Send known heads to the server for race detection.
580 580 if not _pushcheckoutgoing(pushop):
581 581 return
582 582 pushop.repo.prepushoutgoinghooks(pushop.repo,
583 583 pushop.remote,
584 584 pushop.outgoing)
585 585
586 586 _pushb2ctxcheckheads(pushop, bundler)
587 587
588 588 b2caps = bundle2.bundle2caps(pushop.remote)
589 589 version = None
590 590 cgversions = b2caps.get('changegroup')
591 591 if not cgversions: # 3.1 and 3.2 ship with an empty value
592 592 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
593 593 pushop.outgoing)
594 594 else:
595 595 cgversions = [v for v in cgversions if v in changegroup.packermap]
596 596 if not cgversions:
597 597 raise ValueError(_('no common changegroup version'))
598 598 version = max(cgversions)
599 599 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
600 600 pushop.outgoing,
601 601 version=version)
602 602 cgpart = bundler.newpart('changegroup', data=cg)
603 603 if version is not None:
604 604 cgpart.addparam('version', version)
605 605 def handlereply(op):
606 606 """extract addchangegroup returns from server reply"""
607 607 cgreplies = op.records.getreplies(cgpart.id)
608 608 assert len(cgreplies['changegroup']) == 1
609 609 pushop.cgresult = cgreplies['changegroup'][0]['return']
610 610 return handlereply
611 611
612 612 @b2partsgenerator('phase')
613 613 def _pushb2phases(pushop, bundler):
614 614 """handle phase push through bundle2"""
615 615 if 'phases' in pushop.stepsdone:
616 616 return
617 617 b2caps = bundle2.bundle2caps(pushop.remote)
618 618 if not 'pushkey' in b2caps:
619 619 return
620 620 pushop.stepsdone.add('phases')
621 621 part2node = []
622 622
623 623 def handlefailure(pushop, exc):
624 624 targetid = int(exc.partid)
625 625 for partid, node in part2node:
626 626 if partid == targetid:
627 627 raise error.Abort(_('updating %s to public failed') % node)
628 628
629 629 enc = pushkey.encode
630 630 for newremotehead in pushop.outdatedphases:
631 631 part = bundler.newpart('pushkey')
632 632 part.addparam('namespace', enc('phases'))
633 633 part.addparam('key', enc(newremotehead.hex()))
634 634 part.addparam('old', enc(str(phases.draft)))
635 635 part.addparam('new', enc(str(phases.public)))
636 636 part2node.append((part.id, newremotehead))
637 637 pushop.pkfailcb[part.id] = handlefailure
638 638
639 639 def handlereply(op):
640 640 for partid, node in part2node:
641 641 partrep = op.records.getreplies(partid)
642 642 results = partrep['pushkey']
643 643 assert len(results) <= 1
644 644 msg = None
645 645 if not results:
646 646 msg = _('server ignored update of %s to public!\n') % node
647 647 elif not int(results[0]['return']):
648 648 msg = _('updating %s to public failed!\n') % node
649 649 if msg is not None:
650 650 pushop.ui.warn(msg)
651 651 return handlereply
652 652
653 653 @b2partsgenerator('obsmarkers')
654 654 def _pushb2obsmarkers(pushop, bundler):
655 655 if 'obsmarkers' in pushop.stepsdone:
656 656 return
657 657 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
658 658 if obsolete.commonversion(remoteversions) is None:
659 659 return
660 660 pushop.stepsdone.add('obsmarkers')
661 661 if pushop.outobsmarkers:
662 662 markers = sorted(pushop.outobsmarkers)
663 663 buildobsmarkerspart(bundler, markers)
664 664
665 665 @b2partsgenerator('bookmarks')
666 666 def _pushb2bookmarks(pushop, bundler):
667 667 """handle bookmark push through bundle2"""
668 668 if 'bookmarks' in pushop.stepsdone:
669 669 return
670 670 b2caps = bundle2.bundle2caps(pushop.remote)
671 671 if 'pushkey' not in b2caps:
672 672 return
673 673 pushop.stepsdone.add('bookmarks')
674 674 part2book = []
675 675 enc = pushkey.encode
676 676
677 677 def handlefailure(pushop, exc):
678 678 targetid = int(exc.partid)
679 679 for partid, book, action in part2book:
680 680 if partid == targetid:
681 681 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
682 682 # we should not be called for parts we did not generate
683 683 assert False
684 684
685 685 for book, old, new in pushop.outbookmarks:
686 686 part = bundler.newpart('pushkey')
687 687 part.addparam('namespace', enc('bookmarks'))
688 688 part.addparam('key', enc(book))
689 689 part.addparam('old', enc(old))
690 690 part.addparam('new', enc(new))
691 691 action = 'update'
692 692 if not old:
693 693 action = 'export'
694 694 elif not new:
695 695 action = 'delete'
696 696 part2book.append((part.id, book, action))
697 697 pushop.pkfailcb[part.id] = handlefailure
698 698
699 699 def handlereply(op):
700 700 ui = pushop.ui
701 701 for partid, book, action in part2book:
702 702 partrep = op.records.getreplies(partid)
703 703 results = partrep['pushkey']
704 704 assert len(results) <= 1
705 705 if not results:
706 706 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
707 707 else:
708 708 ret = int(results[0]['return'])
709 709 if ret:
710 710 ui.status(bookmsgmap[action][0] % book)
711 711 else:
712 712 ui.warn(bookmsgmap[action][1] % book)
713 713 if pushop.bkresult is not None:
714 714 pushop.bkresult = 1
715 715 return handlereply
716 716
717 717
718 718 def _pushbundle2(pushop):
719 719 """push data to the remote using bundle2
720 720
721 721 The only currently supported type of data is the changegroup, but this
722 722 will evolve in the future."""
723 723 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
724 724 pushback = (pushop.trmanager
725 725 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
726 726
727 727 # create reply capability
728 728 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
729 729 allowpushback=pushback))
730 730 bundler.newpart('replycaps', data=capsblob)
731 731 replyhandlers = []
732 732 for partgenname in b2partsgenorder:
733 733 partgen = b2partsgenmapping[partgenname]
734 734 ret = partgen(pushop, bundler)
735 735 if callable(ret):
736 736 replyhandlers.append(ret)
737 737 # do not push if nothing to push
738 738 if bundler.nbparts <= 1:
739 739 return
740 740 stream = util.chunkbuffer(bundler.getchunks())
741 741 try:
742 742 try:
743 743 reply = pushop.remote.unbundle(stream, ['force'], 'push')
744 744 except error.BundleValueError as exc:
745 745 raise error.Abort('missing support for %s' % exc)
746 746 try:
747 747 trgetter = None
748 748 if pushback:
749 749 trgetter = pushop.trmanager.transaction
750 750 op = bundle2.processbundle(pushop.repo, reply, trgetter)
751 751 except error.BundleValueError as exc:
752 752 raise error.Abort('missing support for %s' % exc)
753 753 except error.PushkeyFailed as exc:
754 754 partid = int(exc.partid)
755 755 if partid not in pushop.pkfailcb:
756 756 raise
757 757 pushop.pkfailcb[partid](pushop, exc)
758 758 for rephand in replyhandlers:
759 759 rephand(op)
760 760
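The generator/reply-handler contract driven by the loop above, sketched with a hypothetical part:

def mypartgen(pushop, bundler):
    part = bundler.newpart('mypart')
    def handlereply(op):
        # op is the bundleoperation built by bundle2.processbundle();
        # look up the records matching the part we emitted
        replies = op.records.getreplies(part.id)
    return handlereply   # callable returns are collected into replyhandlers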
761 761 def _pushchangeset(pushop):
762 762 """Make the actual push of changeset bundle to remote repo"""
763 763 if 'changesets' in pushop.stepsdone:
764 764 return
765 765 pushop.stepsdone.add('changesets')
766 766 if not _pushcheckoutgoing(pushop):
767 767 return
768 768 pushop.repo.prepushoutgoinghooks(pushop.repo,
769 769 pushop.remote,
770 770 pushop.outgoing)
771 771 outgoing = pushop.outgoing
772 772 unbundle = pushop.remote.capable('unbundle')
773 773 # TODO: get bundlecaps from remote
774 774 bundlecaps = None
775 775 # create a changegroup from local
776 776 if pushop.revs is None and not (outgoing.excluded
777 777 or pushop.repo.changelog.filteredrevs):
778 778 # push everything,
779 779 # use the fast path, no race possible on push
780 780 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
781 781 cg = changegroup.getsubset(pushop.repo,
782 782 outgoing,
783 783 bundler,
784 784 'push',
785 785 fastpath=True)
786 786 else:
787 787 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
788 788 bundlecaps)
789 789
790 790 # apply changegroup to remote
791 791 if unbundle:
792 792 # local repo finds heads on server, finds out what
793 793 # revs it must push. once revs transferred, if server
794 794 # finds it has different heads (someone else won
795 795 # commit/push race), server aborts.
796 796 if pushop.force:
797 797 remoteheads = ['force']
798 798 else:
799 799 remoteheads = pushop.remoteheads
800 800 # ssh: return remote's addchangegroup()
801 801 # http: return remote's addchangegroup() or 0 for error
802 802 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
803 803 pushop.repo.url())
804 804 else:
805 805 # we return an integer indicating remote head count
806 806 # change
807 807 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
808 808 pushop.repo.url())
809 809
810 810 def _pushsyncphase(pushop):
811 811 """synchronise phase information locally and remotely"""
812 812 cheads = pushop.commonheads
813 813 # even when we don't push, exchanging phase data is useful
814 814 remotephases = pushop.remote.listkeys('phases')
815 815 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
816 816 and remotephases # server supports phases
817 817 and pushop.cgresult is None # nothing was pushed
818 818 and remotephases.get('publishing', False)):
819 819 # When:
820 820 # - this is a subrepo push
821 821 # - and the remote supports phases
822 822 # - and no changesets were pushed
823 823 # - and the remote is publishing
824 824 # We may be in the issue 3871 case!
825 825 # We drop the phase synchronisation we would otherwise do as a
826 826 # courtesy, to publish changesets that are possibly draft locally
827 827 # on the remote.
828 828 remotephases = {'publishing': 'True'}
829 829 if not remotephases: # old server, or public-only reply from non-publishing
830 830 _localphasemove(pushop, cheads)
831 831 # don't push any phase data as there is nothing to push
832 832 else:
833 833 ana = phases.analyzeremotephases(pushop.repo, cheads,
834 834 remotephases)
835 835 pheads, droots = ana
836 836 ### Apply remote phase on local
837 837 if remotephases.get('publishing', False):
838 838 _localphasemove(pushop, cheads)
839 839 else: # publish = False
840 840 _localphasemove(pushop, pheads)
841 841 _localphasemove(pushop, cheads, phases.draft)
842 842 ### Apply local phase on remote
843 843
844 844 if pushop.cgresult:
845 845 if 'phases' in pushop.stepsdone:
846 846 # phases already pushed through bundle2
847 847 return
848 848 outdated = pushop.outdatedphases
849 849 else:
850 850 outdated = pushop.fallbackoutdatedphases
851 851
852 852 pushop.stepsdone.add('phases')
853 853
854 854 # filter heads already turned public by the push
855 855 outdated = [c for c in outdated if c.node() not in pheads]
856 856 # fallback to independent pushkey command
857 857 for newremotehead in outdated:
858 858 r = pushop.remote.pushkey('phases',
859 859 newremotehead.hex(),
860 860 str(phases.draft),
861 861 str(phases.public))
862 862 if not r:
863 863 pushop.ui.warn(_('updating %s to public failed!\n')
864 864 % newremotehead)
865 865
866 866 def _localphasemove(pushop, nodes, phase=phases.public):
867 867 """move <nodes> to <phase> in the local source repo"""
868 868 if pushop.trmanager:
869 869 phases.advanceboundary(pushop.repo,
870 870 pushop.trmanager.transaction(),
871 871 phase,
872 872 nodes)
873 873 else:
874 874 # repo is not locked, do not change any phases!
875 875 # Inform the user that phases should have been moved when
876 876 # applicable.
877 877 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
878 878 phasestr = phases.phasenames[phase]
879 879 if actualmoves:
880 880 pushop.ui.status(_('cannot lock source repo, skipping '
881 881 'local %s phase update\n') % phasestr)
882 882
883 883 def _pushobsolete(pushop):
884 884 """utility function to push obsolete markers to a remote"""
885 885 if 'obsmarkers' in pushop.stepsdone:
886 886 return
887 887 repo = pushop.repo
888 888 remote = pushop.remote
889 889 pushop.stepsdone.add('obsmarkers')
890 890 if pushop.outobsmarkers:
891 891 pushop.ui.debug('try to push obsolete markers to remote\n')
892 892 rslts = []
893 893 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
894 894 for key in sorted(remotedata, reverse=True):
895 895 # reverse sort to ensure we end with dump0
896 896 data = remotedata[key]
897 897 rslts.append(remote.pushkey('obsolete', key, '', data))
898 898 if [r for r in rslts if not r]:
899 899 msg = _('failed to push some obsolete markers!\n')
900 900 repo.ui.warn(msg)
901 901
902 902 def _pushbookmark(pushop):
903 903 """Update bookmark position on remote"""
904 904 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
905 905 return
906 906 pushop.stepsdone.add('bookmarks')
907 907 ui = pushop.ui
908 908 remote = pushop.remote
909 909
910 910 for b, old, new in pushop.outbookmarks:
911 911 action = 'update'
912 912 if not old:
913 913 action = 'export'
914 914 elif not new:
915 915 action = 'delete'
916 916 if remote.pushkey('bookmarks', b, old, new):
917 917 ui.status(bookmsgmap[action][0] % b)
918 918 else:
919 919 ui.warn(bookmsgmap[action][1] % b)
920 920 # discovery can have set the value from an invalid entry
921 921 if pushop.bkresult is not None:
922 922 pushop.bkresult = 1
923 923
924 924 class pulloperation(object):
925 925 """A object that represent a single pull operation
926 926
927 927 It purpose is to carry pull related state and very common operation.
928 928
929 929 A new should be created at the beginning of each pull and discarded
930 930 afterward.
931 931 """
932 932
933 933 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
934 934 remotebookmarks=None, streamclonerequested=None):
935 935 # repo we pull into
936 936 self.repo = repo
937 937 # repo we pull from
938 938 self.remote = remote
939 939 # revision we try to pull (None is "all")
940 940 self.heads = heads
941 941 # bookmark pulled explicitly
942 942 self.explicitbookmarks = bookmarks
943 943 # do we force pull?
944 944 self.force = force
945 945 # whether a streaming clone was requested
946 946 self.streamclonerequested = streamclonerequested
947 947 # transaction manager
948 948 self.trmanager = None
949 949 # set of common changesets between local and remote before pull
950 950 self.common = None
951 951 # set of pulled heads
952 952 self.rheads = None
953 953 # list of missing changesets to fetch remotely
954 954 self.fetch = None
955 955 # remote bookmarks data
956 956 self.remotebookmarks = remotebookmarks
957 957 # result of changegroup pulling (used as return code by pull)
958 958 self.cgresult = None
959 959 # list of steps already done
960 960 self.stepsdone = set()
961 961 # Whether we attempted a clone from pre-generated bundles.
962 962 self.clonebundleattempted = False
963 963
964 964 @util.propertycache
965 965 def pulledsubset(self):
966 966 """heads of the set of changeset target by the pull"""
967 967 # compute target subset
968 968 if self.heads is None:
969 969 # We pulled everything possible
970 970 # sync on everything common
971 971 c = set(self.common)
972 972 ret = list(self.common)
973 973 for n in self.rheads:
974 974 if n not in c:
975 975 ret.append(n)
976 976 return ret
977 977 else:
978 978 # We pulled a specific subset
979 979 # sync on this subset
980 980 return self.heads
981 981
982 982 @util.propertycache
983 983 def canusebundle2(self):
984 984 return _canusebundle2(self)
985 985
986 986 @util.propertycache
987 987 def remotebundle2caps(self):
988 988 return bundle2.bundle2caps(self.remote)
989 989
990 990 def gettransaction(self):
991 991 # deprecated; talk to trmanager directly
992 992 return self.trmanager.transaction()
993 993
994 994 class transactionmanager(object):
995 995 """An object to manage the life cycle of a transaction
996 996
997 997 It creates the transaction on demand and calls the appropriate hooks when
998 998 closing the transaction."""
999 999 def __init__(self, repo, source, url):
1000 1000 self.repo = repo
1001 1001 self.source = source
1002 1002 self.url = url
1003 1003 self._tr = None
1004 1004
1005 1005 def transaction(self):
1006 1006 """Return an open transaction object, constructing if necessary"""
1007 1007 if not self._tr:
1008 1008 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1009 1009 self._tr = self.repo.transaction(trname)
1010 1010 self._tr.hookargs['source'] = self.source
1011 1011 self._tr.hookargs['url'] = self.url
1012 1012 return self._tr
1013 1013
1014 1014 def close(self):
1015 1015 """close transaction if created"""
1016 1016 if self._tr is not None:
1017 1017 self._tr.close()
1018 1018
1019 1019 def release(self):
1020 1020 """release transaction if created"""
1021 1021 if self._tr is not None:
1022 1022 self._tr.release()
1023 1023
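The intended life cycle, mirroring how pull() below drives it (a sketch, not new API):

trmanager = transactionmanager(repo, 'pull', remote.url())
try:
    tr = trmanager.transaction()   # created lazily, then reused
    # ... apply incoming data under tr ...
    trmanager.close()              # commits, if a transaction was created
finally:
    trmanager.release()            # rolls back if close() was never reached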
1024 1024 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1025 1025 streamclonerequested=None):
1026 1026 """Fetch repository data from a remote.
1027 1027
1028 1028 This is the main function used to retrieve data from a remote repository.
1029 1029
1030 1030 ``repo`` is the local repository to clone into.
1031 1031 ``remote`` is a peer instance.
1032 1032 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1033 1033 default) means to pull everything from the remote.
1034 1034 ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
1035 1035 default, all remote bookmarks are pulled.
1036 1036 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1037 1037 initialization.
1038 1038 ``streamclonerequested`` is a boolean indicating whether a "streaming
1039 1039 clone" is requested. A "streaming clone" is essentially a raw file copy
1040 1040 of revlogs from the server. This only works when the local repository is
1041 1041 empty. The default value of ``None`` means to respect the server
1042 1042 configuration for preferring stream clones.
1043 1043
1044 1044 Returns the ``pulloperation`` created for this pull.
1045 1045 """
1046 1046 if opargs is None:
1047 1047 opargs = {}
1048 1048 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1049 1049 streamclonerequested=streamclonerequested, **opargs)
1050 1050 if pullop.remote.local():
1051 1051 missing = set(pullop.remote.requirements) - pullop.repo.supported
1052 1052 if missing:
1053 1053 msg = _("required features are not"
1054 1054 " supported in the destination:"
1055 1055 " %s") % (', '.join(sorted(missing)))
1056 1056 raise error.Abort(msg)
1057 1057
1058 1058 lock = pullop.repo.lock()
1059 1059 try:
1060 1060 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1061 1061 streamclone.maybeperformlegacystreamclone(pullop)
1062 1062 # This should ideally be in _pullbundle2(). However, it needs to run
1063 1063 # before discovery to avoid extra work.
1064 1064 _maybeapplyclonebundle(pullop)
1065 1065 _pulldiscovery(pullop)
1066 1066 if pullop.canusebundle2:
1067 1067 _pullbundle2(pullop)
1068 1068 _pullchangeset(pullop)
1069 1069 _pullphase(pullop)
1070 1070 _pullbookmarks(pullop)
1071 1071 _pullobsolete(pullop)
1072 1072 pullop.trmanager.close()
1073 1073 finally:
1074 1074 pullop.trmanager.release()
1075 1075 lock.release()
1076 1076
1077 1077 return pullop
1078 1078
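Hedged examples of the call signatures described in the docstring (node and bookmark values are hypothetical):

pullop = pull(repo, remote)                        # pull everything
pullop = pull(repo, remote, heads=[somenode],      # pull a subset,
              bookmarks=['@'])                     # plus one bookmark
if pullop.cgresult == 0:
    repo.ui.status('no changes found\n')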
1079 1079 # list of steps to perform discovery before pull
1080 1080 pulldiscoveryorder = []
1081 1081
1082 1082 # Mapping between step name and function
1083 1083 #
1084 1084 # This exists to help extensions wrap steps if necessary
1085 1085 pulldiscoverymapping = {}
1086 1086
1087 1087 def pulldiscovery(stepname):
1088 1088 """decorator for function performing discovery before pull
1089 1089
1090 1090 The function is added to the step -> function mapping and appended to the
1091 1091 list of steps. Beware that decorated functions will be added in order (this
1092 1092 may matter).
1093 1093
1094 1094 You can only use this decorator for a new step; if you want to wrap a step
1095 1095 from an extension, change the pulldiscovery dictionary directly."""
1096 1096 def dec(func):
1097 1097 assert stepname not in pulldiscoverymapping
1098 1098 pulldiscoverymapping[stepname] = func
1099 1099 pulldiscoveryorder.append(stepname)
1100 1100 return func
1101 1101 return dec
1102 1102
1103 1103 def _pulldiscovery(pullop):
1104 1104 """Run all discovery steps"""
1105 1105 for stepname in pulldiscoveryorder:
1106 1106 step = pulldiscoverymapping[stepname]
1107 1107 step(pullop)
1108 1108
1109 1109 @pulldiscovery('b1:bookmarks')
1110 1110 def _pullbookmarkbundle1(pullop):
1111 1111 """fetch bookmark data in bundle1 case
1112 1112
1113 1113 If not using bundle2, we have to fetch bookmarks before changeset
1114 1114 discovery to reduce the chance and impact of race conditions."""
1115 1115 if pullop.remotebookmarks is not None:
1116 1116 return
1117 1117 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1118 1118 # all known bundle2 servers now support listkeys, but let's be nice with
1119 1119 # new implementations.
1120 1120 return
1121 1121 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1122 1122
1123 1123
1124 1124 @pulldiscovery('changegroup')
1125 1125 def _pulldiscoverychangegroup(pullop):
1126 1126 """discovery phase for the pull
1127 1127
1128 1128 Currently handles changeset discovery only; will eventually handle all
1129 1129 discovery at some point."""
1130 1130 tmp = discovery.findcommonincoming(pullop.repo,
1131 1131 pullop.remote,
1132 1132 heads=pullop.heads,
1133 1133 force=pullop.force)
1134 1134 common, fetch, rheads = tmp
1135 1135 nm = pullop.repo.unfiltered().changelog.nodemap
1136 1136 if fetch and rheads:
1137 1137 # If a remote head is filtered locally, let's drop it from the unknown
1138 1138 # remote heads and put it back in common.
1139 1139 #
1140 1140 # This is a hackish solution to catch most of the "common but locally
1141 1141 # hidden" situations. We do not perform discovery on the unfiltered
1142 1142 # repository because it ends up doing a pathological number of round
1143 1143 # trips for a huge number of changesets we do not care about.
1144 1144 #
1145 1145 # If a set of such "common but filtered" changesets exists on the server
1146 1146 # but does not include a remote head, we'll not be able to detect it,
1147 1147 scommon = set(common)
1148 1148 filteredrheads = []
1149 1149 for n in rheads:
1150 1150 if n in nm:
1151 1151 if n not in scommon:
1152 1152 common.append(n)
1153 1153 else:
1154 1154 filteredrheads.append(n)
1155 1155 if not filteredrheads:
1156 1156 fetch = []
1157 1157 rheads = filteredrheads
1158 1158 pullop.common = common
1159 1159 pullop.fetch = fetch
1160 1160 pullop.rheads = rheads
1161 1161
1162 1162 def _pullbundle2(pullop):
1163 1163 """pull data using bundle2
1164 1164
1165 1165 For now, the only supported data is the changegroup."""
1166 1166 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1167 1167
1168 1168 streaming, streamreqs = streamclone.canperformstreamclone(pullop)
1169 1169
1170 1170 # pulling changegroup
1171 1171 pullop.stepsdone.add('changegroup')
1172 1172
1173 1173 kwargs['common'] = pullop.common
1174 1174 kwargs['heads'] = pullop.heads or pullop.rheads
1175 1175 kwargs['cg'] = pullop.fetch
1176 1176 if 'listkeys' in pullop.remotebundle2caps:
1177 1177 kwargs['listkeys'] = ['phase']
1178 1178 if pullop.remotebookmarks is None:
1179 1179 # make sure to always include bookmark data when migrating
1180 1180 # `hg incoming --bundle` to using this function.
1181 1181 kwargs['listkeys'].append('bookmarks')
1182 1182
1183 1183 # If this is a full pull / clone and the server supports the clone bundles
1184 1184 # feature, tell the server whether we attempted a clone bundle. The
1185 1185 # presence of this flag indicates the client supports clone bundles. This
1186 1186 # will enable the server to treat clients that support clone bundles
1187 1187 # differently from those that don't.
1188 1188 if (pullop.remote.capable('clonebundles')
1189 1189 and pullop.heads is None and list(pullop.common) == [nullid]):
1190 1190 kwargs['cbattempted'] = pullop.clonebundleattempted
1191 1191
1192 1192 if streaming:
1193 1193 pullop.repo.ui.status(_('streaming all changes\n'))
1194 1194 elif not pullop.fetch:
1195 1195 pullop.repo.ui.status(_("no changes found\n"))
1196 1196 pullop.cgresult = 0
1197 1197 else:
1198 1198 if pullop.heads is None and list(pullop.common) == [nullid]:
1199 1199 pullop.repo.ui.status(_("requesting all changes\n"))
1200 1200 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1201 1201 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1202 1202 if obsolete.commonversion(remoteversions) is not None:
1203 1203 kwargs['obsmarkers'] = True
1204 1204 pullop.stepsdone.add('obsmarkers')
1205 1205 _pullbundle2extraprepare(pullop, kwargs)
1206 1206 bundle = pullop.remote.getbundle('pull', **kwargs)
1207 1207 try:
1208 1208 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1209 1209 except error.BundleValueError as exc:
1210 1210 raise error.Abort('missing support for %s' % exc)
1211 1211
1212 1212 if pullop.fetch:
1213 1213 results = [cg['return'] for cg in op.records['changegroup']]
1214 1214 pullop.cgresult = changegroup.combineresults(results)
1215 1215
1216 1216 # processing phases change
1217 1217 for namespace, value in op.records['listkeys']:
1218 1218 if namespace == 'phases':
1219 1219 _pullapplyphases(pullop, value)
1220 1220
1221 1221 # processing bookmark update
1222 1222 for namespace, value in op.records['listkeys']:
1223 1223 if namespace == 'bookmarks':
1224 1224 pullop.remotebookmarks = value
1225 1225
1226 1226 # bookmark data were either already there or pulled in the bundle
1227 1227 if pullop.remotebookmarks is not None:
1228 1228 _pullbookmarks(pullop)
1229 1229
1230 1230 def _pullbundle2extraprepare(pullop, kwargs):
1231 1231 """hook function so that extensions can extend the getbundle call"""
1232 1232 pass
1233 1233
1234 1234 def _pullchangeset(pullop):
1235 1235 """pull changeset from unbundle into the local repo"""
1236 1236 # We delay the open of the transaction as late as possible so we
1237 1237 # don't open transaction for nothing or you break future useful
1238 1238 # rollback call
1239 1239 if 'changegroup' in pullop.stepsdone:
1240 1240 return
1241 1241 pullop.stepsdone.add('changegroup')
1242 1242 if not pullop.fetch:
1243 1243 pullop.repo.ui.status(_("no changes found\n"))
1244 1244 pullop.cgresult = 0
1245 1245 return
1246 1246 pullop.gettransaction()
1247 1247 if pullop.heads is None and list(pullop.common) == [nullid]:
1248 1248 pullop.repo.ui.status(_("requesting all changes\n"))
1249 1249 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1250 1250 # issue1320, avoid a race if remote changed after discovery
1251 1251 pullop.heads = pullop.rheads
1252 1252
1253 1253 if pullop.remote.capable('getbundle'):
1254 1254 # TODO: get bundlecaps from remote
1255 1255 cg = pullop.remote.getbundle('pull', common=pullop.common,
1256 1256 heads=pullop.heads or pullop.rheads)
1257 1257 elif pullop.heads is None:
1258 1258 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1259 1259 elif not pullop.remote.capable('changegroupsubset'):
1260 1260 raise error.Abort(_("partial pull cannot be done because "
1261 1261 "other repository doesn't support "
1262 1262 "changegroupsubset."))
1263 1263 else:
1264 1264 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1265 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1266 pullop.remote.url())
1265 pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
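This is the commit's core change: applying a changegroup is now a method on the unpacker object (cg1unpacker/cg2unpacker) returned by the wire calls above, rather than a module-level helper that took the unpacker as an argument. Schematically:

# before: changegroup.addchangegroup(repo, cg, source, url)
# after:  cg.apply(repo, source, url)
# e.g. pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())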
1267 1266
1268 1267 def _pullphase(pullop):
1269 1268 # Get remote phases data from remote
1270 1269 if 'phases' in pullop.stepsdone:
1271 1270 return
1272 1271 remotephases = pullop.remote.listkeys('phases')
1273 1272 _pullapplyphases(pullop, remotephases)
1274 1273
1275 1274 def _pullapplyphases(pullop, remotephases):
1276 1275 """apply phase movement from observed remote state"""
1277 1276 if 'phases' in pullop.stepsdone:
1278 1277 return
1279 1278 pullop.stepsdone.add('phases')
1280 1279 publishing = bool(remotephases.get('publishing', False))
1281 1280 if remotephases and not publishing:
1282 1281 # remote is new and unpublishing
1283 1282 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1284 1283 pullop.pulledsubset,
1285 1284 remotephases)
1286 1285 dheads = pullop.pulledsubset
1287 1286 else:
1289 1288 # Remote is old or publishing; all common changesets
1290 1289 # should be seen as public
1290 1289 pheads = pullop.pulledsubset
1291 1290 dheads = []
1292 1291 unfi = pullop.repo.unfiltered()
1293 1292 phase = unfi._phasecache.phase
1294 1293 rev = unfi.changelog.nodemap.get
1295 1294 public = phases.public
1296 1295 draft = phases.draft
1297 1296
1298 1297 # exclude changesets already public locally and update the others
1299 1298 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1300 1299 if pheads:
1301 1300 tr = pullop.gettransaction()
1302 1301 phases.advanceboundary(pullop.repo, tr, public, pheads)
1303 1302
1304 1303 # exclude changesets already draft locally and update the others
1305 1304 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1306 1305 if dheads:
1307 1306 tr = pullop.gettransaction()
1308 1307 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1309 1308
1310 1309 def _pullbookmarks(pullop):
1311 1310 """process the remote bookmark information to update the local one"""
1312 1311 if 'bookmarks' in pullop.stepsdone:
1313 1312 return
1314 1313 pullop.stepsdone.add('bookmarks')
1315 1314 repo = pullop.repo
1316 1315 remotebookmarks = pullop.remotebookmarks
1317 1316 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1318 1317 pullop.remote.url(),
1319 1318 pullop.gettransaction,
1320 1319 explicit=pullop.explicitbookmarks)
1321 1320
1322 1321 def _pullobsolete(pullop):
1323 1322 """utility function to pull obsolete markers from a remote
1324 1323
1325 1324 The `gettransaction` function returns the pull transaction, creating
1326 1325 one if necessary. We return the transaction to inform the calling code that
1327 1326 a new transaction has been created (when applicable).
1328 1327
1329 1328 Exists mostly to allow overriding for experimentation purposes"""
1330 1329 if 'obsmarkers' in pullop.stepsdone:
1331 1330 return
1332 1331 pullop.stepsdone.add('obsmarkers')
1333 1332 tr = None
1334 1333 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1335 1334 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1336 1335 remoteobs = pullop.remote.listkeys('obsolete')
1337 1336 if 'dump0' in remoteobs:
1338 1337 tr = pullop.gettransaction()
1339 1338 for key in sorted(remoteobs, reverse=True):
1340 1339 if key.startswith('dump'):
1341 1340 data = base85.b85decode(remoteobs[key])
1342 1341 pullop.repo.obsstore.mergemarkers(tr, data)
1343 1342 pullop.repo.invalidatevolatilesets()
1344 1343 return tr
1345 1344
1346 1345 def caps20to10(repo):
1347 1346 """return a set with appropriate options to use bundle20 during getbundle"""
1348 1347 caps = set(['HG20'])
1349 1348 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1350 1349 caps.add('bundle2=' + urllib.quote(capsblob))
1351 1350 return caps
1352 1351
1353 1352 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1354 1353 getbundle2partsorder = []
1355 1354
1356 1355 # Mapping between step name and function
1357 1356 #
1358 1357 # This exists to help extensions wrap steps if necessary
1359 1358 getbundle2partsmapping = {}
1360 1359
1361 1360 def getbundle2partsgenerator(stepname, idx=None):
1362 1361 """decorator for function generating bundle2 part for getbundle
1363 1362
1364 1363 The function is added to the step -> function mapping and appended to the
1365 1364 list of steps. Beware that decorated functions will be added in order
1366 1365 (this may matter).
1367 1366
1368 1367 You can only use this decorator for new steps; if you want to wrap a step
1369 1368 from an extension, change the getbundle2partsmapping dictionary directly."""
1370 1369 def dec(func):
1371 1370 assert stepname not in getbundle2partsmapping
1372 1371 getbundle2partsmapping[stepname] = func
1373 1372 if idx is None:
1374 1373 getbundle2partsorder.append(stepname)
1375 1374 else:
1376 1375 getbundle2partsorder.insert(idx, stepname)
1377 1376 return func
1378 1377 return dec
1379 1378
1380 1379 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1381 1380 **kwargs):
1382 1381 """return a full bundle (with potentially multiple kind of parts)
1383 1382
1384 1383 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1385 1384 passed. For now, the bundle can contain only a changegroup, but this will
1386 1385 change when more part types become available for bundle2.
1387 1386
1388 1387 This is different from changegroup.getchangegroup, which only returns an HG10
1389 1388 changegroup bundle. They may eventually get reunited in the future when we
1390 1389 have a clearer idea of the API we want to use to query different data.
1391 1390
1392 1391 The implementation is at a very early stage and will get massive rework
1393 1392 when the API of bundle is refined.
1394 1393 """
1395 1394 # bundle10 case
1396 1395 usebundle2 = False
1397 1396 if bundlecaps is not None:
1398 1397 usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
1399 1398 if not usebundle2:
1400 1399 if bundlecaps and not kwargs.get('cg', True):
1401 1400 raise ValueError(_('request for bundle10 must include changegroup'))
1402 1401
1403 1402 if kwargs:
1404 1403 raise ValueError(_('unsupported getbundle arguments: %s')
1405 1404 % ', '.join(sorted(kwargs.keys())))
1406 1405 return changegroup.getchangegroup(repo, source, heads=heads,
1407 1406 common=common, bundlecaps=bundlecaps)
1408 1407
1409 1408 # bundle20 case
1410 1409 b2caps = {}
1411 1410 for bcaps in bundlecaps:
1412 1411 if bcaps.startswith('bundle2='):
1413 1412 blob = urllib.unquote(bcaps[len('bundle2='):])
1414 1413 b2caps.update(bundle2.decodecaps(blob))
1415 1414 bundler = bundle2.bundle20(repo.ui, b2caps)
1416 1415
1417 1416 kwargs['heads'] = heads
1418 1417 kwargs['common'] = common
1419 1418
1420 1419 for name in getbundle2partsorder:
1421 1420 func = getbundle2partsmapping[name]
1422 1421 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1423 1422 **kwargs)
1424 1423
1425 1424 return util.chunkbuffer(bundler.getchunks())
1426 1425
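A sketch of how a server-side caller might consume the returned chunkbuffer; theirheads, commonnodes, clientcaps and sendtoclient are placeholders for the discovery result and the transport layer:

    bundle = getbundle(repo, 'serve', heads=theirheads, common=commonnodes,
                       bundlecaps=clientcaps)
    while True:
        data = bundle.read(32768)  # chunkbuffer is a file-like object
        if not data:
            break
        sendtoclient(data)         # placeholder transport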
1427 1426 @getbundle2partsgenerator('changegroup')
1428 1427 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1429 1428 b2caps=None, heads=None, common=None, **kwargs):
1430 1429 """add a changegroup part to the requested bundle"""
1431 1430 cg = None
1432 1431 if kwargs.get('cg', True):
1433 1432 # build changegroup bundle here.
1434 1433 version = None
1435 1434 cgversions = b2caps.get('changegroup')
1436 1435 getcgkwargs = {}
1437 1436 if cgversions: # 3.1 and 3.2 ship with an empty value
1438 1437 cgversions = [v for v in cgversions if v in changegroup.packermap]
1439 1438 if not cgversions:
1440 1439 raise ValueError(_('no common changegroup version'))
1441 1440 version = getcgkwargs['version'] = max(cgversions)
1442 1441 outgoing = changegroup.computeoutgoing(repo, heads, common)
1443 1442 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1444 1443 bundlecaps=bundlecaps,
1445 1444 **getcgkwargs)
1446 1445
1447 1446 if cg:
1448 1447 part = bundler.newpart('changegroup', data=cg)
1449 1448 if version is not None:
1450 1449 part.addparam('version', version)
1451 1450 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1452 1451
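The version negotiation above reduces to intersecting the client's advertised changegroup versions with the ones this server can produce and taking the highest; a standalone sketch (both version sets are examples):

    clientversions = ['01', '02']        # e.g. from b2caps['changegroup']
    serverversions = set(['01', '02'])   # akin to changegroup.packermap keys
    common = [v for v in clientversions if v in serverversions]
    if not common:
        raise ValueError('no common changegroup version')
    version = max(common)                # picks '02'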
1453 1452 @getbundle2partsgenerator('listkeys')
1454 1453 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1455 1454 b2caps=None, **kwargs):
1456 1455 """add parts containing listkeys namespaces to the requested bundle"""
1457 1456 listkeys = kwargs.get('listkeys', ())
1458 1457 for namespace in listkeys:
1459 1458 part = bundler.newpart('listkeys')
1460 1459 part.addparam('namespace', namespace)
1461 1460 keys = repo.listkeys(namespace).items()
1462 1461 part.data = pushkey.encodekeys(keys)
1463 1462
1464 1463 @getbundle2partsgenerator('obsmarkers')
1465 1464 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1466 1465 b2caps=None, heads=None, **kwargs):
1467 1466 """add an obsolescence markers part to the requested bundle"""
1468 1467 if kwargs.get('obsmarkers', False):
1469 1468 if heads is None:
1470 1469 heads = repo.heads()
1471 1470 subset = [c.node() for c in repo.set('::%ln', heads)]
1472 1471 markers = repo.obsstore.relevantmarkers(subset)
1473 1472 markers = sorted(markers)
1474 1473 buildobsmarkerspart(bundler, markers)
1475 1474
1476 1475 @getbundle2partsgenerator('hgtagsfnodes')
1477 1476 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1478 1477 b2caps=None, heads=None, common=None,
1479 1478 **kwargs):
1480 1479 """Transfer the .hgtags filenodes mapping.
1481 1480
1482 1481 Only values for heads in this bundle will be transferred.
1483 1482
1484 1483 The part data consists of pairs of 20-byte changeset nodes and raw
1485 1484 .hgtags filenode values.
1486 1485 """
1487 1486 # Don't send unless:
1488 1487 # - changesets are being exchanged,
1489 1488 # - the client supports it.
1490 1489 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1491 1490 return
1492 1491
1493 1492 outgoing = changegroup.computeoutgoing(repo, heads, common)
1494 1493
1495 1494 if not outgoing.missingheads:
1496 1495 return
1497 1496
1498 1497 cache = tags.hgtagsfnodescache(repo.unfiltered())
1499 1498 chunks = []
1500 1499
1501 1500 # .hgtags fnodes are only relevant for head changesets. While we could
1502 1501 # transfer values for all known nodes, there will likely be little to
1503 1502 # no benefit.
1504 1503 #
1505 1504 # We don't bother using a generator to produce output data because
1506 1505 # a) we only have 40 bytes per head and even esoteric numbers of heads
1507 1506 # consume little memory (1M heads is 40MB) b) we don't want to send the
1508 1507 # part if we don't have entries and knowing if we have entries requires
1509 1508 # cache lookups.
1510 1509 for node in outgoing.missingheads:
1511 1510 # Don't compute missing, as this may slow down serving.
1512 1511 fnode = cache.getfnode(node, computemissing=False)
1513 1512 if fnode is not None:
1514 1513 chunks.extend([node, fnode])
1515 1514
1516 1515 if chunks:
1517 1516 bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1518 1517
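Each record in the part is just a 20-byte changeset node followed by the 20-byte .hgtags filenode; a toy sketch of the framing with fabricated nodes:

    node = '\x11' * 20    # fabricated changeset node
    fnode = '\x22' * 20   # fabricated .hgtags filenode
    chunks = [node, fnode]
    partdata = ''.join(chunks)
    assert len(partdata) == 40  # one fixed-size pair per head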
1519 1518 def check_heads(repo, their_heads, context):
1520 1519 """check if the heads of a repo have been modified
1521 1520
1522 1521 Used by peer for unbundling.
1523 1522 """
1524 1523 heads = repo.heads()
1525 1524 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1526 1525 if not (their_heads == ['force'] or their_heads == heads or
1527 1526 their_heads == ['hashed', heads_hash]):
1528 1527 # someone else committed/pushed/unbundled while we
1529 1528 # were transferring data
1530 1529 raise error.PushRaced('repository changed while %s - '
1531 1530 'please try again' % context)
1532 1531
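The ['hashed', ...] form compared above is the sha1 of the concatenated, sorted binary heads; a sketch of how the pushing side would compute it (util.sha1 is essentially hashlib.sha1 here), using fabricated nodes:

    import hashlib

    heads = ['\x22' * 20, '\x11' * 20]  # fabricated binary head nodes
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    their_heads = ['hashed', heads_hash]  # what the peer sends along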
1533 1532 def unbundle(repo, cg, heads, source, url):
1534 1533 """Apply a bundle to a repo.
1535 1534
1536 1535 This function makes sure the repo is locked during the application and has
1537 1536 a mechanism to check that no push race occurred between the creation of
1538 1537 the bundle and its application.
1539 1538
1540 1539 If the push was raced, a PushRaced exception is raised."""
1541 1540 r = 0
1542 1541 # need a transaction when processing a bundle2 stream
1543 1542 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1544 1543 lockandtr = [None, None, None]
1545 1544 recordout = None
1546 1545 # quick fix for output mismatch with bundle2 in 3.4
1547 1546 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1548 1547 False)
1549 1548 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1550 1549 captureoutput = True
1551 1550 try:
1552 1551 check_heads(repo, heads, 'uploading changes')
1553 1552 # push can proceed
1554 1553 if util.safehasattr(cg, 'params'):
1555 1554 r = None
1556 1555 try:
1557 1556 def gettransaction():
1558 1557 if not lockandtr[2]:
1559 1558 lockandtr[0] = repo.wlock()
1560 1559 lockandtr[1] = repo.lock()
1561 1560 lockandtr[2] = repo.transaction(source)
1562 1561 lockandtr[2].hookargs['source'] = source
1563 1562 lockandtr[2].hookargs['url'] = url
1564 1563 lockandtr[2].hookargs['bundle2'] = '1'
1565 1564 return lockandtr[2]
1566 1565
1567 1566 # Do greedy locking by default until we're satisfied with lazy
1568 1567 # locking.
1569 1568 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1570 1569 gettransaction()
1571 1570
1572 1571 op = bundle2.bundleoperation(repo, gettransaction,
1573 1572 captureoutput=captureoutput)
1574 1573 try:
1575 1574 op = bundle2.processbundle(repo, cg, op=op)
1576 1575 finally:
1577 1576 r = op.reply
1578 1577 if captureoutput and r is not None:
1579 1578 repo.ui.pushbuffer(error=True, subproc=True)
1580 1579 def recordout(output):
1581 1580 r.newpart('output', data=output, mandatory=False)
1582 1581 if lockandtr[2] is not None:
1583 1582 lockandtr[2].close()
1584 1583 except BaseException as exc:
1585 1584 exc.duringunbundle2 = True
1586 1585 if captureoutput and r is not None:
1587 1586 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1588 1587 def recordout(output):
1589 1588 part = bundle2.bundlepart('output', data=output,
1590 1589 mandatory=False)
1591 1590 parts.append(part)
1592 1591 raise
1593 1592 else:
1594 1593 lockandtr[1] = repo.lock()
1595 r = changegroup.addchangegroup(repo, cg, source, url)
1594 r = cg.apply(repo, source, url)
1596 1595 finally:
1597 1596 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1598 1597 if recordout is not None:
1599 1598 recordout(repo.ui.popbuffer())
1600 1599 return r
1601 1600
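Stripped of locking and output capture, the dispatch above comes down to one attribute check: bundle2 unbundlers carry a params attribute while cg?unpacker objects do not. A condensed sketch, with names illustrative and error handling omitted:

    def _applyincoming(repo, cg, source, url, gettransaction):
        # bundle2 streams expose ``params``; plain changegroups do not.
        if getattr(cg, 'params', None) is not None:
            op = bundle2.bundleoperation(repo, gettransaction)
            return bundle2.processbundle(repo, cg, op=op).reply
        return cg.apply(repo, source, url)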
1602 1601 def _maybeapplyclonebundle(pullop):
1603 1602 """Apply a clone bundle from a remote, if possible."""
1604 1603
1605 1604 repo = pullop.repo
1606 1605 remote = pullop.remote
1607 1606
1608 1607 if not repo.ui.configbool('experimental', 'clonebundles', False):
1609 1608 return
1610 1609
1611 1610 if pullop.heads:
1612 1611 return
1613 1612
1614 1613 if not remote.capable('clonebundles'):
1615 1614 return
1616 1615
1617 1616 res = remote._call('clonebundles')
1618 1617
1619 1618 # If we call the wire protocol command, that's good enough to record the
1620 1619 # attempt.
1621 1620 pullop.clonebundleattempted = True
1622 1621
1623 1622 entries = parseclonebundlesmanifest(repo, res)
1624 1623 if not entries:
1625 1624 repo.ui.note(_('no clone bundles available on remote; '
1626 1625 'falling back to regular clone\n'))
1627 1626 return
1628 1627
1629 1628 entries = filterclonebundleentries(repo, entries)
1630 1629 if not entries:
1631 1630 # There is a thundering herd concern here. However, if a server
1632 1631 # operator doesn't advertise bundles appropriate for its clients,
1633 1632 # they deserve what's coming. Furthermore, from a client's
1634 1633 # perspective, no automatic fallback would mean not being able to
1635 1634 # clone!
1636 1635 repo.ui.warn(_('no compatible clone bundles available on server; '
1637 1636 'falling back to regular clone\n'))
1638 1637 repo.ui.warn(_('(you may want to report this to the server '
1639 1638 'operator)\n'))
1640 1639 return
1641 1640
1642 1641 entries = sortclonebundleentries(repo.ui, entries)
1643 1642
1644 1643 url = entries[0]['URL']
1645 1644 repo.ui.status(_('applying clone bundle from %s\n') % url)
1646 1645 if trypullbundlefromurl(repo.ui, repo, url):
1647 1646 repo.ui.status(_('finished applying clone bundle\n'))
1648 1647 # Bundle failed.
1649 1648 #
1650 1649 # We abort by default to avoid the thundering herd of
1651 1650 # clients flooding a server that was expecting expensive
1652 1651 # clone load to be offloaded.
1653 1652 elif repo.ui.configbool('ui', 'clonebundlefallback', False):
1654 1653 repo.ui.warn(_('falling back to normal clone\n'))
1655 1654 else:
1656 1655 raise error.Abort(_('error applying bundle'),
1657 1656 hint=_('if this error persists, consider contacting '
1658 1657 'the server operator or disable clone '
1659 1658 'bundles via '
1660 1659 '"--config experimental.clonebundles=false"'))
1661 1660
1662 1661 def parseclonebundlesmanifest(repo, s):
1663 1662 """Parses the raw text of a clone bundles manifest.
1664 1663
1665 1664 Returns a list of dicts. The dicts have a ``URL`` key corresponding
1666 1665 to the URL; the remaining keys are the attributes for the entry.
1667 1666 """
1668 1667 m = []
1669 1668 for line in s.splitlines():
1670 1669 fields = line.split()
1671 1670 if not fields:
1672 1671 continue
1673 1672 attrs = {'URL': fields[0]}
1674 1673 for rawattr in fields[1:]:
1675 1674 key, value = rawattr.split('=', 1)
1676 1675 key = urllib.unquote(key)
1677 1676 value = urllib.unquote(value)
1678 1677 attrs[key] = value
1679 1678
1680 1679 # Parse BUNDLESPEC into components. This makes client-side
1681 1680 # preferences easier to specify since you can prefer a single
1682 1681 # component of the BUNDLESPEC.
1683 1682 if key == 'BUNDLESPEC':
1684 1683 try:
1685 1684 comp, version = parsebundlespec(repo, value,
1686 1685 externalnames=True)
1687 1686 attrs['COMPRESSION'] = comp
1688 1687 attrs['VERSION'] = version
1689 1688 except error.InvalidBundleSpecification:
1690 1689 pass
1691 1690 except error.UnsupportedBundleSpecification:
1692 1691 pass
1693 1692
1694 1693 m.append(attrs)
1695 1694
1696 1695 return m
1697 1696
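A hedged example of input and output for this parser; the URL and attributes are invented, and any repo object will do since strict BUNDLESPEC parsing does not consult it:

    sample = ('https://hg.example.com/full.hg.gz '
              'BUNDLESPEC=gzip-v1 REQUIRESNI=true\n')
    entries = parseclonebundlesmanifest(repo, sample)
    # -> [{'URL': 'https://hg.example.com/full.hg.gz',
    #      'BUNDLESPEC': 'gzip-v1',
    #      'COMPRESSION': 'gzip', 'VERSION': 'v1',
    #      'REQUIRESNI': 'true'}]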
1698 1697 def filterclonebundleentries(repo, entries):
1699 1698 """Remove incompatible clone bundle manifest entries.
1700 1699
1701 1700 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
1702 1701 and returns a new list consisting of only the entries that this client
1703 1702 should be able to apply.
1704 1703
1705 1704 There is no guarantee we'll be able to apply all returned entries because
1706 1705 the metadata we use to filter on may be missing or wrong.
1707 1706 """
1708 1707 newentries = []
1709 1708 for entry in entries:
1710 1709 spec = entry.get('BUNDLESPEC')
1711 1710 if spec:
1712 1711 try:
1713 1712 parsebundlespec(repo, spec, strict=True)
1714 1713 except error.InvalidBundleSpecification as e:
1715 1714 repo.ui.debug(str(e) + '\n')
1716 1715 continue
1717 1716 except error.UnsupportedBundleSpecification as e:
1718 1717 repo.ui.debug('filtering %s because unsupported bundle '
1719 1718 'spec: %s\n' % (entry['URL'], str(e)))
1720 1719 continue
1721 1720
1722 1721 if 'REQUIRESNI' in entry and not sslutil.hassni:
1723 1722 repo.ui.debug('filtering %s because SNI not supported\n' %
1724 1723 entry['URL'])
1725 1724 continue
1726 1725
1727 1726 newentries.append(entry)
1728 1727
1729 1728 return newentries
1730 1729
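For instance, on a client whose Python lacks SNI support, an entry tagged REQUIRESNI would be dropped while a plain one survives (entries invented):

    entries = [{'URL': 'https://a.example.com/sni.hg', 'REQUIRESNI': 'true'},
               {'URL': 'https://b.example.com/plain.hg',
                'BUNDLESPEC': 'gzip-v1'}]
    # Without SNI, filterclonebundleentries(repo, entries) keeps only the
    # second entry; with SNI available, both pass the REQUIRESNI check.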
1731 1730 def sortclonebundleentries(ui, entries):
1732 1731 # experimental config: experimental.clonebundleprefers
1733 1732 prefers = ui.configlist('experimental', 'clonebundleprefers', default=[])
1734 1733 if not prefers:
1735 1734 return list(entries)
1736 1735
1737 1736 prefers = [p.split('=', 1) for p in prefers]
1738 1737
1739 1738 # Our sort function.
1740 1739 def compareentry(a, b):
1741 1740 for prefkey, prefvalue in prefers:
1742 1741 avalue = a.get(prefkey)
1743 1742 bvalue = b.get(prefkey)
1744 1743
1745 1744 # Special case: b is missing the attribute and a matches exactly.
1746 1745 if avalue is not None and bvalue is None and avalue == prefvalue:
1747 1746 return -1
1748 1747
1749 1748 # Special case: a is missing the attribute and b matches exactly.
1750 1749 if bvalue is not None and avalue is None and bvalue == prefvalue:
1751 1750 return 1
1752 1751
1753 1752 # We can't compare unless the attribute is present on both.
1754 1753 if avalue is None or bvalue is None:
1755 1754 continue
1756 1755
1757 1756 # Same values should fall back to next attribute.
1758 1757 if avalue == bvalue:
1759 1758 continue
1760 1759
1761 1760 # Exact matches come first.
1762 1761 if avalue == prefvalue:
1763 1762 return -1
1764 1763 if bvalue == prefvalue:
1765 1764 return 1
1766 1765
1767 1766 # Fall back to next attribute.
1768 1767 continue
1769 1768
1770 1769 # If we got here we couldn't sort by attributes and prefers. Fall
1771 1770 # back to index order.
1772 1771 return 0
1773 1772
1774 1773 return sorted(entries, cmp=compareentry)
1775 1774
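A sketch of how the experimental preference list feeds this comparator; the config value is invented:

    # --config experimental.clonebundleprefers=VERSION=v2,COMPRESSION=gzip
    prefers = [p.split('=', 1) for p in ['VERSION=v2', 'COMPRESSION=gzip']]
    # -> [['VERSION', 'v2'], ['COMPRESSION', 'gzip']]
    # Entries whose VERSION is 'v2' sort first; ties fall through to
    # COMPRESSION, and finally to the original manifest order.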
1776 1775 def trypullbundlefromurl(ui, repo, url):
1777 1776 """Attempt to apply a bundle from a URL."""
1778 1777 lock = repo.lock()
1779 1778 try:
1780 1779 tr = repo.transaction('bundleurl')
1781 1780 try:
1782 1781 try:
1783 1782 fh = urlmod.open(ui, url)
1784 1783 cg = readbundle(ui, fh, 'stream')
1785 1784
1786 1785 if isinstance(cg, bundle2.unbundle20):
1787 1786 bundle2.processbundle(repo, cg, lambda: tr)
1788 1787 else:
1789 changegroup.addchangegroup(repo, cg, 'clonebundles', url)
1788 cg.apply(repo, 'clonebundles', url)
1790 1789 tr.close()
1791 1790 return True
1792 1791 except urllib2.HTTPError as e:
1793 1792 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
1794 1793 except urllib2.URLError as e:
1795 1794 ui.warn(_('error fetching bundle: %s\n') % e.reason)
1796 1795
1797 1796 return False
1798 1797 finally:
1799 1798 tr.release()
1800 1799 finally:
1801 1800 lock.release()