##// END OF EJS Templates
exchange: standalone function to determine if bundle2 is requested...
Gregory Szorc -
r27244:709977a4 default
parent child Browse files
Show More
@@ -1,1851 +1,1854 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib, urllib2
11 11 import util, scmutil, changegroup, base85, error
12 12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 13 import lock as lockmod
14 14 import streamclone
15 15 import sslutil
16 16 import tags
17 17 import url as urlmod
18 18
# Maps bundle compression human names to internal representation.
_bundlespeccompressions = {'none': None,
                           'bzip2': 'BZ',
                           'gzip': 'GZ',
                          }

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                        }

def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        # Split "<version>;k0=v0;k1=v1" into the version and a dict of
        # URI-decoded parameters; a string with no ";" has no parameters.
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urllib.unquote(key)
            value = urllib.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
                _('invalid bundle specification; '
                  'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in _bundlespeccompressions:
            raise error.UnsupportedBundleSpecification(
                    _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in _bundlespeccompressions:
            compression = spec
            # Default to v1 unless the repo requires generaldelta storage.
            version = 'v1'
            if 'generaldelta' in repo.requirements:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            # A bare version; pick the conventional compression for it.
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle specification') % spec)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                    _('missing support for repository features: %s') %
                      ', '.join(sorted(missingreqs)))

    if not externalnames:
        compression = _bundlespeccompressions[compression]
        version = _bundlespeccgversions[version]
    return compression, version, params
142 142
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler object for the bundle data readable from ``fh``.

    ``fname`` is only used in error messages; when empty it is replaced by
    "stream" and a headerless raw changegroup (leading NUL byte) is
    tolerated.  When ``vfs`` is provided, ``fname`` is expanded through it
    for reporting.  Dispatches on the 4-byte magic header to the cg1,
    bundle2 or stream-clone unbundler.
    """
    hdr = changegroup.readexactly(fh, 4)

    compression = None
    if fname:
        if vfs:
            fname = vfs.join(fname)
    else:
        fname = "stream"
        if not hdr.startswith('HG') and hdr.startswith('\0'):
            # Headerless changegroup: push the consumed bytes back and
            # pretend it was an uncompressed HG10 bundle.
            fh = changegroup.headerlessfixup(fh, hdr)
            hdr = "HG10"
            compression = 'UN'

    magic = hdr[0:2]
    version = hdr[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        # HG10 carries a 2-byte compression code unless already known.
        if compression is None:
            compression = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, compression)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    if version == 'S1':
        return streamclone.streamcloneapplier(fh)
    raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
170 170
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker format.
    """
    if not markers:
        return None
    # Pick a marker encoding common to us and the remote side.
    supported = bundle2.obsmarkersversion(bundler.capabilities)
    fmt = obsolete.commonversion(supported)
    if fmt is None:
        raise ValueError('bundler does not support common obsmarker format')
    data = obsolete.encodemarkers(markers, True, version=fmt)
    return bundler.newpart('obsmarkers', data=data)
185 185
186 186 def _canusebundle2(op):
187 187 """return true if a pull/push can use bundle2
188 188
189 189 Feel free to nuke this function when we drop the experimental option"""
190 190 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
191 191 and op.remote.capable('bundle2'))
192 192
193 193
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new one should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        # cgresult truthy means the changegroup was (at least partly) applied,
        # so the planned future heads are now common; otherwise fall back.
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of message used when pushing bookmark
    # action -> (success message template, failure message template)
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
305 305
306 306
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote.

    Returns the ``pushoperation`` object describing the push; its
    ``cgresult`` attribute holds the integer changegroup result:
    - None means nothing to push
    - 0 means HTTP error
    - 1 means we pushed and remote head count is unchanged *or*
      we have outgoing changesets but refused to push
    - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **opargs)
    # For local (filesystem) peers, refuse early if the destination lacks
    # features this repo requires.
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if _canusebundle2(pushop) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # transaction manager is only needed if we may write back
            # (phase moves, bundle2 pushback) to the locked local repo
            pushop.trmanager = transactionmanager(pushop.repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push requires locking the remote repository
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            # _pushchangeset and friends no-op for steps already recorded in
            # pushop.stepsdone by the bundle2 path.
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
            if pushop.trmanager:
                pushop.trmanager.close()
    finally:
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
391 391
392 392 # list of steps to perform discovery before push
393 393 pushdiscoveryorder = []
394 394
395 395 # Mapping between step name and function
396 396 #
397 397 # This exists to help extensions wrap steps if necessary
398 398 pushdiscoverymapping = {}
399 399
400 400 def pushdiscovery(stepname):
401 401 """decorator for function performing discovery before push
402 402
403 403 The function is added to the step -> function mapping and appended to the
404 404 list of steps. Beware that decorated function will be added in order (this
405 405 may matter).
406 406
407 407 You can only use this decorator for a new step, if you want to wrap a step
408 408 from an extension, change the pushdiscovery dictionary directly."""
409 409 def dec(func):
410 410 assert stepname not in pushdiscoverymapping
411 411 pushdiscoverymapping[stepname] = func
412 412 pushdiscoveryorder.append(stepname)
413 413 return func
414 414 return dec
415 415
416 416 def _pushdiscovery(pushop):
417 417 """Run all discovery steps"""
418 418 for stepname in pushdiscoveryorder:
419 419 step = pushdiscoverymapping[stepname]
420 420 step(pushop)
421 421
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    # First find what the remote already has in common with us...
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    # ...then compute the outgoing set, limited to the requested revs.
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
434 434
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    # extracond restricts the revset to public-on-remote changesets when the
    # server is not publishing (a publishing server makes everything public).
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    # future: phase moves assuming the changeset push succeeds
    # fallback: phase moves if the changeset push fails
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
483 483
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect the obsolescence markers relevant to the pushed heads"""
    repo = pushop.repo
    # all three conditions must hold, checked in the same short-circuit
    # order as before: exchange enabled, local markers exist, remote
    # advertises obsolescence support.
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
494 494
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """discover which bookmarks need pushing

    Fills ``pushop.outbookmarks`` with (name, remote-id, local-id) tuples;
    an empty remote-id means "create", an empty local-id means "delete".
    Sets ``pushop.bkresult`` to 2 when an explicitly requested bookmark
    exists neither locally nor remotely.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict bookmark moves to ancestors of the pushed revs
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    # bookmarks that advanced locally: push if within the pushed set
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
545 545
def _pushcheckoutgoing(pushop):
    """validate that the outgoing changesets may be pushed

    Returns False when there is nothing to push (after telling the user),
    True otherwise.  Unless ``--force`` was given, aborts on outgoing
    obsolete/troubled heads and runs the remote-head safety checks.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.troubled():
                    raise error.Abort(mst[ctx.troubles()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
576 576
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the b2partsgenmapping dictionary directly."""
    def register(func):
        # duplicate registration is a programming error
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            # default: run after every previously registered step
            b2partsgenorder.append(stepname)
        else:
            # explicit position requested
            b2partsgenorder.insert(idx, stepname)
        return func
    return register
603 603
604 604 def _pushb2ctxcheckheads(pushop, bundler):
605 605 """Generate race condition checking parts
606 606
607 607 Exists as an independent function to aid extensions
608 608 """
609 609 if not pushop.force:
610 610 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
611 611
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)

    _pushb2ctxcheckheads(pushop, bundler)

    # Negotiate the changegroup version with the remote's advertised list.
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        # highest mutually supported version wins
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
653 653
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    One ``pushkey`` part is emitted per head to move to public; replies are
    checked per part so a failed move can be reported precisely.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if not 'pushkey' in b2caps:
        return
    pushop.stepsdone.add('phases')
    # remember which node each part id moves, for reply/failure handling
    part2node = []

    def handlefailure(pushop, exc):
        # called when the server rejects a mandatory pushkey part
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        # warn (don't abort) about phase moves the server ignored or refused
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
694 694
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsmarkers part when the remote supports a common format"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    # skip (and leave the step unclaimed) when no common format exists
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
706 706
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    # (part id, bookmark name, action) for reply/failure handling
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        # called when the server rejects a mandatory pushkey part
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty old -> creation; empty new -> deletion; otherwise a move
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
758 758
759 759
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    # let each registered generator add its parts; truthy returns are
    # callbacks to run against the server's reply
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(stream, ['force'], 'push')
        except error.BundleValueError as exc:
            raise error.Abort('missing support for %s' % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort('missing support for %s' % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            raise error.Abort(_('push failed on remote'), hint=exc.hint)
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        # NOTE(review): the callback is expected to raise (e.g. Abort);
        # if it ever returned normally, ``op`` below could be unbound.
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
805 805
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    No-op when the bundle2 path already handled the 'changesets' step.
    Stores the integer result in ``pushop.cgresult``.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                            or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
854 854
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    First applies the remote's phase information to the local repo
    (via _localphasemove), then pushes any locally-public heads that the
    remote still considers draft, using individual pushkey calls as a
    fallback when bundle2 did not already handle the 'phases' step.
    """
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            # publishing remote: everything common becomes public locally
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                # pushkey returned falsy: the remote refused the move
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
910 910
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo

    Requires an active transaction manager on ``pushop``; when the repo is
    not locked (no trmanager), no phase is changed and the user is merely
    informed of the skipped update.
    """
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
927 927
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote

    Fallback (pushkey-based) transfer used when bundle2 did not already
    perform the 'obsmarkers' step. Markers are escaped into 'dump*'
    pushkey entries by obsolete._pushkeyescape and sent one key at a time.
    """
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        # any falsy pushkey reply means at least one key was rejected
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
946 946
947 947 def _pushbookmark(pushop):
948 948 """Update bookmark position on remote"""
949 949 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
950 950 return
951 951 pushop.stepsdone.add('bookmarks')
952 952 ui = pushop.ui
953 953 remote = pushop.remote
954 954
955 955 for b, old, new in pushop.outbookmarks:
956 956 action = 'update'
957 957 if not old:
958 958 action = 'export'
959 959 elif not new:
960 960 action = 'delete'
961 961 if remote.pushkey('bookmarks', b, old, new):
962 962 ui.status(bookmsgmap[action][0] % b)
963 963 else:
964 964 ui.warn(bookmsgmap[action][1] % b)
965 965 # discovery can have set the value form invalid entry
966 966 if pushop.bkresult is not None:
967 967 pushop.bkresult = 1
968 968
class pulloperation(object):
    """A object that represent a single pull operation

    Its purpose is to carry pull related state and very common operations.

    A new instance should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        # cached: whether this pull may use the bundle2 protocol
        return _canusebundle2(self)

    @util.propertycache
    def remotebundle2caps(self):
        # cached: bundle2 capabilities advertised by the remote peer
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1038 1038
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks
    when closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        # the transaction is created lazily by transaction()
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if self._tr:
            return self._tr
        trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
        tr = self.repo.transaction(trname)
        tr.hookargs['source'] = self.source
        tr.hookargs['url'] = self.url
        self._tr = tr
        return tr

    def close(self):
        """close transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.close()

    def release(self):
        """release transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.release()
1068 1068
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)
    if pullop.remote.local():
        # local-to-local pull: refuse if this repo lacks a requirement of
        # the source repository
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        streamclone.maybeperformlegacystreamclone(pullop)
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        # the remaining steps each check pullop.stepsdone and become no-ops
        # when the bundle2 exchange above already covered them
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        # release the transaction before the lock, mirroring acquisition order
        pullop.trmanager.release()
        lock.release()

    return pullop
1123 1123
# list of steps to perform discovery before pull
# (step names, run in registration order by _pulldiscovery)
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
1131 1131
def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The decorated function is registered under ``stepname`` in the
    step -> function mapping and appended to the ordered step list, so
    decoration order determines execution order.

    You can only use this decorator for a new step; to wrap a step from an
    extension, change the pulldiscovery mapping directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoveryorder.append(stepname)
        pulldiscoverymapping[stepname] = func
        return func
    return dec
1147 1147
def _pulldiscovery(pullop):
    """Run every registered discovery step, in registration order."""
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
1153 1153
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # bookmark data was supplied up front; nothing to fetch
        return
    usingbundle2 = pullop.canusebundle2
    if usingbundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice
        # with new implementations.
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1167 1167
1168 1168
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Current handle changeset discovery only, will change handle all discovery
    at some point.

    Stores results on ``pullop``: ``common`` (shared nodes), ``fetch``
    (nodes to retrieve) and ``rheads`` (remote heads).
    """
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote heads in filtered locally, lets drop it from the unknown
        # remote heads and put in back in common.
        #
        # This is a hackish solution to catch most of "common but locally
        # hidden situation". We do not performs discovery on unfiltered
        # repository because it end up doing a pathological amount of round
        # trip for w huge amount of changeset we do not care about.
        #
        # If a set of such "common but filtered" changeset exist on the server
        # but are not including a remote heads, we'll not be able to detect it,
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                # node is known locally (possibly hidden): treat as common
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            # every remote head was known locally: nothing left to fetch
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1206 1206
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup.

    Builds the getbundle keyword arguments (changegroup, listkeys,
    obsmarkers, clone-bundle flag), fetches a single bundle2 stream from
    the remote and processes it, then applies any phase and bookmark data
    returned through listkeys parts.
    """
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    streaming, streamreqs = streamclone.canperformstreamclone(pullop)

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in pullop.remotebundle2caps:
        kwargs['listkeys'] = ['phase']
        if pullop.remotebookmarks is None:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs['listkeys'].append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            # both sides share an obsmarker format: fetch markers too
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError as exc:
        raise error.Abort('missing support for %s' % exc)

    if pullop.fetch:
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1274 1274
1275 1275 def _pullbundle2extraprepare(pullop, kwargs):
1276 1276 """hook function so that extensions can extend the getbundle call"""
1277 1277 pass
1278 1278
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Legacy (non-bundle2) changegroup transfer. Picks the richest wire
    command the remote supports (getbundle, changegroupsubset, or the
    full-repo changegroup) and applies the result locally.
    """
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        # old protocol: fetch everything missing
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
1311 1311
1312 1312 def _pullphase(pullop):
1313 1313 # Get remote phases data from remote
1314 1314 if 'phases' in pullop.stepsdone:
1315 1315 return
1316 1316 remotephases = pullop.remote.listkeys('phases')
1317 1317 _pullapplyphases(pullop, remotephases)
1318 1318
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the pushkey 'phases' namespace dict from the remote.
    Heads of the pulled subset are advanced to public (and, for a
    non-publishing remote, the rest to draft); phases only ever advance,
    never retreat.
    """
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1353 1353
1354 1354 def _pullbookmarks(pullop):
1355 1355 """process the remote bookmark information to update the local one"""
1356 1356 if 'bookmarks' in pullop.stepsdone:
1357 1357 return
1358 1358 pullop.stepsdone.add('bookmarks')
1359 1359 repo = pullop.repo
1360 1360 remotebookmarks = pullop.remotebookmarks
1361 1361 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1362 1362 pullop.remote.url(),
1363 1363 pullop.gettransaction,
1364 1364 explicit=pullop.explicitbookmarks)
1365 1365
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            # 'dump0' is always present when markers exist; only then is a
            # transaction needed
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr
1389 1389
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    # advertise bundle2 support plus the URL-quoted capability blob
    return set(['HG20', 'bundle2=' + urllib.quote(capsblob)])
1396 1396
# List of names of steps to perform for a bundle2 for getbundle, order matters.
# (consumed by getbundle() to assemble the bundle parts)
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
1404 1404
def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The decorated function is registered under ``stepname`` and appended
    to the ordered step list (or inserted at position ``idx`` when given),
    so decoration order determines part-generation order.

    You can only use this decorator for new steps; to wrap a step from an
    extension, modify the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        # inserting at the end is equivalent to appending
        pos = len(getbundle2partsorder) if idx is None else idx
        getbundle2partsorder.insert(pos, stepname)
        return func
    return dec
1423 1423
def bundle2requested(bundlecaps):
    """Return True when the given capability set asks for a bundle2 stream.

    A caller requests bundle2 by advertising any capability whose name
    starts with 'HG2'; an absent (None) capability set means bundle1.
    """
    if bundlecaps is None:
        return False
    for cap in bundlecaps:
        if cap.startswith('HG2'):
            return True
    return False
1428
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    changes when more part type will be available for bundle2.

    This is different from changegroup.getchangegroup that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we what to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # Fix: delegate bundle2 detection to the standalone helper; the old
    # inline detection (usebundle2 = False / any(cap.startswith('HG2')...))
    # was superseded and must not clobber this result.
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            # decode the URL-quoted capability blob advertised by the client
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # let every registered part generator contribute to the bundle, in order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1470 1473
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle

    Honors the client's 'cg' request flag and negotiates the changegroup
    version from the client's advertised b2caps.
    """
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = None
        cgversions = b2caps.get('changegroup')
        getcgkwargs = {}
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            # keep only versions this server can produce
            cgversions = [v for v in cgversions if v in changegroup.packermap]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = getcgkwargs['version'] = max(cgversions)
        outgoing = changegroup.computeoutgoing(repo, heads, common)
        cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                                bundlecaps=bundlecaps,
                                                **getcgkwargs)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if version is not None:
            part.addparam('version', version)
        part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1496 1499
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    # one 'listkeys' part per requested pushkey namespace
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1507 1510
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle

    Only acts when the client asked for markers via the 'obsmarkers' flag;
    markers relevant to the ancestors of ``heads`` are sent, sorted.
    """
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        # every changeset reachable from the requested heads
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        buildobsmarkerspart(bundler, markers)
1519 1522
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changeset are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = changegroup.computeoutgoing(repo, heads, common)

    if not outgoing.missingheads:
        return

    cache = tags.hgtagsfnodescache(repo.unfiltered())
    chunks = []

    # .hgtags fnodes are only relevant for head changesets. While we could
    # transfer values for all known nodes, there will likely be little to
    # no benefit.
    #
    # We don't bother using a generator to produce output data because
    # a) we only have 40 bytes per head and even esoteric numbers of heads
    # consume little memory (1M heads is 40MB) b) we don't want to send the
    # part if we don't have entries and knowing if we have entries requires
    # cache lookups.
    for node in outgoing.missingheads:
        # Don't compute missing, as this may slow down serving.
        fnode = cache.getfnode(node, computemissing=False)
        if fnode is not None:
            chunks.extend([node, fnode])

    if chunks:
        # only emit the part when we actually have data
        bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1562 1565
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    # accept a forced push, an exact head match, or a matching head digest
    if their_heads == ['force']:
        return
    if their_heads == heads:
        return
    if their_heads == ['hashed', heads_hash]:
        return
    # someone else committed/pushed/unbundled while we
    # were transferring data
    raise error.PushRaced('repository changed while %s - '
                          'please try again' % context)
1576 1579
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised.
    """
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # bundle2 stream: parts may need a transaction, created lazily
            # via gettransaction() below.
            r = None
            try:
                def gettransaction():
                    # Acquire wlock, lock and transaction on first use only,
                    # mutating the shared lockandtr list so the enclosing
                    # finally can release whatever was actually taken.
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                            captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        # Buffer all further ui output so it can be relayed
                        # to the client in an 'output' part of the reply.
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # Flag the exception as having occurred during bundle2
                # processing and salvage any already-captured output parts
                # so they can still be sent back to the client.
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
        else:
            # Legacy changegroup (bundle1): a plain store lock suffices.
            lockandtr[1] = repo.lock()
            r = cg.apply(repo, source, url)
    finally:
        # Release in reverse acquisition order; entries still None are
        # ignored by lockmod.release.
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
1645 1648
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote
    ui = repo.ui

    # Clone bundles only make sense when seeding a full clone: the feature
    # must be enabled, the local repo must be empty, no specific heads may
    # be requested, and the server has to advertise the capability.
    if (not ui.configbool('experimental', 'clonebundles', False)
        or len(repo)
        or pullop.heads
        or not remote.capable('clonebundles')):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        ui.note(_('no clone bundles available on remote; '
                  'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(repo, entries)
    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        ui.warn(_('no compatible clone bundles available on server; '
                  'falling back to regular clone\n'))
        ui.warn(_('(you may want to report this to the server '
                  'operator)\n'))
        return

    entries = sortclonebundleentries(ui, entries)

    url = entries[0]['URL']
    ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(ui, repo, url):
        ui.status(_('finished applying clone bundle\n'))
        return

    # Bundle failed. We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive clone load
    # to be offloaded.
    if ui.configbool('ui', 'clonebundlefallback', False):
        ui.warn(_('falling back to normal clone\n'))
        return
    raise error.Abort(_('error applying bundle'),
                      hint=_('if this error persists, consider contacting '
                             'the server operator or disable clone '
                             'bundles via '
                             '"--config experimental.clonebundles=false"'))
1709 1712
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    entries = []
    for line in s.splitlines():
        fields = line.split()
        # Blank lines carry no entry.
        if not fields:
            continue

        entry = {'URL': fields[0]}
        for rawattr in fields[1:]:
            name, value = rawattr.split('=', 1)
            name = urllib.unquote(name)
            value = urllib.unquote(value)
            entry[name] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if name == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    entry['COMPRESSION'] = comp
                    entry['VERSION'] = version
                except (error.InvalidBundleSpecification,
                        error.UnsupportedBundleSpecification):
                    # Malformed or unknown specs still yield an entry; only
                    # the derived COMPRESSION/VERSION keys are omitted.
                    pass

        entries.append(entry)

    return entries
1745 1748
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    def compatible(entry):
        # An entry survives filtering when its bundle spec (if any) is
        # understood and any SNI requirement is satisfiable.
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                return False
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                return False

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            return False

        return True

    return [entry for entry in entries if compatible(entry)]
1778 1781
def sortclonebundleentries(ui, entries):
    """Order manifest entries by the user's attribute preferences.

    Returns a new list; when no preferences are configured, manifest order
    is preserved.
    """
    # experimental config: experimental.clonebundleprefers
    prefers = ui.configlist('experimental', 'clonebundleprefers', default=[])
    if not prefers:
        return list(entries)

    preferences = [p.split('=', 1) for p in prefers]

    def cmpentries(left, right):
        for key, wanted in preferences:
            lval = left.get(key)
            rval = right.get(key)

            # An entry exactly matching the preferred value outranks one
            # that lacks the attribute entirely.
            if lval == wanted and rval is None:
                return -1
            if rval == wanted and lval is None:
                return 1

            # Attribute missing on either side (without the exact match
            # above) or identical values: not decisive, consult next key.
            if lval is None or rval is None or lval == rval:
                continue

            # Both present and different: the exact match wins.
            if lval == wanted:
                return -1
            if rval == wanted:
                return 1

            # Neither matched; fall through to the next preference.
            continue

        # No preference was decisive; keep original (manifest) order.
        return 0

    return sorted(entries, cmp=cmpentries)
1823 1826
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Fetches the bundle at ``url`` and applies it while holding the repo
    lock inside a transaction. Returns True on success; network errors are
    reported as warnings and yield False so the caller can fall back to a
    regular clone.
    """
    lock = repo.lock()
    try:
        tr = repo.transaction('bundleurl')
        try:
            try:
                fh = urlmod.open(ui, url)
                cg = readbundle(ui, fh, 'stream')

                # Dispatch on the detected bundle format: bundle2 streams,
                # stream clone bundles and legacy changegroups are applied
                # through different code paths.
                if isinstance(cg, bundle2.unbundle20):
                    bundle2.processbundle(repo, cg, lambda: tr)
                elif isinstance(cg, streamclone.streamcloneapplier):
                    cg.apply(repo)
                else:
                    cg.apply(repo, 'clonebundles', url)
                tr.close()
                return True
            except urllib2.HTTPError as e:
                ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
            except urllib2.URLError as e:
                ui.warn(_('error fetching bundle: %s\n') % e.reason[1])

            # Reached only on fetch/apply failure; the finally below
            # releases (rolls back) the still-open transaction.
            return False
        finally:
            tr.release()
    finally:
        lock.release()
General Comments 0
You need to be logged in to leave comments. Login now