pushoperation: fix language issues in docstring
Nathan Goldbaum
r28456:d9d51da7 default
@@ -1,1937 +1,1937 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import urllib
12 12 import urllib2
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 hex,
17 17 nullid,
18 18 )
19 19 from . import (
20 20 base85,
21 21 bookmarks as bookmod,
22 22 bundle2,
23 23 changegroup,
24 24 discovery,
25 25 error,
26 26 lock as lockmod,
27 27 obsolete,
28 28 phases,
29 29 pushkey,
30 30 scmutil,
31 31 sslutil,
32 32 streamclone,
33 33 tags,
34 34 url as urlmod,
35 35 util,
36 36 )
37 37
38 38 # Maps bundle compression human names to internal representation.
39 39 _bundlespeccompressions = {'none': None,
40 40 'bzip2': 'BZ',
41 41 'gzip': 'GZ',
42 42 }
43 43
44 44 # Maps bundle version human names to changegroup versions.
45 45 _bundlespeccgversions = {'v1': '01',
46 46 'v2': '02',
47 47 'packed1': 's1',
48 48 'bundle2': '02', #legacy
49 49 }
50 50
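# For example, per the two tables above, the human-readable spec 'gzip-v2'
# maps to ('GZ', '02') internally, 'none-packed1' maps to (None, 's1'), and
# 'bundle2' survives only as a legacy alias for the '02' changegroup version.
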
51 51 def parsebundlespec(repo, spec, strict=True, externalnames=False):
52 52 """Parse a bundle string specification into parts.
53 53
54 54 Bundle specifications denote a well-defined bundle/exchange format.
55 55 The content of a given specification should not change over time in
56 56 order to ensure that bundles produced by a newer version of Mercurial are
57 57 readable from an older version.
58 58
59 59 The string currently has the form:
60 60
61 61 <compression>-<type>[;<parameter0>[;<parameter1>]]
62 62
63 63 Where <compression> is one of the supported compression formats
64 64 and <type> is (currently) a version string. A ";" can follow the type and
65 65 all text afterwards is interpreted as URI encoded, ";" delimited key=value
66 66 pairs.
67 67
68 68 If ``strict`` is True (the default) <compression> is required. Otherwise,
69 69 it is optional.
70 70
71 71 If ``externalnames`` is False (the default), the human-centric names will
72 72 be converted to their internal representation.
73 73
74 74 Returns a 3-tuple of (compression, version, parameters). Compression will
75 75 be ``None`` if not in strict mode and a compression isn't defined.
76 76
77 77 An ``InvalidBundleSpecification`` is raised when the specification is
78 78 not syntactically well formed.
79 79
80 80 An ``UnsupportedBundleSpecification`` is raised when the compression or
81 81 bundle type/version is not recognized.
82 82
83 83 Note: this function will likely eventually return a more complex data
84 84 structure, including bundle2 part information.
85 85 """
86 86 def parseparams(s):
87 87 if ';' not in s:
88 88 return s, {}
89 89
90 90 params = {}
91 91 version, paramstr = s.split(';', 1)
92 92
93 93 for p in paramstr.split(';'):
94 94 if '=' not in p:
95 95 raise error.InvalidBundleSpecification(
96 96 _('invalid bundle specification: '
97 97 'missing "=" in parameter: %s') % p)
98 98
99 99 key, value = p.split('=', 1)
100 100 key = urllib.unquote(key)
101 101 value = urllib.unquote(value)
102 102 params[key] = value
103 103
104 104 return version, params
105 105
106 106
107 107 if strict and '-' not in spec:
108 108 raise error.InvalidBundleSpecification(
109 109 _('invalid bundle specification; '
110 110 'must be prefixed with compression: %s') % spec)
111 111
112 112 if '-' in spec:
113 113 compression, version = spec.split('-', 1)
114 114
115 115 if compression not in _bundlespeccompressions:
116 116 raise error.UnsupportedBundleSpecification(
117 117 _('%s compression is not supported') % compression)
118 118
119 119 version, params = parseparams(version)
120 120
121 121 if version not in _bundlespeccgversions:
122 122 raise error.UnsupportedBundleSpecification(
123 123 _('%s is not a recognized bundle version') % version)
124 124 else:
125 125 # Value could be just the compression or just the version, in which
126 126 # case some defaults are assumed (but only when not in strict mode).
127 127 assert not strict
128 128
129 129 spec, params = parseparams(spec)
130 130
131 131 if spec in _bundlespeccompressions:
132 132 compression = spec
133 133 version = 'v1'
134 134 if 'generaldelta' in repo.requirements:
135 135 version = 'v2'
136 136 elif spec in _bundlespeccgversions:
137 137 if spec == 'packed1':
138 138 compression = 'none'
139 139 else:
140 140 compression = 'bzip2'
141 141 version = spec
142 142 else:
143 143 raise error.UnsupportedBundleSpecification(
144 144 _('%s is not a recognized bundle specification') % spec)
145 145
146 146 # The specification for packed1 can optionally declare the data formats
147 147 # required to apply it. If we see this metadata, compare against what the
148 148 # repo supports and error if the bundle isn't compatible.
149 149 if version == 'packed1' and 'requirements' in params:
150 150 requirements = set(params['requirements'].split(','))
151 151 missingreqs = requirements - repo.supportedformats
152 152 if missingreqs:
153 153 raise error.UnsupportedBundleSpecification(
154 154 _('missing support for repository features: %s') %
155 155 ', '.join(sorted(missingreqs)))
156 156
157 157 if not externalnames:
158 158 compression = _bundlespeccompressions[compression]
159 159 version = _bundlespeccgversions[version]
160 160 return compression, version, params
161 161
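# A minimal usage sketch (hypothetical helper, not upstream code): expected
# return values of parsebundlespec, assuming ``repo`` is a localrepository
# whose requirements include 'generaldelta' and whose supportedformats
# include 'revlogv1'.
def _parsebundlespecexamples(repo):
    # a full spec: external names map to internal ones via the tables above
    assert parsebundlespec(repo, 'gzip-v2') == ('GZ', '02', {})
    # non-strict mode accepts a bare compression; the version defaults to
    # 'v2' here because the repo uses generaldelta
    assert parsebundlespec(repo, 'gzip', strict=False) == ('GZ', '02', {})
    # a bare version defaults to bzip2 compression
    assert parsebundlespec(repo, 'v1', strict=False) == ('BZ', '01', {})
    # parameters follow a ';' as URI-encoded, ';'-delimited key=value pairs
    assert (parsebundlespec(repo, 'none-packed1;requirements=revlogv1') ==
            (None, 's1', {'requirements': 'revlogv1'}))
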
162 162 def readbundle(ui, fh, fname, vfs=None):
163 163 header = changegroup.readexactly(fh, 4)
164 164
165 165 alg = None
166 166 if not fname:
167 167 fname = "stream"
168 168 if not header.startswith('HG') and header.startswith('\0'):
169 169 fh = changegroup.headerlessfixup(fh, header)
170 170 header = "HG10"
171 171 alg = 'UN'
172 172 elif vfs:
173 173 fname = vfs.join(fname)
174 174
175 175 magic, version = header[0:2], header[2:4]
176 176
177 177 if magic != 'HG':
178 178 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
179 179 if version == '10':
180 180 if alg is None:
181 181 alg = changegroup.readexactly(fh, 2)
182 182 return changegroup.cg1unpacker(fh, alg)
183 183 elif version.startswith('2'):
184 184 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
185 185 elif version == 'S1':
186 186 return streamclone.streamcloneapplier(fh)
187 187 else:
188 188 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
189 189
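# Dispatch summary (editorial sketch, not upstream code): the four-byte
# header selects the unbundler, e.g. 'HG10BZ' -> cg1unpacker with bzip2,
# 'HG10UN' -> cg1unpacker uncompressed, 'HG2x' -> bundle2 unbundler, and
# 'HGS1' -> streamcloneapplier; headerless data is fixed up as 'HG10'/'UN'.
def _readbundleexample(ui):
    # hypothetical file name; the header dispatch happens inside readbundle
    fh = open('example.hg', 'rb')
    return readbundle(ui, fh, 'example.hg')
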
190 190 def getbundlespec(ui, fh):
191 191 """Infer the bundlespec from a bundle file handle.
192 192
193 193 The input file handle is seeked and the original seek position is not
194 194 restored.
195 195 """
196 196 def speccompression(alg):
197 197 for k, v in _bundlespeccompressions.items():
198 198 if v == alg:
199 199 return k
200 200 return None
201 201
202 202 b = readbundle(ui, fh, None)
203 203 if isinstance(b, changegroup.cg1unpacker):
204 204 alg = b._type
205 205 if alg == '_truncatedBZ':
206 206 alg = 'BZ'
207 207 comp = speccompression(alg)
208 208 if not comp:
209 209 raise error.Abort(_('unknown compression algorithm: %s') % alg)
210 210 return '%s-v1' % comp
211 211 elif isinstance(b, bundle2.unbundle20):
212 212 if 'Compression' in b.params:
213 213 comp = speccompression(b.params['Compression'])
214 214 if not comp:
215 215 raise error.Abort(_('unknown compression algorithm: %s') % comp)
216 216 else:
217 217 comp = 'none'
218 218
219 219 version = None
220 220 for part in b.iterparts():
221 221 if part.type == 'changegroup':
222 222 version = part.params['version']
223 223 if version in ('01', '02'):
224 224 version = 'v2'
225 225 else:
226 226 raise error.Abort(_('changegroup version %s does not have '
227 227 'a known bundlespec') % version,
228 228 hint=_('try upgrading your Mercurial '
229 229 'client'))
230 230
231 231 if not version:
232 232 raise error.Abort(_('could not identify changegroup version in '
233 233 'bundle'))
234 234
235 235 return '%s-%s' % (comp, version)
236 236 elif isinstance(b, streamclone.streamcloneapplier):
237 237 requirements = streamclone.readbundle1header(fh)[2]
238 238 params = 'requirements=%s' % ','.join(sorted(requirements))
239 239 return 'none-packed1;%s' % urllib.quote(params)
240 240 else:
241 241 raise error.Abort(_('unknown bundle type: %s') % b)
242 242
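# Rough inverse sketch (hypothetical helper, not upstream code):
# getbundlespec recovers a spec string such as 'bzip2-v1', 'gzip-v2' or
# 'none-packed1;requirements=...' from an existing bundle file.
def _getbundlespecexample(ui):
    # hypothetical path; the handle is seeked and not restored, so reopen
    # the file before using it again
    fh = open('example.hg', 'rb')
    return getbundlespec(ui, fh)
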
243 243 def buildobsmarkerspart(bundler, markers):
244 244 """add an obsmarker part to the bundler with <markers>
245 245
246 246 No part is created if markers is empty.
247 247 Raises ValueError if the bundler doesn't support any known obsmarker format.
248 248 """
249 249 if markers:
250 250 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
251 251 version = obsolete.commonversion(remoteversions)
252 252 if version is None:
253 253 raise ValueError('bundler does not support common obsmarker format')
254 254 stream = obsolete.encodemarkers(markers, True, version=version)
255 255 return bundler.newpart('obsmarkers', data=stream)
256 256 return None
257 257
258 258 def _canusebundle2(op):
259 259 """return true if a pull/push can use bundle2
260 260
261 261 Feel free to nuke this function when we drop the experimental option"""
262 262 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
263 263 and op.remote.capable('bundle2'))
264 264
265 265
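# Editorial note: because the knob above is read at push/pull time, a user
# or a test can force the legacy (non-bundle2) exchange path regardless of
# remote capabilities, e.g. (hypothetical session, ``op`` being a push or
# pull operation):
#
#   op.repo.ui.setconfig('experimental', 'bundle2-exp', False, 'test')
#   assert not _canusebundle2(op)
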
266 266 class pushoperation(object):
267 267 """A object that represent a single push operation
268 268
269 It purpose is to carry push related state and very common operation.
269 Its purpose is to carry push related state and very common operations.
270 270
271 A new should be created at the beginning of each push and discarded
272 afterward.
271 A new pushoperation should be created at the beginning of each push and
272 discarded afterward.
273 273 """
274 274
275 275 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
276 276 bookmarks=()):
277 277 # repo we push from
278 278 self.repo = repo
279 279 self.ui = repo.ui
280 280 # repo we push to
281 281 self.remote = remote
282 282 # force option provided
283 283 self.force = force
284 284 # revs to be pushed (None is "all")
285 285 self.revs = revs
287 287 # bookmarks explicitly pushed
287 287 self.bookmarks = bookmarks
288 288 # allow push of new branch
289 289 self.newbranch = newbranch
290 290 # did a local lock get acquired?
291 291 self.locallocked = None
293 293 # steps already performed
294 294 # (used to check what steps have already been performed through bundle2)
294 294 self.stepsdone = set()
295 295 # Integer version of the changegroup push result
296 296 # - None means nothing to push
297 297 # - 0 means HTTP error
298 298 # - 1 means we pushed and remote head count is unchanged *or*
299 299 # we have outgoing changesets but refused to push
300 300 # - other values as described by addchangegroup()
301 301 self.cgresult = None
302 302 # Boolean value for the bookmark push
303 303 self.bkresult = None
304 304 # discover.outgoing object (contains common and outgoing data)
305 305 self.outgoing = None
306 306 # all remote heads before the push
307 307 self.remoteheads = None
308 308 # testable as a boolean indicating if any nodes are missing locally.
309 309 self.incoming = None
310 310 # phase changes that must be pushed alongside the changesets
311 311 self.outdatedphases = None
312 312 # phase changes that must be pushed if the changeset push fails
313 313 self.fallbackoutdatedphases = None
314 314 # outgoing obsmarkers
315 315 self.outobsmarkers = set()
316 316 # outgoing bookmarks
317 317 self.outbookmarks = []
318 318 # transaction manager
319 319 self.trmanager = None
320 320 # map { pushkey partid -> callback handling failure}
321 321 # used to handle exception from mandatory pushkey part failure
322 322 self.pkfailcb = {}
323 323
324 324 @util.propertycache
325 325 def futureheads(self):
326 326 """future remote heads if the changeset push succeeds"""
327 327 return self.outgoing.missingheads
328 328
329 329 @util.propertycache
330 330 def fallbackheads(self):
331 331 """future remote heads if the changeset push fails"""
332 332 if self.revs is None:
333 333 # no targets to push, all common heads are relevant
334 334 return self.outgoing.commonheads
335 335 unfi = self.repo.unfiltered()
336 336 # I want cheads = heads(::missingheads and ::commonheads)
337 337 # (missingheads is revs with secret changeset filtered out)
338 338 #
339 339 # This can be expressed as:
340 340 # cheads = ( (missingheads and ::commonheads)
341 341 #          + (commonheads and ::missingheads)
342 342 #          )
343 343 #
344 344 # while trying to push we already computed the following:
345 345 # common = (::commonheads)
346 346 # missing = ((commonheads::missingheads) - commonheads)
347 347 #
348 348 # We can pick:
349 349 # * missingheads part of common (::commonheads)
350 350 common = self.outgoing.common
351 351 nm = self.repo.changelog.nodemap
352 352 cheads = [node for node in self.revs if nm[node] in common]
353 353 # and
354 354 # * commonheads parents on missing
355 355 revset = unfi.set('%ln and parents(roots(%ln))',
356 356 self.outgoing.commonheads,
357 357 self.outgoing.missing)
358 358 cheads.extend(c.node() for c in revset)
359 359 return cheads
360 360
361 361 @property
362 362 def commonheads(self):
363 363 """set of all common heads after changeset bundle push"""
364 364 if self.cgresult:
365 365 return self.futureheads
366 366 else:
367 367 return self.fallbackheads
368 368
369 369 # mapping of messages used when pushing bookmarks
370 370 bookmsgmap = {'update': (_("updating bookmark %s\n"),
371 371 _('updating bookmark %s failed!\n')),
372 372 'export': (_("exporting bookmark %s\n"),
373 373 _('exporting bookmark %s failed!\n')),
374 374 'delete': (_("deleting remote bookmark %s\n"),
375 375 _('deleting remote bookmark %s failed!\n')),
376 376 }
377 377
378 378
379 379 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
380 380 opargs=None):
381 381 '''Push outgoing changesets (limited by revs) from a local
382 382 repository to remote. Return an integer:
383 383 - None means nothing to push
384 384 - 0 means HTTP error
385 385 - 1 means we pushed and remote head count is unchanged *or*
386 386 we have outgoing changesets but refused to push
387 387 - other values as described by addchangegroup()
388 388 '''
389 389 if opargs is None:
390 390 opargs = {}
391 391 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
392 392 **opargs)
393 393 if pushop.remote.local():
394 394 missing = (set(pushop.repo.requirements)
395 395 - pushop.remote.local().supported)
396 396 if missing:
397 397 msg = _("required features are not"
398 398 " supported in the destination:"
399 399 " %s") % (', '.join(sorted(missing)))
400 400 raise error.Abort(msg)
401 401
402 402 # there are two ways to push to remote repo:
403 403 #
404 404 # addchangegroup assumes local user can lock remote
405 405 # repo (local filesystem, old ssh servers).
406 406 #
407 407 # unbundle assumes local user cannot lock remote repo (new ssh
408 408 # servers, http servers).
409 409
410 410 if not pushop.remote.canpush():
411 411 raise error.Abort(_("destination does not support push"))
412 412 # get local lock as we might write phase data
413 413 localwlock = locallock = None
414 414 try:
415 415 # bundle2 push may receive a reply bundle touching bookmarks or other
416 416 # things requiring the wlock. Take it now to ensure proper ordering.
417 417 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
418 418 if _canusebundle2(pushop) and maypushback:
419 419 localwlock = pushop.repo.wlock()
420 420 locallock = pushop.repo.lock()
421 421 pushop.locallocked = True
422 422 except IOError as err:
423 423 pushop.locallocked = False
424 424 if err.errno != errno.EACCES:
425 425 raise
426 426 # source repo cannot be locked.
427 427 # We do not abort the push, but just disable the local phase
428 428 # synchronisation.
429 429 msg = 'cannot lock source repository: %s\n' % err
430 430 pushop.ui.debug(msg)
431 431 try:
432 432 if pushop.locallocked:
433 433 pushop.trmanager = transactionmanager(pushop.repo,
434 434 'push-response',
435 435 pushop.remote.url())
436 436 pushop.repo.checkpush(pushop)
437 437 lock = None
438 438 unbundle = pushop.remote.capable('unbundle')
439 439 if not unbundle:
440 440 lock = pushop.remote.lock()
441 441 try:
442 442 _pushdiscovery(pushop)
443 443 if _canusebundle2(pushop):
444 444 _pushbundle2(pushop)
445 445 _pushchangeset(pushop)
446 446 _pushsyncphase(pushop)
447 447 _pushobsolete(pushop)
448 448 _pushbookmark(pushop)
449 449 finally:
450 450 if lock is not None:
451 451 lock.release()
452 452 if pushop.trmanager:
453 453 pushop.trmanager.close()
454 454 finally:
455 455 if pushop.trmanager:
456 456 pushop.trmanager.release()
457 457 if locallock is not None:
458 458 locallock.release()
459 459 if localwlock is not None:
460 460 localwlock.release()
461 461
462 462 return pushop
463 463
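# Typical call sketch (hypothetical helper, not upstream code), mirroring
# what the push command does at a high level; ``dest`` is assumed to be a
# peer obtained from hg.peer() or similar.
def _pushexample(repo, dest):
    pushop = push(repo, dest, force=False, revs=None, newbranch=False)
    # cgresult/bkresult carry the outcomes described in the docstring above
    return pushop.cgresult, pushop.bkresult
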
464 464 # list of steps to perform discovery before push
465 465 pushdiscoveryorder = []
466 466
467 467 # Mapping between step name and function
468 468 #
469 469 # This exists to help extensions wrap steps if necessary
470 470 pushdiscoverymapping = {}
471 471
472 472 def pushdiscovery(stepname):
473 473 """decorator for function performing discovery before push
474 474
475 475 The function is added to the step -> function mapping and appended to the
476 476 list of steps. Beware that decorated functions will be added in order (this
477 477 may matter).
478 478
479 479 You can only use this decorator for a new step; if you want to wrap a step
480 480 from an extension, change the pushdiscoverymapping dictionary directly."""
481 481 def dec(func):
482 482 assert stepname not in pushdiscoverymapping
483 483 pushdiscoverymapping[stepname] = func
484 484 pushdiscoveryorder.append(stepname)
485 485 return func
486 486 return dec
487 487
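# Registration sketch (hypothetical step name and body): a new discovery
# step receives the pushoperation and typically records its findings on it:
#
#   @pushdiscovery('mydata')
#   def _pushdiscoverymydata(pushop):
#       pushop.mydata = pushop.remote.listkeys('mydata')
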
488 488 def _pushdiscovery(pushop):
489 489 """Run all discovery steps"""
490 490 for stepname in pushdiscoveryorder:
491 491 step = pushdiscoverymapping[stepname]
492 492 step(pushop)
493 493
494 494 @pushdiscovery('changeset')
495 495 def _pushdiscoverychangeset(pushop):
496 496 """discover the changeset that need to be pushed"""
497 497 fci = discovery.findcommonincoming
498 498 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
499 499 common, inc, remoteheads = commoninc
500 500 fco = discovery.findcommonoutgoing
501 501 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
502 502 commoninc=commoninc, force=pushop.force)
503 503 pushop.outgoing = outgoing
504 504 pushop.remoteheads = remoteheads
505 505 pushop.incoming = inc
506 506
507 507 @pushdiscovery('phase')
508 508 def _pushdiscoveryphase(pushop):
509 509 """discover the phase that needs to be pushed
510 510
511 511 (computed for both success and failure case for changesets push)"""
512 512 outgoing = pushop.outgoing
513 513 unfi = pushop.repo.unfiltered()
514 514 remotephases = pushop.remote.listkeys('phases')
515 515 publishing = remotephases.get('publishing', False)
516 516 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
517 517 and remotephases # server supports phases
518 518 and not pushop.outgoing.missing # no changesets to be pushed
519 519 and publishing):
520 520 # When:
521 521 # - this is a subrepo push
522 522 # - and the remote supports phases
523 523 # - and no changesets are to be pushed
524 524 # - and the remote is publishing
525 525 # we may be in the issue 3871 case!
526 526 # We drop the possible phase synchronisation, done as a
527 527 # courtesy, that would publish changesets which are possibly
528 528 # draft locally on the remote.
529 529 remotephases = {'publishing': 'True'}
530 530 ana = phases.analyzeremotephases(pushop.repo,
531 531 pushop.fallbackheads,
532 532 remotephases)
533 533 pheads, droots = ana
534 534 extracond = ''
535 535 if not publishing:
536 536 extracond = ' and public()'
537 537 revset = 'heads((%%ln::%%ln) %s)' % extracond
538 538 # Get the list of all revs that are draft on the remote but public here.
539 539 # XXX Beware that the revset breaks if droots is not strictly
540 540 # XXX roots; we may want to ensure it is, but that is costly
541 541 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
542 542 if not outgoing.missing:
543 543 future = fallback
544 544 else:
545 545 # add the changesets we are going to push as draft
546 546 #
547 547 # This should not be necessary for a publishing server, but because of
548 548 # an issue fixed in xxxxx we have to do it anyway.
549 549 fdroots = list(unfi.set('roots(%ln + %ln::)',
550 550 outgoing.missing, droots))
551 551 fdroots = [f.node() for f in fdroots]
552 552 future = list(unfi.set(revset, fdroots, pushop.futureheads))
553 553 pushop.outdatedphases = future
554 554 pushop.fallbackoutdatedphases = fallback
555 555
556 556 @pushdiscovery('obsmarker')
557 557 def _pushdiscoveryobsmarkers(pushop):
558 558 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
559 559 and pushop.repo.obsstore
560 560 and 'obsolete' in pushop.remote.listkeys('namespaces')):
561 561 repo = pushop.repo
562 562 # a very naive computation that can be quite expensive on big repos.
563 563 # However: evolution is currently slow on them anyway.
564 564 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
565 565 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
566 566
567 567 @pushdiscovery('bookmarks')
568 568 def _pushdiscoverybookmarks(pushop):
569 569 ui = pushop.ui
570 570 repo = pushop.repo.unfiltered()
571 571 remote = pushop.remote
572 572 ui.debug("checking for updated bookmarks\n")
573 573 ancestors = ()
574 574 if pushop.revs:
575 575 revnums = map(repo.changelog.rev, pushop.revs)
576 576 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
577 577 remotebookmark = remote.listkeys('bookmarks')
578 578
579 579 explicit = set([repo._bookmarks.expandname(bookmark)
580 580 for bookmark in pushop.bookmarks])
581 581
582 582 comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
583 583 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
584 584 for b, scid, dcid in advsrc:
585 585 if b in explicit:
586 586 explicit.remove(b)
587 587 if not ancestors or repo[scid].rev() in ancestors:
588 588 pushop.outbookmarks.append((b, dcid, scid))
589 589 # search added bookmark
590 590 for b, scid, dcid in addsrc:
591 591 if b in explicit:
592 592 explicit.remove(b)
593 593 pushop.outbookmarks.append((b, '', scid))
594 594 # search for overwritten bookmark
595 595 for b, scid, dcid in advdst + diverge + differ:
596 596 if b in explicit:
597 597 explicit.remove(b)
598 598 pushop.outbookmarks.append((b, dcid, scid))
599 599 # search for bookmark to delete
600 600 for b, scid, dcid in adddst:
601 601 if b in explicit:
602 602 explicit.remove(b)
603 603 # treat as "deleted locally"
604 604 pushop.outbookmarks.append((b, dcid, ''))
605 605 # identical bookmarks shouldn't get reported
606 606 for b, scid, dcid in same:
607 607 if b in explicit:
608 608 explicit.remove(b)
609 609
610 610 if explicit:
611 611 explicit = sorted(explicit)
612 612 # we should probably list all of them
613 613 ui.warn(_('bookmark %s does not exist on the local '
614 614 'or remote repository!\n') % explicit[0])
615 615 pushop.bkresult = 2
616 616
617 617 pushop.outbookmarks.sort()
618 618
619 619 def _pushcheckoutgoing(pushop):
620 620 outgoing = pushop.outgoing
621 621 unfi = pushop.repo.unfiltered()
622 622 if not outgoing.missing:
623 623 # nothing to push
624 624 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
625 625 return False
626 626 # something to push
627 627 if not pushop.force:
628 628 # if repo.obsstore is empty --> no obsolete markers,
629 629 # so we can skip the iteration below
630 630 if unfi.obsstore:
631 631 # these messages are defined here because of the 80-char line limit
632 632 mso = _("push includes obsolete changeset: %s!")
633 633 mst = {"unstable": _("push includes unstable changeset: %s!"),
634 634 "bumped": _("push includes bumped changeset: %s!"),
635 635 "divergent": _("push includes divergent changeset: %s!")}
636 636 # If we are going to push and there is at least one
637 637 # obsolete or unstable changeset in missing, then at
638 638 # least one of the missing heads will be obsolete or
639 639 # unstable. So checking heads only is ok.
640 640 for node in outgoing.missingheads:
641 641 ctx = unfi[node]
642 642 if ctx.obsolete():
643 643 raise error.Abort(mso % ctx)
644 644 elif ctx.troubled():
645 645 raise error.Abort(mst[ctx.troubles()[0]] % ctx)
646 646
647 647 discovery.checkheads(pushop)
648 648 return True
649 649
650 650 # List of names of steps to perform for an outgoing bundle2, order matters.
651 651 b2partsgenorder = []
652 652
653 653 # Mapping between step name and function
654 654 #
655 655 # This exists to help extensions wrap steps if necessary
656 656 b2partsgenmapping = {}
657 657
658 658 def b2partsgenerator(stepname, idx=None):
659 659 """decorator for function generating bundle2 part
660 660
661 661 The function is added to the step -> function mapping and appended to the
662 662 list of steps. Beware that decorated functions will be added in order
663 663 (this may matter).
664 664
665 665 You can only use this decorator for new steps; if you want to wrap a step
666 666 from an extension, change the b2partsgenmapping dictionary directly."""
667 667 def dec(func):
668 668 assert stepname not in b2partsgenmapping
669 669 b2partsgenmapping[stepname] = func
670 670 if idx is None:
671 671 b2partsgenorder.append(stepname)
672 672 else:
673 673 b2partsgenorder.insert(idx, stepname)
674 674 return func
675 675 return dec
676 676
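# Registration sketch (hypothetical names): a part generator receives the
# pushoperation and the bundler; it may add parts and may return a callable
# that is invoked with the bundle2 reply once the server has answered:
#
#   @b2partsgenerator('mypart')
#   def _pushb2mypart(pushop, bundler):
#       if 'mypart' in pushop.stepsdone:
#           return
#       pushop.stepsdone.add('mypart')
#       part = bundler.newpart('pushkey')  # any advertised part type
#       def handlereply(op):
#           pass  # inspect op.records.getreplies(part.id) here
#       return handlereply
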
677 677 def _pushb2ctxcheckheads(pushop, bundler):
678 678 """Generate race condition checking parts
679 679
680 680 Exists as an independent function to aid extensions
681 681 """
682 682 if not pushop.force:
683 683 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
684 684
685 685 @b2partsgenerator('changeset')
686 686 def _pushb2ctx(pushop, bundler):
687 687 """handle changegroup push through bundle2
688 688
689 689 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
690 690 """
691 691 if 'changesets' in pushop.stepsdone:
692 692 return
693 693 pushop.stepsdone.add('changesets')
694 694 # Send known heads to the server for race detection.
695 695 if not _pushcheckoutgoing(pushop):
696 696 return
697 697 pushop.repo.prepushoutgoinghooks(pushop.repo,
698 698 pushop.remote,
699 699 pushop.outgoing)
700 700
701 701 _pushb2ctxcheckheads(pushop, bundler)
702 702
703 703 b2caps = bundle2.bundle2caps(pushop.remote)
704 704 version = None
705 705 cgversions = b2caps.get('changegroup')
706 706 if not cgversions: # 3.1 and 3.2 ship with an empty value
707 707 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
708 708 pushop.outgoing)
709 709 else:
710 710 cgversions = [v for v in cgversions
711 711 if v in changegroup.supportedoutgoingversions(
712 712 pushop.repo)]
713 713 if not cgversions:
714 714 raise ValueError(_('no common changegroup version'))
715 715 version = max(cgversions)
716 716 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
717 717 pushop.outgoing,
718 718 version=version)
719 719 cgpart = bundler.newpart('changegroup', data=cg)
720 720 if version is not None:
721 721 cgpart.addparam('version', version)
722 722 if 'treemanifest' in pushop.repo.requirements:
723 723 cgpart.addparam('treemanifest', '1')
724 724 def handlereply(op):
725 725 """extract addchangegroup returns from server reply"""
726 726 cgreplies = op.records.getreplies(cgpart.id)
727 727 assert len(cgreplies['changegroup']) == 1
728 728 pushop.cgresult = cgreplies['changegroup'][0]['return']
729 729 return handlereply
730 730
731 731 @b2partsgenerator('phase')
732 732 def _pushb2phases(pushop, bundler):
733 733 """handle phase push through bundle2"""
734 734 if 'phases' in pushop.stepsdone:
735 735 return
736 736 b2caps = bundle2.bundle2caps(pushop.remote)
737 737 if 'pushkey' not in b2caps:
738 738 return
739 739 pushop.stepsdone.add('phases')
740 740 part2node = []
741 741
742 742 def handlefailure(pushop, exc):
743 743 targetid = int(exc.partid)
744 744 for partid, node in part2node:
745 745 if partid == targetid:
746 746 raise error.Abort(_('updating %s to public failed') % node)
747 747
748 748 enc = pushkey.encode
749 749 for newremotehead in pushop.outdatedphases:
750 750 part = bundler.newpart('pushkey')
751 751 part.addparam('namespace', enc('phases'))
752 752 part.addparam('key', enc(newremotehead.hex()))
753 753 part.addparam('old', enc(str(phases.draft)))
754 754 part.addparam('new', enc(str(phases.public)))
755 755 part2node.append((part.id, newremotehead))
756 756 pushop.pkfailcb[part.id] = handlefailure
757 757
758 758 def handlereply(op):
759 759 for partid, node in part2node:
760 760 partrep = op.records.getreplies(partid)
761 761 results = partrep['pushkey']
762 762 assert len(results) <= 1
763 763 msg = None
764 764 if not results:
765 765 msg = _('server ignored update of %s to public!\n') % node
766 766 elif not int(results[0]['return']):
767 767 msg = _('updating %s to public failed!\n') % node
768 768 if msg is not None:
769 769 pushop.ui.warn(msg)
770 770 return handlereply
771 771
772 772 @b2partsgenerator('obsmarkers')
773 773 def _pushb2obsmarkers(pushop, bundler):
774 774 if 'obsmarkers' in pushop.stepsdone:
775 775 return
776 776 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
777 777 if obsolete.commonversion(remoteversions) is None:
778 778 return
779 779 pushop.stepsdone.add('obsmarkers')
780 780 if pushop.outobsmarkers:
781 781 markers = sorted(pushop.outobsmarkers)
782 782 buildobsmarkerspart(bundler, markers)
783 783
784 784 @b2partsgenerator('bookmarks')
785 785 def _pushb2bookmarks(pushop, bundler):
786 786 """handle bookmark push through bundle2"""
787 787 if 'bookmarks' in pushop.stepsdone:
788 788 return
789 789 b2caps = bundle2.bundle2caps(pushop.remote)
790 790 if 'pushkey' not in b2caps:
791 791 return
792 792 pushop.stepsdone.add('bookmarks')
793 793 part2book = []
794 794 enc = pushkey.encode
795 795
796 796 def handlefailure(pushop, exc):
797 797 targetid = int(exc.partid)
798 798 for partid, book, action in part2book:
799 799 if partid == targetid:
800 800 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
801 801 # we should not be called for parts we did not generate
802 802 assert False
803 803
804 804 for book, old, new in pushop.outbookmarks:
805 805 part = bundler.newpart('pushkey')
806 806 part.addparam('namespace', enc('bookmarks'))
807 807 part.addparam('key', enc(book))
808 808 part.addparam('old', enc(old))
809 809 part.addparam('new', enc(new))
810 810 action = 'update'
811 811 if not old:
812 812 action = 'export'
813 813 elif not new:
814 814 action = 'delete'
815 815 part2book.append((part.id, book, action))
816 816 pushop.pkfailcb[part.id] = handlefailure
817 817
818 818 def handlereply(op):
819 819 ui = pushop.ui
820 820 for partid, book, action in part2book:
821 821 partrep = op.records.getreplies(partid)
822 822 results = partrep['pushkey']
823 823 assert len(results) <= 1
824 824 if not results:
825 825 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
826 826 else:
827 827 ret = int(results[0]['return'])
828 828 if ret:
829 829 ui.status(bookmsgmap[action][0] % book)
830 830 else:
831 831 ui.warn(bookmsgmap[action][1] % book)
832 832 if pushop.bkresult is not None:
833 833 pushop.bkresult = 1
834 834 return handlereply
835 835
836 836
837 837 def _pushbundle2(pushop):
838 838 """push data to the remote using bundle2
839 839
840 840 The only currently supported type of data is changegroup but this will
841 841 evolve in the future."""
842 842 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
843 843 pushback = (pushop.trmanager
844 844 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
845 845
846 846 # create reply capability
847 847 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
848 848 allowpushback=pushback))
849 849 bundler.newpart('replycaps', data=capsblob)
850 850 replyhandlers = []
851 851 for partgenname in b2partsgenorder:
852 852 partgen = b2partsgenmapping[partgenname]
853 853 ret = partgen(pushop, bundler)
854 854 if callable(ret):
855 855 replyhandlers.append(ret)
856 856 # do not push if nothing to push
857 857 if bundler.nbparts <= 1:
858 858 return
859 859 stream = util.chunkbuffer(bundler.getchunks())
860 860 try:
861 861 try:
862 862 reply = pushop.remote.unbundle(stream, ['force'], 'push')
863 863 except error.BundleValueError as exc:
864 864 raise error.Abort('missing support for %s' % exc)
865 865 try:
866 866 trgetter = None
867 867 if pushback:
868 868 trgetter = pushop.trmanager.transaction
869 869 op = bundle2.processbundle(pushop.repo, reply, trgetter)
870 870 except error.BundleValueError as exc:
871 871 raise error.Abort('missing support for %s' % exc)
872 872 except bundle2.AbortFromPart as exc:
873 873 pushop.ui.status(_('remote: %s\n') % exc)
874 874 raise error.Abort(_('push failed on remote'), hint=exc.hint)
875 875 except error.PushkeyFailed as exc:
876 876 partid = int(exc.partid)
877 877 if partid not in pushop.pkfailcb:
878 878 raise
879 879 pushop.pkfailcb[partid](pushop, exc)
880 880 for rephand in replyhandlers:
881 881 rephand(op)
882 882
883 883 def _pushchangeset(pushop):
884 884 """Make the actual push of changeset bundle to remote repo"""
885 885 if 'changesets' in pushop.stepsdone:
886 886 return
887 887 pushop.stepsdone.add('changesets')
888 888 if not _pushcheckoutgoing(pushop):
889 889 return
890 890 pushop.repo.prepushoutgoinghooks(pushop.repo,
891 891 pushop.remote,
892 892 pushop.outgoing)
893 893 outgoing = pushop.outgoing
894 894 unbundle = pushop.remote.capable('unbundle')
895 895 # TODO: get bundlecaps from remote
896 896 bundlecaps = None
897 897 # create a changegroup from local
898 898 if pushop.revs is None and not (outgoing.excluded
899 899 or pushop.repo.changelog.filteredrevs):
900 900 # push everything,
901 901 # use the fast path, no race possible on push
902 902 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
903 903 cg = changegroup.getsubset(pushop.repo,
904 904 outgoing,
905 905 bundler,
906 906 'push',
907 907 fastpath=True)
908 908 else:
909 909 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
910 910 bundlecaps)
911 911
912 912 # apply changegroup to remote
913 913 if unbundle:
914 914 # local repo finds heads on server, finds out what
915 915 # revs it must push. once revs transferred, if server
916 916 # finds it has different heads (someone else won
917 917 # commit/push race), server aborts.
918 918 if pushop.force:
919 919 remoteheads = ['force']
920 920 else:
921 921 remoteheads = pushop.remoteheads
922 922 # ssh: return remote's addchangegroup()
923 923 # http: return remote's addchangegroup() or 0 for error
924 924 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
925 925 pushop.repo.url())
926 926 else:
927 927 # we return an integer indicating remote head count
928 928 # change
929 929 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
930 930 pushop.repo.url())
931 931
932 932 def _pushsyncphase(pushop):
933 933 """synchronise phase information locally and remotely"""
934 934 cheads = pushop.commonheads
935 935 # even when we don't push, exchanging phase data is useful
936 936 remotephases = pushop.remote.listkeys('phases')
937 937 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
938 938 and remotephases # server supports phases
939 939 and pushop.cgresult is None # nothing was pushed
940 940 and remotephases.get('publishing', False)):
941 941 # When:
942 942 # - this is a subrepo push
943 943 # - and the remote supports phases
944 944 # - and no changeset was pushed
945 945 # - and the remote is publishing
946 946 # we may be in the issue 3871 case!
947 947 # We drop the possible phase synchronisation, done as a
948 948 # courtesy, that would publish changesets which are possibly
949 949 # draft locally on the remote.
950 950 remotephases = {'publishing': 'True'}
951 951 if not remotephases: # old server or public only reply from non-publishing
952 952 _localphasemove(pushop, cheads)
953 953 # don't push any phase data as there is nothing to push
954 954 else:
955 955 ana = phases.analyzeremotephases(pushop.repo, cheads,
956 956 remotephases)
957 957 pheads, droots = ana
958 958 ### Apply remote phase on local
959 959 if remotephases.get('publishing', False):
960 960 _localphasemove(pushop, cheads)
961 961 else: # publish = False
962 962 _localphasemove(pushop, pheads)
963 963 _localphasemove(pushop, cheads, phases.draft)
964 964 ### Apply local phase on remote
965 965
966 966 if pushop.cgresult:
967 967 if 'phases' in pushop.stepsdone:
968 968 # phases already pushed though bundle2
969 969 return
970 970 outdated = pushop.outdatedphases
971 971 else:
972 972 outdated = pushop.fallbackoutdatedphases
973 973
974 974 pushop.stepsdone.add('phases')
975 975
976 976 # filter heads already turned public by the push
977 977 outdated = [c for c in outdated if c.node() not in pheads]
978 978 # fallback to independent pushkey command
979 979 for newremotehead in outdated:
980 980 r = pushop.remote.pushkey('phases',
981 981 newremotehead.hex(),
982 982 str(phases.draft),
983 983 str(phases.public))
984 984 if not r:
985 985 pushop.ui.warn(_('updating %s to public failed!\n')
986 986 % newremotehead)
987 987
988 988 def _localphasemove(pushop, nodes, phase=phases.public):
989 989 """move <nodes> to <phase> in the local source repo"""
990 990 if pushop.trmanager:
991 991 phases.advanceboundary(pushop.repo,
992 992 pushop.trmanager.transaction(),
993 993 phase,
994 994 nodes)
995 995 else:
996 996 # repo is not locked, do not change any phases!
997 997 # Informs the user that phases should have been moved when
998 998 # applicable.
999 999 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1000 1000 phasestr = phases.phasenames[phase]
1001 1001 if actualmoves:
1002 1002 pushop.ui.status(_('cannot lock source repo, skipping '
1003 1003 'local %s phase update\n') % phasestr)
1004 1004
1005 1005 def _pushobsolete(pushop):
1006 1006 """utility function to push obsolete markers to a remote"""
1007 1007 if 'obsmarkers' in pushop.stepsdone:
1008 1008 return
1009 1009 repo = pushop.repo
1010 1010 remote = pushop.remote
1011 1011 pushop.stepsdone.add('obsmarkers')
1012 1012 if pushop.outobsmarkers:
1013 1013 pushop.ui.debug('try to push obsolete markers to remote\n')
1014 1014 rslts = []
1015 1015 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1016 1016 for key in sorted(remotedata, reverse=True):
1017 1017 # reverse sort to ensure we end with dump0
1018 1018 data = remotedata[key]
1019 1019 rslts.append(remote.pushkey('obsolete', key, '', data))
1020 1020 if [r for r in rslts if not r]:
1021 1021 msg = _('failed to push some obsolete markers!\n')
1022 1022 repo.ui.warn(msg)
1023 1023
1024 1024 def _pushbookmark(pushop):
1025 1025 """Update bookmark position on remote"""
1026 1026 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1027 1027 return
1028 1028 pushop.stepsdone.add('bookmarks')
1029 1029 ui = pushop.ui
1030 1030 remote = pushop.remote
1031 1031
1032 1032 for b, old, new in pushop.outbookmarks:
1033 1033 action = 'update'
1034 1034 if not old:
1035 1035 action = 'export'
1036 1036 elif not new:
1037 1037 action = 'delete'
1038 1038 if remote.pushkey('bookmarks', b, old, new):
1039 1039 ui.status(bookmsgmap[action][0] % b)
1040 1040 else:
1041 1041 ui.warn(bookmsgmap[action][1] % b)
1042 1042 # discovery may have set the value from an invalid entry
1043 1043 if pushop.bkresult is not None:
1044 1044 pushop.bkresult = 1
1045 1045
1046 1046 class pulloperation(object):
1047 1047 """A object that represent a single pull operation
1048 1048
1049 1049 It purpose is to carry pull related state and very common operation.
1050 1050
1051 1051 A new should be created at the beginning of each pull and discarded
1052 1052 afterward.
1053 1053 """
1054 1054
1055 1055 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1056 1056 remotebookmarks=None, streamclonerequested=None):
1057 1057 # repo we pull into
1058 1058 self.repo = repo
1059 1059 # repo we pull from
1060 1060 self.remote = remote
1061 1061 # revision we try to pull (None is "all")
1062 1062 self.heads = heads
1063 1063 # bookmarks pulled explicitly
1064 1064 self.explicitbookmarks = bookmarks
1065 1065 # do we force pull?
1066 1066 self.force = force
1067 1067 # whether a streaming clone was requested
1068 1068 self.streamclonerequested = streamclonerequested
1069 1069 # transaction manager
1070 1070 self.trmanager = None
1072 1072 # set of common changesets between local and remote before pull
1072 1072 self.common = None
1074 1074 # set of pulled heads
1074 1074 self.rheads = None
1076 1076 # list of missing changesets to fetch remotely
1076 1076 self.fetch = None
1077 1077 # remote bookmarks data
1078 1078 self.remotebookmarks = remotebookmarks
1079 1079 # result of changegroup pulling (used as return code by pull)
1080 1080 self.cgresult = None
1082 1082 # list of steps already done
1082 1082 self.stepsdone = set()
1083 1083 # Whether we attempted a clone from pre-generated bundles.
1084 1084 self.clonebundleattempted = False
1085 1085
1086 1086 @util.propertycache
1087 1087 def pulledsubset(self):
1088 1088 """heads of the set of changeset target by the pull"""
1089 1089 # compute target subset
1090 1090 if self.heads is None:
1091 1091 # We pulled everything possible
1092 1092 # sync on everything common
1093 1093 c = set(self.common)
1094 1094 ret = list(self.common)
1095 1095 for n in self.rheads:
1096 1096 if n not in c:
1097 1097 ret.append(n)
1098 1098 return ret
1099 1099 else:
1100 1100 # We pulled a specific subset
1101 1101 # sync on this subset
1102 1102 return self.heads
1103 1103
1104 1104 @util.propertycache
1105 1105 def canusebundle2(self):
1106 1106 return _canusebundle2(self)
1107 1107
1108 1108 @util.propertycache
1109 1109 def remotebundle2caps(self):
1110 1110 return bundle2.bundle2caps(self.remote)
1111 1111
1112 1112 def gettransaction(self):
1113 1113 # deprecated; talk to trmanager directly
1114 1114 return self.trmanager.transaction()
1115 1115
1116 1116 class transactionmanager(object):
1117 1117 """An object to manage the life cycle of a transaction
1118 1118
1119 1119 It creates the transaction on demand and calls the appropriate hooks when
1120 1120 closing the transaction."""
1121 1121 def __init__(self, repo, source, url):
1122 1122 self.repo = repo
1123 1123 self.source = source
1124 1124 self.url = url
1125 1125 self._tr = None
1126 1126
1127 1127 def transaction(self):
1128 1128 """Return an open transaction object, constructing if necessary"""
1129 1129 if not self._tr:
1130 1130 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1131 1131 self._tr = self.repo.transaction(trname)
1132 1132 self._tr.hookargs['source'] = self.source
1133 1133 self._tr.hookargs['url'] = self.url
1134 1134 return self._tr
1135 1135
1136 1136 def close(self):
1137 1137 """close transaction if created"""
1138 1138 if self._tr is not None:
1139 1139 self._tr.close()
1140 1140
1141 1141 def release(self):
1142 1142 """release transaction if created"""
1143 1143 if self._tr is not None:
1144 1144 self._tr.release()
1145 1145
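# Lifecycle sketch (hypothetical helper): this mirrors how push() and pull()
# drive the manager; create it up front, let transaction() open the real
# transaction lazily on first use, close() on success, release() always.
def _trmanagerexample(repo, remote):
    trmanager = transactionmanager(repo, 'pull', remote.url())
    try:
        tr = trmanager.transaction()  # opened on demand
        # ... apply incoming data within ``tr`` ...
        trmanager.close()
    finally:
        trmanager.release()
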
1146 1146 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1147 1147 streamclonerequested=None):
1148 1148 """Fetch repository data from a remote.
1149 1149
1150 1150 This is the main function used to retrieve data from a remote repository.
1151 1151
1152 1152 ``repo`` is the local repository to clone into.
1153 1153 ``remote`` is a peer instance.
1154 1154 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1155 1155 default) means to pull everything from the remote.
1156 1156 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1157 1157 default, all remote bookmarks are pulled.
1158 1158 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1159 1159 initialization.
1160 1160 ``streamclonerequested`` is a boolean indicating whether a "streaming
1161 1161 clone" is requested. A "streaming clone" is essentially a raw file copy
1162 1162 of revlogs from the server. This only works when the local repository is
1163 1163 empty. The default value of ``None`` means to respect the server
1164 1164 configuration for preferring stream clones.
1165 1165
1166 1166 Returns the ``pulloperation`` created for this pull.
1167 1167 """
1168 1168 if opargs is None:
1169 1169 opargs = {}
1170 1170 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1171 1171 streamclonerequested=streamclonerequested, **opargs)
1172 1172 if pullop.remote.local():
1173 1173 missing = set(pullop.remote.requirements) - pullop.repo.supported
1174 1174 if missing:
1175 1175 msg = _("required features are not"
1176 1176 " supported in the destination:"
1177 1177 " %s") % (', '.join(sorted(missing)))
1178 1178 raise error.Abort(msg)
1179 1179
1180 1180 lock = pullop.repo.lock()
1181 1181 try:
1182 1182 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1183 1183 streamclone.maybeperformlegacystreamclone(pullop)
1184 1184 # This should ideally be in _pullbundle2(). However, it needs to run
1185 1185 # before discovery to avoid extra work.
1186 1186 _maybeapplyclonebundle(pullop)
1187 1187 _pulldiscovery(pullop)
1188 1188 if pullop.canusebundle2:
1189 1189 _pullbundle2(pullop)
1190 1190 _pullchangeset(pullop)
1191 1191 _pullphase(pullop)
1192 1192 _pullbookmarks(pullop)
1193 1193 _pullobsolete(pullop)
1194 1194 pullop.trmanager.close()
1195 1195 finally:
1196 1196 pullop.trmanager.release()
1197 1197 lock.release()
1198 1198
1199 1199 return pullop
1200 1200
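# Typical call sketch (hypothetical helper), the pull-side counterpart of
# the push example above; ``remote`` is assumed to be a peer instance and
# '@' a bookmark that exists on it.
def _pullexample(repo, remote):
    pullop = pull(repo, remote, heads=None, bookmarks=['@'])
    # cgresult is the changegroup application result described above
    return pullop.cgresult
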
1201 1201 # list of steps to perform discovery before pull
1202 1202 pulldiscoveryorder = []
1203 1203
1204 1204 # Mapping between step name and function
1205 1205 #
1206 1206 # This exists to help extensions wrap steps if necessary
1207 1207 pulldiscoverymapping = {}
1208 1208
1209 1209 def pulldiscovery(stepname):
1210 1210 """decorator for function performing discovery before pull
1211 1211
1212 1212 The function is added to the step -> function mapping and appended to the
1213 1213 list of steps. Beware that decorated functions will be added in order (this
1214 1214 may matter).
1215 1215
1216 1216 You can only use this decorator for a new step; if you want to wrap a step
1217 1217 from an extension, change the pulldiscoverymapping dictionary directly."""
1218 1218 def dec(func):
1219 1219 assert stepname not in pulldiscoverymapping
1220 1220 pulldiscoverymapping[stepname] = func
1221 1221 pulldiscoveryorder.append(stepname)
1222 1222 return func
1223 1223 return dec
1224 1224
1225 1225 def _pulldiscovery(pullop):
1226 1226 """Run all discovery steps"""
1227 1227 for stepname in pulldiscoveryorder:
1228 1228 step = pulldiscoverymapping[stepname]
1229 1229 step(pullop)
1230 1230
1231 1231 @pulldiscovery('b1:bookmarks')
1232 1232 def _pullbookmarkbundle1(pullop):
1233 1233 """fetch bookmark data in bundle1 case
1234 1234
1235 1235 If not using bundle2, we have to fetch bookmarks before changeset
1236 1236 discovery to reduce the chance and impact of race conditions."""
1237 1237 if pullop.remotebookmarks is not None:
1238 1238 return
1239 1239 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1240 1240 # all known bundle2 servers now support listkeys, but let's be nice to
1241 1241 # newer implementations.
1242 1242 return
1243 1243 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1244 1244
1245 1245
1246 1246 @pulldiscovery('changegroup')
1247 1247 def _pulldiscoverychangegroup(pullop):
1248 1248 """discovery phase for the pull
1249 1249
1250 1250 Currently handles changeset discovery only; will change to handle all
1251 1251 discovery at some point."""
1252 1252 tmp = discovery.findcommonincoming(pullop.repo,
1253 1253 pullop.remote,
1254 1254 heads=pullop.heads,
1255 1255 force=pullop.force)
1256 1256 common, fetch, rheads = tmp
1257 1257 nm = pullop.repo.unfiltered().changelog.nodemap
1258 1258 if fetch and rheads:
1259 1259 # If a remote head is filtered locally, let's drop it from the unknown
1260 1260 # remote heads and put it back in common.
1261 1261 #
1262 1262 # This is a hackish solution to catch most of the "common but locally
1263 1263 # hidden" situations. We do not perform discovery on the unfiltered
1264 1264 # repository because it ends up doing a pathological amount of round
1265 1265 # trips for a huge number of changesets we do not care about.
1266 1266 #
1267 1267 # If a set of such "common but filtered" changesets exists on the server
1268 1268 # but does not include a remote head, we will not be able to detect it.
1269 1269 scommon = set(common)
1270 1270 filteredrheads = []
1271 1271 for n in rheads:
1272 1272 if n in nm:
1273 1273 if n not in scommon:
1274 1274 common.append(n)
1275 1275 else:
1276 1276 filteredrheads.append(n)
1277 1277 if not filteredrheads:
1278 1278 fetch = []
1279 1279 rheads = filteredrheads
1280 1280 pullop.common = common
1281 1281 pullop.fetch = fetch
1282 1282 pullop.rheads = rheads
1283 1283
1284 1284 def _pullbundle2(pullop):
1285 1285 """pull data using bundle2
1286 1286
1287 1287 For now, the only supported data is the changegroup."""
1288 1288 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1289 1289
1290 1290 streaming, streamreqs = streamclone.canperformstreamclone(pullop)
1291 1291
1292 1292 # pulling changegroup
1293 1293 pullop.stepsdone.add('changegroup')
1294 1294
1295 1295 kwargs['common'] = pullop.common
1296 1296 kwargs['heads'] = pullop.heads or pullop.rheads
1297 1297 kwargs['cg'] = pullop.fetch
1298 1298 if 'listkeys' in pullop.remotebundle2caps:
1299 1299 kwargs['listkeys'] = ['phase']
1300 1300 if pullop.remotebookmarks is None:
1301 1301 # make sure to always include bookmark data when migrating
1302 1302 # `hg incoming --bundle` to using this function.
1303 1303 kwargs['listkeys'].append('bookmarks')
1304 1304
1305 1305 # If this is a full pull / clone and the server supports the clone bundles
1306 1306 # feature, tell the server whether we attempted a clone bundle. The
1307 1307 # presence of this flag indicates the client supports clone bundles. This
1308 1308 # will enable the server to treat clients that support clone bundles
1309 1309 # differently from those that don't.
1310 1310 if (pullop.remote.capable('clonebundles')
1311 1311 and pullop.heads is None and list(pullop.common) == [nullid]):
1312 1312 kwargs['cbattempted'] = pullop.clonebundleattempted
1313 1313
1314 1314 if streaming:
1315 1315 pullop.repo.ui.status(_('streaming all changes\n'))
1316 1316 elif not pullop.fetch:
1317 1317 pullop.repo.ui.status(_("no changes found\n"))
1318 1318 pullop.cgresult = 0
1319 1319 else:
1320 1320 if pullop.heads is None and list(pullop.common) == [nullid]:
1321 1321 pullop.repo.ui.status(_("requesting all changes\n"))
1322 1322 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1323 1323 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1324 1324 if obsolete.commonversion(remoteversions) is not None:
1325 1325 kwargs['obsmarkers'] = True
1326 1326 pullop.stepsdone.add('obsmarkers')
1327 1327 _pullbundle2extraprepare(pullop, kwargs)
1328 1328 bundle = pullop.remote.getbundle('pull', **kwargs)
1329 1329 try:
1330 1330 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1331 1331 except error.BundleValueError as exc:
1332 1332 raise error.Abort('missing support for %s' % exc)
1333 1333
1334 1334 if pullop.fetch:
1335 1335 results = [cg['return'] for cg in op.records['changegroup']]
1336 1336 pullop.cgresult = changegroup.combineresults(results)
1337 1337
1338 1338 # processing phase changes
1339 1339 for namespace, value in op.records['listkeys']:
1340 1340 if namespace == 'phases':
1341 1341 _pullapplyphases(pullop, value)
1342 1342
1343 1343 # processing bookmark update
1344 1344 for namespace, value in op.records['listkeys']:
1345 1345 if namespace == 'bookmarks':
1346 1346 pullop.remotebookmarks = value
1347 1347
1348 1348 # bookmark data were either already there or pulled in the bundle
1349 1349 if pullop.remotebookmarks is not None:
1350 1350 _pullbookmarks(pullop)
1351 1351
1352 1352 def _pullbundle2extraprepare(pullop, kwargs):
1353 1353 """hook function so that extensions can extend the getbundle call"""
1354 1354 pass
1355 1355
1356 1356 def _pullchangeset(pullop):
1357 1357 """pull changeset from unbundle into the local repo"""
1358 1358 # We delay opening the transaction as late as possible so we don't
1359 1359 # open a transaction for nothing and don't break a future useful
1360 1360 # rollback call
1361 1361 if 'changegroup' in pullop.stepsdone:
1362 1362 return
1363 1363 pullop.stepsdone.add('changegroup')
1364 1364 if not pullop.fetch:
1365 1365 pullop.repo.ui.status(_("no changes found\n"))
1366 1366 pullop.cgresult = 0
1367 1367 return
1368 1368 pullop.gettransaction()
1369 1369 if pullop.heads is None and list(pullop.common) == [nullid]:
1370 1370 pullop.repo.ui.status(_("requesting all changes\n"))
1371 1371 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1372 1372 # issue1320, avoid a race if remote changed after discovery
1373 1373 pullop.heads = pullop.rheads
1374 1374
1375 1375 if pullop.remote.capable('getbundle'):
1376 1376 # TODO: get bundlecaps from remote
1377 1377 cg = pullop.remote.getbundle('pull', common=pullop.common,
1378 1378 heads=pullop.heads or pullop.rheads)
1379 1379 elif pullop.heads is None:
1380 1380 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1381 1381 elif not pullop.remote.capable('changegroupsubset'):
1382 1382 raise error.Abort(_("partial pull cannot be done because "
1383 1383 "other repository doesn't support "
1384 1384 "changegroupsubset."))
1385 1385 else:
1386 1386 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1387 1387 pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
1388 1388
1389 1389 def _pullphase(pullop):
1390 1390 # Get remote phases data from remote
1391 1391 if 'phases' in pullop.stepsdone:
1392 1392 return
1393 1393 remotephases = pullop.remote.listkeys('phases')
1394 1394 _pullapplyphases(pullop, remotephases)
1395 1395
1396 1396 def _pullapplyphases(pullop, remotephases):
1397 1397 """apply phase movement from observed remote state"""
1398 1398 if 'phases' in pullop.stepsdone:
1399 1399 return
1400 1400 pullop.stepsdone.add('phases')
1401 1401 publishing = bool(remotephases.get('publishing', False))
1402 1402 if remotephases and not publishing:
1403 1403 # remote is new and non-publishing
1404 1404 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1405 1405 pullop.pulledsubset,
1406 1406 remotephases)
1407 1407 dheads = pullop.pulledsubset
1408 1408 else:
1409 1409 # Remote is old or publishing; all common changesets
1410 1410 # should be seen as public
1411 1411 pheads = pullop.pulledsubset
1412 1412 dheads = []
1413 1413 unfi = pullop.repo.unfiltered()
1414 1414 phase = unfi._phasecache.phase
1415 1415 rev = unfi.changelog.nodemap.get
1416 1416 public = phases.public
1417 1417 draft = phases.draft
1418 1418
1419 1419 # exclude changesets already public locally and update the others
1420 1420 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1421 1421 if pheads:
1422 1422 tr = pullop.gettransaction()
1423 1423 phases.advanceboundary(pullop.repo, tr, public, pheads)
1424 1424
1425 1425 # exclude changesets already draft locally and update the others
1426 1426 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1427 1427 if dheads:
1428 1428 tr = pullop.gettransaction()
1429 1429 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1430 1430
1431 1431 def _pullbookmarks(pullop):
1432 1432 """process the remote bookmark information to update the local one"""
1433 1433 if 'bookmarks' in pullop.stepsdone:
1434 1434 return
1435 1435 pullop.stepsdone.add('bookmarks')
1436 1436 repo = pullop.repo
1437 1437 remotebookmarks = pullop.remotebookmarks
1438 1438 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1439 1439 pullop.remote.url(),
1440 1440 pullop.gettransaction,
1441 1441 explicit=pullop.explicitbookmarks)
1442 1442
1443 1443 def _pullobsolete(pullop):
1444 1444 """utility function to pull obsolete markers from a remote
1445 1445
1446 1446 ``gettransaction`` is a function that returns the pull transaction,
1447 1447 creating one if necessary. We return the transaction to inform the
1448 1448 calling code that a new transaction has been created (when applicable).
1449 1449 
1450 1450 Exists mostly to allow overriding for experimentation purposes"""
1451 1451 if 'obsmarkers' in pullop.stepsdone:
1452 1452 return
1453 1453 pullop.stepsdone.add('obsmarkers')
1454 1454 tr = None
1455 1455 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1456 1456 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1457 1457 remoteobs = pullop.remote.listkeys('obsolete')
1458 1458 if 'dump0' in remoteobs:
1459 1459 tr = pullop.gettransaction()
1460 1460 markers = []
1461 1461 for key in sorted(remoteobs, reverse=True):
1462 1462 if key.startswith('dump'):
1463 1463 data = base85.b85decode(remoteobs[key])
1464 1464 version, newmarks = obsolete._readmarkers(data)
1465 1465 markers += newmarks
1466 1466 if markers:
1467 1467 pullop.repo.obsstore.add(tr, markers)
1468 1468 pullop.repo.invalidatevolatilesets()
1469 1469 return tr
1470 1470
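# Wire format sketch (editor's note): the 'obsolete' pushkey namespace read
# above maps keys named 'dump0', 'dump1', ... to base85-encoded binary
# marker blobs:
#
#   >>> # remote.listkeys('obsolete')
#   >>> # {'dump0': '<base85-encoded markers>'}
#
# The keys are walked in reverse-sorted order and every 'dump*' value is
# decoded and accumulated before a single obsstore.add() call.
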
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = set(['HG20'])
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    caps.add('bundle2=' + urllib.quote(capsblob))
    return caps

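# Shape sketch (editor's note): the set built above looks roughly like
#
#   >>> # caps20to10(repo)
#   >>> # set(['HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02%0A...'])
#
# where the 'bundle2=' entry is the URL-quoted capability blob; its exact
# contents depend on the repository's enabled features ('...' elided here).
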
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, modify the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec

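# Usage sketch (editor's note, hypothetical names): an extension registering
# a brand-new step would use the decorator above like this; the 'mydata'
# step name and part type are invented for illustration only:
#
#   @getbundle2partsgenerator('mydata')
#   def _getbundlemydatapart(bundler, repo, source, bundlecaps=None,
#                            b2caps=None, **kwargs):
#       """add a hypothetical 'mydata' part when the client asks for it"""
#       if kwargs.get('mydata', False):
#           bundler.newpart('mydata', data='payload', mandatory=False)
#
# Wrapping an existing step instead means replacing its entry in the
# getbundle2partsmapping dictionary, as the docstring notes.
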
def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False

def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kinds of parts)

    Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
    passed. For now, the bundle can only contain a changegroup, but this
    will change when more part types become available for bundle2.

    This is different from changegroup.getchangegroup, which only returns an
    HG10 changegroup bundle. They may eventually get reunited in the future
    when we have a clearer idea of the API we want to use to query different
    data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())

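# Call sketch (editor's note): bundlecaps is what selects between the two
# branches above. A set containing an 'HG2*' marker takes the bundle20
# path; bundlecaps=None (or no such marker) takes the bundle10 path:
#
#   >>> # getbundle(repo, 'pull', heads=heads, common=common,
#   >>> #           bundlecaps=set(['HG20', 'bundle2=<quoted caps>']))
#
# The '<quoted caps>' placeholder stands for the URL-quoted bundle2
# capability blob decoded into b2caps above.
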
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = None
        cgversions = b2caps.get('changegroup')
        getcgkwargs = {}
        if cgversions: # 3.1 and 3.2 ship with an empty value
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = getcgkwargs['version'] = max(cgversions)
        outgoing = changegroup.computeoutgoing(repo, heads, common)
        cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                                bundlecaps=bundlecaps,
                                                **getcgkwargs)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if version is not None:
            part.addparam('version', version)
        part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = changegroup.computeoutgoing(repo, heads, common)

    if not outgoing.missingheads:
        return

    cache = tags.hgtagsfnodescache(repo.unfiltered())
    chunks = []

    # .hgtags fnodes are only relevant for head changesets. While we could
    # transfer values for all known nodes, there will likely be little to
    # no benefit.
    #
    # We don't bother using a generator to produce output data because
    # a) we only have 40 bytes per head and even esoteric numbers of heads
    # consume little memory (1M heads is 40MB) b) we don't want to send the
    # part if we don't have entries and knowing if we have entries requires
    # cache lookups.
    for node in outgoing.missingheads:
        # Don't compute missing, as this may slow down serving.
        fnode = cache.getfnode(node, computemissing=False)
        if fnode is not None:
            chunks.extend([node, fnode])

    if chunks:
        bundler.newpart('hgtagsfnodes', data=''.join(chunks))

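# Layout note (editor's note): the part payload assembled above is a flat
# concatenation of fixed-width records, one per missing head:
#
#   20-byte changeset node | 20-byte .hgtags filenode | ... repeat ...
#
# so a bundle covering two heads carries exactly 80 bytes of part data.
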
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

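# Client-side sketch (editor's note): the 'hashed' form accepted above is
# produced by mirroring the server-side computation exactly:
#
#   >>> # heads = remote.heads()
#   >>> # heads_hash = util.sha1(''.join(sorted(heads))).digest()
#   >>> # their_heads = ['hashed', heads_hash]
#
# Sending a hash instead of the full head list keeps the request small
# while still detecting pushes raced between discovery and unbundle.
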
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
        else:
            lockandtr[1] = repo.lock()
            r = cg.apply(repo, source, url)
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

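# Configuration note (editor's note): the greedy-vs-lazy locking choice
# above is controlled by an experimental knob. Enabling it defers
# wlock/lock/transaction acquisition until a bundle2 part actually calls
# gettransaction():
#
#   [experimental]
#   bundle2lazylocking = true
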
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles', True):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(repo, entries)
    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback', False):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

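# Configuration note (editor's note): the knobs consulted above map to
# hgrc entries like the following (values shown are the defaults):
#
#   [ui]
#   clonebundles = true
#   clonebundlefallback = false
#
# Setting clonebundlefallback to true trades the abort-by-default behavior
# for a silent fallback to a regular clone.
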
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urllib.unquote(key)
            value = urllib.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

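# Worked example (editor's note, hypothetical URL): a manifest line such as
#
#   https://example.com/full.hg BUNDLESPEC=gzip-v1 REQUIRESNI=true
#
# parses into the entry below, with BUNDLESPEC expanded into COMPRESSION
# and VERSION as done above (externalnames=True keeps the human names):
#
#   {'URL': 'https://example.com/full.hg',
#    'BUNDLESPEC': 'gzip-v1',
#    'COMPRESSION': 'gzip',
#    'VERSION': 'v1',
#    'REQUIRESNI': 'true'}
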
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers', default=[])
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    # Our sort function.
    def compareentry(a, b):
        for prefkey, prefvalue in prefers:
            avalue = a.get(prefkey)
            bvalue = b.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless the attribute is present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    return sorted(entries, cmp=compareentry)

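# Usage sketch (editor's note): with a configuration like
#
#   [ui]
#   clonebundleprefers = COMPRESSION=gzip, VERSION=packed1
#
# entries whose COMPRESSION is exactly 'gzip' sort before all others; ties
# are then broken by VERSION=packed1, and entries still tied keep their
# manifest order (compareentry returns 0).
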
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    lock = repo.lock()
    try:
        tr = repo.transaction('bundleurl')
        try:
            try:
                fh = urlmod.open(ui, url)
                cg = readbundle(ui, fh, 'stream')

                if isinstance(cg, bundle2.unbundle20):
                    bundle2.processbundle(repo, cg, lambda: tr)
                elif isinstance(cg, streamclone.streamcloneapplier):
                    cg.apply(repo)
                else:
                    cg.apply(repo, 'clonebundles', url)
                tr.close()
                return True
            except urllib2.HTTPError as e:
                ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
            except urllib2.URLError as e:
                ui.warn(_('error fetching bundle: %s\n') % e.reason[1])

            return False
        finally:
            tr.release()
    finally:
        lock.release()