exchange: reject new compression engines for v1 bundles (issue5506)...
Gregory Szorc
r31473:ffed3bf5 stable
@@ -1,2011 +1,2020 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullid,
17 17 )
18 18 from . import (
19 19 base85,
20 20 bookmarks as bookmod,
21 21 bundle2,
22 22 changegroup,
23 23 discovery,
24 24 error,
25 25 lock as lockmod,
26 26 obsolete,
27 27 phases,
28 28 pushkey,
29 29 scmutil,
30 30 sslutil,
31 31 streamclone,
32 32 tags,
33 33 url as urlmod,
34 34 util,
35 35 )
36 36
37 37 urlerr = util.urlerr
38 38 urlreq = util.urlreq
39 39
40 40 # Maps bundle version human names to changegroup versions.
41 41 _bundlespeccgversions = {'v1': '01',
42 42 'v2': '02',
43 43 'packed1': 's1',
44 44 'bundle2': '02', #legacy
45 45 }
46 46
47 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
48 _bundlespecv1compengines = set(['gzip', 'bzip2', 'none'])
49
47 50 def parsebundlespec(repo, spec, strict=True, externalnames=False):
48 51 """Parse a bundle string specification into parts.
49 52
50 53 Bundle specifications denote a well-defined bundle/exchange format.
51 54 The content of a given specification should not change over time in
52 55 order to ensure that bundles produced by a newer version of Mercurial are
53 56 readable from an older version.
54 57
55 58 The string currently has the form:
56 59
57 60 <compression>-<type>[;<parameter0>[;<parameter1>]]
58 61
59 62 Where <compression> is one of the supported compression formats
60 63 and <type> is (currently) a version string. A ";" can follow the type and
61 64 all text afterwards is interpreted as URI encoded, ";" delimited key=value
62 65 pairs.
63 66
64 67 If ``strict`` is True (the default) <compression> is required. Otherwise,
65 68 it is optional.
66 69
67 70 If ``externalnames`` is False (the default), the human-centric names will
68 71 be converted to their internal representation.
69 72
70 73 Returns a 3-tuple of (compression, version, parameters). Compression will
71 74 be ``None`` if not in strict mode and a compression isn't defined.
72 75
73 76 An ``InvalidBundleSpecification`` is raised when the specification is
74 77 not syntactically well formed.
75 78
76 79 An ``UnsupportedBundleSpecification`` is raised when the compression or
77 80 bundle type/version is not recognized.
78 81
79 82 Note: this function will likely eventually return a more complex data
80 83 structure, including bundle2 part information.
81 84 """
82 85 def parseparams(s):
83 86 if ';' not in s:
84 87 return s, {}
85 88
86 89 params = {}
87 90 version, paramstr = s.split(';', 1)
88 91
89 92 for p in paramstr.split(';'):
90 93 if '=' not in p:
91 94 raise error.InvalidBundleSpecification(
92 95 _('invalid bundle specification: '
93 96 'missing "=" in parameter: %s') % p)
94 97
95 98 key, value = p.split('=', 1)
96 99 key = urlreq.unquote(key)
97 100 value = urlreq.unquote(value)
98 101 params[key] = value
99 102
100 103 return version, params
101 104
102 105
103 106 if strict and '-' not in spec:
104 107 raise error.InvalidBundleSpecification(
105 108 _('invalid bundle specification; '
106 109 'must be prefixed with compression: %s') % spec)
107 110
108 111 if '-' in spec:
109 112 compression, version = spec.split('-', 1)
110 113
111 114 if compression not in util.compengines.supportedbundlenames:
112 115 raise error.UnsupportedBundleSpecification(
113 116 _('%s compression is not supported') % compression)
114 117
115 118 version, params = parseparams(version)
116 119
117 120 if version not in _bundlespeccgversions:
118 121 raise error.UnsupportedBundleSpecification(
119 122 _('%s is not a recognized bundle version') % version)
120 123 else:
121 124 # Value could be just the compression or just the version, in which
122 125 # case some defaults are assumed (but only when not in strict mode).
123 126 assert not strict
124 127
125 128 spec, params = parseparams(spec)
126 129
127 130 if spec in util.compengines.supportedbundlenames:
128 131 compression = spec
129 132 version = 'v1'
130 133 if 'generaldelta' in repo.requirements:
131 134 version = 'v2'
132 135 elif spec in _bundlespeccgversions:
133 136 if spec == 'packed1':
134 137 compression = 'none'
135 138 else:
136 139 compression = 'bzip2'
137 140 version = spec
138 141 else:
139 142 raise error.UnsupportedBundleSpecification(
140 143 _('%s is not a recognized bundle specification') % spec)
141 144
145 # Bundle version 1 only supports a known set of compression engines.
146 if version == 'v1' and compression not in _bundlespecv1compengines:
147 raise error.UnsupportedBundleSpecification(
148 _('compression engine %s is not supported on v1 bundles') %
149 compression)
150
142 151 # The specification for packed1 can optionally declare the data formats
143 152 # required to apply it. If we see this metadata, compare against what the
144 153 # repo supports and error if the bundle isn't compatible.
145 154 if version == 'packed1' and 'requirements' in params:
146 155 requirements = set(params['requirements'].split(','))
147 156 missingreqs = requirements - repo.supportedformats
148 157 if missingreqs:
149 158 raise error.UnsupportedBundleSpecification(
150 159 _('missing support for repository features: %s') %
151 160 ', '.join(sorted(missingreqs)))
152 161
153 162 if not externalnames:
154 163 engine = util.compengines.forbundlename(compression)
155 164 compression = engine.bundletype()[1]
156 165 version = _bundlespeccgversions[version]
157 166 return compression, version, params
158 167
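
The new guard above is the substance of issue5506: the legacy HG10 bundle header only has a two-byte compression field that knows BZ, GZ, and UN, so newer engines such as zstd can never be valid for v1 bundles. A minimal standalone sketch of the check, with `supported` standing in for util.compengines.supportedbundlenames:

supported = set(['gzip', 'bzip2', 'none', 'zstd'])
v1engines = set(['gzip', 'bzip2', 'none'])   # mirrors _bundlespecv1compengines

def checkspec(spec):
    compression, version = spec.split('-', 1)
    if compression not in supported:
        return '%s compression is not supported' % compression
    if version == 'v1' and compression not in v1engines:
        return 'compression engine %s is not supported on v1 bundles' % compression
    return 'ok'

assert checkspec('gzip-v1') == 'ok'
assert checkspec('zstd-v2') == 'ok'
assert checkspec('zstd-v1') != 'ok'   # the case this changeset now rejects
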
159 168 def readbundle(ui, fh, fname, vfs=None):
160 169 header = changegroup.readexactly(fh, 4)
161 170
162 171 alg = None
163 172 if not fname:
164 173 fname = "stream"
165 174 if not header.startswith('HG') and header.startswith('\0'):
166 175 fh = changegroup.headerlessfixup(fh, header)
167 176 header = "HG10"
168 177 alg = 'UN'
169 178 elif vfs:
170 179 fname = vfs.join(fname)
171 180
172 181 magic, version = header[0:2], header[2:4]
173 182
174 183 if magic != 'HG':
175 184 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
176 185 if version == '10':
177 186 if alg is None:
178 187 alg = changegroup.readexactly(fh, 2)
179 188 return changegroup.cg1unpacker(fh, alg)
180 189 elif version.startswith('2'):
181 190 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
182 191 elif version == 'S1':
183 192 return streamclone.streamcloneapplier(fh)
184 193 else:
185 194 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
186 195
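
For reference, readbundle() dispatches on the four-byte magic string: 'HG10' selects the legacy changegroup unpacker (a two-byte compression code follows), 'HG2x' selects bundle2, and 'HGS1' a stream clone. A sketch of just the classification:

def classify(header):
    # header is the first four bytes of the stream, as read above
    magic, version = header[0:2], header[2:4]
    if magic != 'HG':
        return 'not a Mercurial bundle'
    if version == '10':
        return 'changegroup v1 (2-byte compression code follows)'
    if version.startswith('2'):
        return 'bundle2'
    if version == 'S1':
        return 'stream clone'
    return 'unknown bundle version'

assert classify('HG20') == 'bundle2'
assert classify('HGS1') == 'stream clone'
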
187 196 def getbundlespec(ui, fh):
188 197 """Infer the bundlespec from a bundle file handle.
189 198
190 199 The input file handle is seeked and the original seek position is not
191 200 restored.
192 201 """
193 202 def speccompression(alg):
194 203 try:
195 204 return util.compengines.forbundletype(alg).bundletype()[0]
196 205 except KeyError:
197 206 return None
198 207
199 208 b = readbundle(ui, fh, None)
200 209 if isinstance(b, changegroup.cg1unpacker):
201 210 alg = b._type
202 211 if alg == '_truncatedBZ':
203 212 alg = 'BZ'
204 213 comp = speccompression(alg)
205 214 if not comp:
206 215 raise error.Abort(_('unknown compression algorithm: %s') % alg)
207 216 return '%s-v1' % comp
208 217 elif isinstance(b, bundle2.unbundle20):
209 218 if 'Compression' in b.params:
210 219 comp = speccompression(b.params['Compression'])
211 220 if not comp:
212 221 raise error.Abort(_('unknown compression algorithm: %s') % comp)
213 222 else:
214 223 comp = 'none'
215 224
216 225 version = None
217 226 for part in b.iterparts():
218 227 if part.type == 'changegroup':
219 228 version = part.params['version']
220 229 if version in ('01', '02'):
221 230 version = 'v2'
222 231 else:
223 232 raise error.Abort(_('changegroup version %s does not have '
224 233 'a known bundlespec') % version,
225 234 hint=_('try upgrading your Mercurial '
226 235 'client'))
227 236
228 237 if not version:
229 238 raise error.Abort(_('could not identify changegroup version in '
230 239 'bundle'))
231 240
232 241 return '%s-%s' % (comp, version)
233 242 elif isinstance(b, streamclone.streamcloneapplier):
234 243 requirements = streamclone.readbundle1header(fh)[2]
235 244 params = 'requirements=%s' % ','.join(sorted(requirements))
236 245 return 'none-packed1;%s' % urlreq.quote(params)
237 246 else:
238 247 raise error.Abort(_('unknown bundle type: %s') % b)
239 248
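
getbundlespec() is the inverse of parsebundlespec(), producing strings such as 'bzip2-v1' or, for stream clones, 'none-packed1' with URI-encoded parameters. A sketch of how the packed1 parameter string above is assembled, using urllib.parse.quote as a stand-in for urlreq.quote:

from urllib.parse import quote   # stand-in for urlreq.quote

requirements = ['revlogv1', 'generaldelta']
params = 'requirements=%s' % ','.join(sorted(requirements))
spec = 'none-packed1;%s' % quote(params)
assert spec == 'none-packed1;requirements%3Dgeneraldelta%2Crevlogv1'
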
240 249 def buildobsmarkerspart(bundler, markers):
241 250 """add an obsmarker part to the bundler with <markers>
242 251
243 252 No part is created if markers is empty.
244 253 Raises ValueError if the bundler doesn't support any known obsmarker format.
245 254 """
246 255 if markers:
247 256 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
248 257 version = obsolete.commonversion(remoteversions)
249 258 if version is None:
250 259 raise ValueError('bundler does not support common obsmarker format')
251 260 stream = obsolete.encodemarkers(markers, True, version=version)
252 261 return bundler.newpart('obsmarkers', data=stream)
253 262 return None
254 263
255 264 def _computeoutgoing(repo, heads, common):
256 265 """Computes which revs are outgoing given a set of common
257 266 and a set of heads.
258 267
259 268 This is a separate function so extensions can have access to
260 269 the logic.
261 270
262 271 Returns a discovery.outgoing object.
263 272 """
264 273 cl = repo.changelog
265 274 if common:
266 275 hasnode = cl.hasnode
267 276 common = [n for n in common if hasnode(n)]
268 277 else:
269 278 common = [nullid]
270 279 if not heads:
271 280 heads = cl.heads()
272 281 return discovery.outgoing(repo, common, heads)
273 282
274 283 def _forcebundle1(op):
275 284 """return true if a pull/push must use bundle1
276 285
277 286 This function is used to allow testing of the older bundle version"""
278 287 ui = op.repo.ui
279 288 forcebundle1 = False
280 289 # The goal of this config is to allow developers to choose the bundle
281 290 # version used during exchange. This is especially handy during tests.
282 291 # Value is a list of bundle versions to pick from; the highest version
283 292 # should be used.
284 293 #
285 294 # developer config: devel.legacy.exchange
286 295 exchange = ui.configlist('devel', 'legacy.exchange')
287 296 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
288 297 return forcebundle1 or not op.remote.capable('bundle2')
289 298
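
The precedence implemented above: listing 'bundle2' in devel.legacy.exchange always beats 'bundle1', and an empty list defers to the remote's capabilities. A compact sketch:

def forced(exchange, remotehasbundle2):
    # 'exchange' is the devel.legacy.exchange list; both names are stand-ins
    forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
    return forcebundle1 or not remotehasbundle2

assert not forced([], True)                      # default: use bundle2
assert forced(['bundle1'], True)                 # explicit downgrade
assert not forced(['bundle1', 'bundle2'], True)  # bundle2 wins
assert forced([], False)                         # old remote: bundle1 anyway
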
290 299 class pushoperation(object):
291 300 """A object that represent a single push operation
292 301
293 302 Its purpose is to carry push related state and very common operations.
294 303
295 304 A new pushoperation should be created at the beginning of each push and
296 305 discarded afterward.
297 306 """
298 307
299 308 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
300 309 bookmarks=()):
301 310 # repo we push from
302 311 self.repo = repo
303 312 self.ui = repo.ui
304 313 # repo we push to
305 314 self.remote = remote
306 315 # force option provided
307 316 self.force = force
308 317 # revs to be pushed (None is "all")
309 318 self.revs = revs
310 319 # bookmark explicitly pushed
311 320 self.bookmarks = bookmarks
312 321 # allow push of new branch
313 322 self.newbranch = newbranch
314 323 # did a local lock get acquired?
315 324 self.locallocked = None
316 325 # steps already performed
317 326 # (used to check what steps have already been performed through bundle2)
318 327 self.stepsdone = set()
319 328 # Integer version of the changegroup push result
320 329 # - None means nothing to push
321 330 # - 0 means HTTP error
322 331 # - 1 means we pushed and remote head count is unchanged *or*
323 332 # we have outgoing changesets but refused to push
324 333 # - other values as described by addchangegroup()
325 334 self.cgresult = None
326 335 # Boolean value for the bookmark push
327 336 self.bkresult = None
328 337 # discover.outgoing object (contains common and outgoing data)
329 338 self.outgoing = None
330 339 # all remote heads before the push
331 340 self.remoteheads = None
332 341 # testable as a boolean indicating if any nodes are missing locally.
333 342 self.incoming = None
334 343 # phase changes that must be pushed alongside the changesets
335 344 self.outdatedphases = None
336 345 # phase changes that must be pushed if the changeset push fails
337 346 self.fallbackoutdatedphases = None
338 347 # outgoing obsmarkers
339 348 self.outobsmarkers = set()
340 349 # outgoing bookmarks
341 350 self.outbookmarks = []
342 351 # transaction manager
343 352 self.trmanager = None
344 353 # map { pushkey partid -> callback handling failure}
345 354 # used to handle exception from mandatory pushkey part failure
346 355 self.pkfailcb = {}
347 356
348 357 @util.propertycache
349 358 def futureheads(self):
350 359 """future remote heads if the changeset push succeeds"""
351 360 return self.outgoing.missingheads
352 361
353 362 @util.propertycache
354 363 def fallbackheads(self):
355 364 """future remote heads if the changeset push fails"""
356 365 if self.revs is None:
357 366 # no target to push; all common heads are relevant
358 367 return self.outgoing.commonheads
359 368 unfi = self.repo.unfiltered()
360 369 # I want cheads = heads(::missingheads and ::commonheads)
361 370 # (missingheads is revs with secret changeset filtered out)
362 371 #
363 372 # This can be expressed as:
364 373 # cheads = ( (missingheads and ::commonheads)
365 374 # + (commonheads and ::missingheads)
366 375 # )
367 376 #
368 377 # while trying to push we already computed the following:
369 378 # common = (::commonheads)
370 379 # missing = ((commonheads::missingheads) - commonheads)
371 380 #
372 381 # We can pick:
373 382 # * missingheads part of common (::commonheads)
374 383 common = self.outgoing.common
375 384 nm = self.repo.changelog.nodemap
376 385 cheads = [node for node in self.revs if nm[node] in common]
377 386 # and
378 387 # * commonheads parents on missing
379 388 revset = unfi.set('%ln and parents(roots(%ln))',
380 389 self.outgoing.commonheads,
381 390 self.outgoing.missing)
382 391 cheads.extend(c.node() for c in revset)
383 392 return cheads
384 393
385 394 @property
386 395 def commonheads(self):
387 396 """set of all common heads after changeset bundle push"""
388 397 if self.cgresult:
389 398 return self.futureheads
390 399 else:
391 400 return self.fallbackheads
392 401
393 402 # mapping of message used when pushing bookmark
394 403 bookmsgmap = {'update': (_("updating bookmark %s\n"),
395 404 _('updating bookmark %s failed!\n')),
396 405 'export': (_("exporting bookmark %s\n"),
397 406 _('exporting bookmark %s failed!\n')),
398 407 'delete': (_("deleting remote bookmark %s\n"),
399 408 _('deleting remote bookmark %s failed!\n')),
400 409 }
401 410
402 411
403 412 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
404 413 opargs=None):
405 414 '''Push outgoing changesets (limited by revs) from a local
406 415 repository to remote. Return an integer:
407 416 - None means nothing to push
408 417 - 0 means HTTP error
409 418 - 1 means we pushed and remote head count is unchanged *or*
410 419 we have outgoing changesets but refused to push
411 420 - other values as described by addchangegroup()
412 421 '''
413 422 if opargs is None:
414 423 opargs = {}
415 424 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
416 425 **opargs)
417 426 if pushop.remote.local():
418 427 missing = (set(pushop.repo.requirements)
419 428 - pushop.remote.local().supported)
420 429 if missing:
421 430 msg = _("required features are not"
422 431 " supported in the destination:"
423 432 " %s") % (', '.join(sorted(missing)))
424 433 raise error.Abort(msg)
425 434
426 435 # there are two ways to push to remote repo:
427 436 #
428 437 # addchangegroup assumes local user can lock remote
429 438 # repo (local filesystem, old ssh servers).
430 439 #
431 440 # unbundle assumes local user cannot lock remote repo (new ssh
432 441 # servers, http servers).
433 442
434 443 if not pushop.remote.canpush():
435 444 raise error.Abort(_("destination does not support push"))
436 445 # get local lock as we might write phase data
437 446 localwlock = locallock = None
438 447 try:
439 448 # bundle2 push may receive a reply bundle touching bookmarks or other
440 449 # things requiring the wlock. Take it now to ensure proper ordering.
441 450 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
442 451 if (not _forcebundle1(pushop)) and maypushback:
443 452 localwlock = pushop.repo.wlock()
444 453 locallock = pushop.repo.lock()
445 454 pushop.locallocked = True
446 455 except IOError as err:
447 456 pushop.locallocked = False
448 457 if err.errno != errno.EACCES:
449 458 raise
450 459 # source repo cannot be locked.
451 460 # We do not abort the push, but just disable the local phase
452 461 # synchronisation.
453 462 msg = 'cannot lock source repository: %s\n' % err
454 463 pushop.ui.debug(msg)
455 464 try:
456 465 if pushop.locallocked:
457 466 pushop.trmanager = transactionmanager(pushop.repo,
458 467 'push-response',
459 468 pushop.remote.url())
460 469 pushop.repo.checkpush(pushop)
461 470 lock = None
462 471 unbundle = pushop.remote.capable('unbundle')
463 472 if not unbundle:
464 473 lock = pushop.remote.lock()
465 474 try:
466 475 _pushdiscovery(pushop)
467 476 if not _forcebundle1(pushop):
468 477 _pushbundle2(pushop)
469 478 _pushchangeset(pushop)
470 479 _pushsyncphase(pushop)
471 480 _pushobsolete(pushop)
472 481 _pushbookmark(pushop)
473 482 finally:
474 483 if lock is not None:
475 484 lock.release()
476 485 if pushop.trmanager:
477 486 pushop.trmanager.close()
478 487 finally:
479 488 if pushop.trmanager:
480 489 pushop.trmanager.release()
481 490 if locallock is not None:
482 491 locallock.release()
483 492 if localwlock is not None:
484 493 localwlock.release()
485 494
486 495 return pushop
487 496
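
A small helper, purely illustrative, translating the cgresult codes listed in push()'s docstring:

def describecgresult(r):
    # meanings taken from the push() docstring above
    if r is None:
        return 'nothing to push'
    if r == 0:
        return 'HTTP error'
    if r == 1:
        return 'pushed; remote head count unchanged (or push was refused)'
    return 'addchangegroup() returned %r' % r

assert describecgresult(None) == 'nothing to push'
assert describecgresult(0) == 'HTTP error'
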
488 497 # list of steps to perform discovery before push
489 498 pushdiscoveryorder = []
490 499
491 500 # Mapping between step name and function
492 501 #
493 502 # This exists to help extensions wrap steps if necessary
494 503 pushdiscoverymapping = {}
495 504
496 505 def pushdiscovery(stepname):
497 506 """decorator for function performing discovery before push
498 507
499 508 The function is added to the step -> function mapping and appended to the
500 509 list of steps. Beware that decorated functions will be added in order (this
501 510 may matter).
502 511 
503 512 You can only use this decorator for a new step; if you want to wrap a step
504 513 from an extension, change the pushdiscoverymapping dictionary directly."""
505 514 def dec(func):
506 515 assert stepname not in pushdiscoverymapping
507 516 pushdiscoverymapping[stepname] = func
508 517 pushdiscoveryorder.append(stepname)
509 518 return func
510 519 return dec
511 520
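
As an illustration, a hypothetical extension would register an extra discovery step like this; it runs after the built-in steps because registration order is preserved:

@pushdiscovery('examplestep')        # hypothetical step name
def _pushdiscoveryexample(pushop):
    pushop.ui.debug('example discovery step ran\n')
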
512 521 def _pushdiscovery(pushop):
513 522 """Run all discovery steps"""
514 523 for stepname in pushdiscoveryorder:
515 524 step = pushdiscoverymapping[stepname]
516 525 step(pushop)
517 526
518 527 @pushdiscovery('changeset')
519 528 def _pushdiscoverychangeset(pushop):
520 529 """discover the changeset that need to be pushed"""
521 530 fci = discovery.findcommonincoming
522 531 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
523 532 common, inc, remoteheads = commoninc
524 533 fco = discovery.findcommonoutgoing
525 534 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
526 535 commoninc=commoninc, force=pushop.force)
527 536 pushop.outgoing = outgoing
528 537 pushop.remoteheads = remoteheads
529 538 pushop.incoming = inc
530 539
531 540 @pushdiscovery('phase')
532 541 def _pushdiscoveryphase(pushop):
533 542 """discover the phase that needs to be pushed
534 543
535 544 (computed for both the success and failure cases of the changesets push)"""
536 545 outgoing = pushop.outgoing
537 546 unfi = pushop.repo.unfiltered()
538 547 remotephases = pushop.remote.listkeys('phases')
539 548 publishing = remotephases.get('publishing', False)
540 549 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
541 550 and remotephases # server supports phases
542 551 and not pushop.outgoing.missing # no changesets to be pushed
543 552 and publishing):
544 553 # When:
545 554 # - this is a subrepo push
546 555 # - and the remote supports phases
547 556 # - and no changesets are to be pushed
548 557 # - and the remote is publishing
549 558 # we may be in the issue 3871 case!
550 559 # We drop the phase synchronisation done out of courtesy, which
551 560 # would publish changesets that are possibly still draft
552 561 # on the remote.
553 562 remotephases = {'publishing': 'True'}
554 563 ana = phases.analyzeremotephases(pushop.repo,
555 564 pushop.fallbackheads,
556 565 remotephases)
557 566 pheads, droots = ana
558 567 extracond = ''
559 568 if not publishing:
560 569 extracond = ' and public()'
561 570 revset = 'heads((%%ln::%%ln) %s)' % extracond
562 571 # Get the list of all revs that are draft on the remote but public here.
563 572 # XXX Beware that this revset breaks if droots is not strictly
564 573 # XXX roots; we may want to ensure it is, but that is costly
565 574 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
566 575 if not outgoing.missing:
567 576 future = fallback
568 577 else:
569 578 # add the changesets we are going to push as draft
570 579 #
571 580 # should not be necessary for a publishing server, but because of an
572 581 # issue fixed in xxxxx we have to do it anyway.
573 582 fdroots = list(unfi.set('roots(%ln + %ln::)',
574 583 outgoing.missing, droots))
575 584 fdroots = [f.node() for f in fdroots]
576 585 future = list(unfi.set(revset, fdroots, pushop.futureheads))
577 586 pushop.outdatedphases = future
578 587 pushop.fallbackoutdatedphases = fallback
579 588
580 589 @pushdiscovery('obsmarker')
581 590 def _pushdiscoveryobsmarkers(pushop):
582 591 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
583 592 and pushop.repo.obsstore
584 593 and 'obsolete' in pushop.remote.listkeys('namespaces')):
585 594 repo = pushop.repo
586 595 # Very naive computation; it can be quite expensive on a big repo.
587 596 # However, evolution is currently slow on big repos anyway.
588 597 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
589 598 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
590 599
591 600 @pushdiscovery('bookmarks')
592 601 def _pushdiscoverybookmarks(pushop):
593 602 ui = pushop.ui
594 603 repo = pushop.repo.unfiltered()
595 604 remote = pushop.remote
596 605 ui.debug("checking for updated bookmarks\n")
597 606 ancestors = ()
598 607 if pushop.revs:
599 608 revnums = map(repo.changelog.rev, pushop.revs)
600 609 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
601 610 remotebookmark = remote.listkeys('bookmarks')
602 611
603 612 explicit = set([repo._bookmarks.expandname(bookmark)
604 613 for bookmark in pushop.bookmarks])
605 614
606 615 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
607 616 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
608 617
609 618 def safehex(x):
610 619 if x is None:
611 620 return x
612 621 return hex(x)
613 622
614 623 def hexifycompbookmarks(bookmarks):
615 624 for b, scid, dcid in bookmarks:
616 625 yield b, safehex(scid), safehex(dcid)
617 626
618 627 comp = [hexifycompbookmarks(marks) for marks in comp]
619 628 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
620 629
621 630 for b, scid, dcid in advsrc:
622 631 if b in explicit:
623 632 explicit.remove(b)
624 633 if not ancestors or repo[scid].rev() in ancestors:
625 634 pushop.outbookmarks.append((b, dcid, scid))
626 635 # search for added bookmarks
627 636 for b, scid, dcid in addsrc:
628 637 if b in explicit:
629 638 explicit.remove(b)
630 639 pushop.outbookmarks.append((b, '', scid))
631 640 # search for overwritten bookmarks
632 641 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
633 642 if b in explicit:
634 643 explicit.remove(b)
635 644 pushop.outbookmarks.append((b, dcid, scid))
636 645 # search for bookmarks to delete
637 646 for b, scid, dcid in adddst:
638 647 if b in explicit:
639 648 explicit.remove(b)
640 649 # treat as "deleted locally"
641 650 pushop.outbookmarks.append((b, dcid, ''))
642 651 # identical bookmarks shouldn't get reported
643 652 for b, scid, dcid in same:
644 653 if b in explicit:
645 654 explicit.remove(b)
646 655
647 656 if explicit:
648 657 explicit = sorted(explicit)
649 658 # we should probably list all of them
650 659 ui.warn(_('bookmark %s does not exist on the local '
651 660 'or remote repository!\n') % explicit[0])
652 661 pushop.bkresult = 2
653 662
654 663 pushop.outbookmarks.sort()
655 664
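
The (book, old, new) triples collected in pushop.outbookmarks above use empty strings as sentinels: an empty old id means the bookmark is new to the remote (export), an empty new id means it should be deleted there. Illustrative values, with shortened hex node ids:

outbookmarks = [
    ('feature', '', 'aaaa'),       # only local  -> export to remote
    ('stale', 'bbbb', ''),         # only remote -> delete on remote
    ('master', 'bbbb', 'aaaa'),    # moved       -> update on remote
]
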
656 665 def _pushcheckoutgoing(pushop):
657 666 outgoing = pushop.outgoing
658 667 unfi = pushop.repo.unfiltered()
659 668 if not outgoing.missing:
660 669 # nothing to push
661 670 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
662 671 return False
663 672 # something to push
664 673 if not pushop.force:
665 674 # if repo.obsstore is falsy --> no obsolete markers,
666 675 # so we can skip the iteration
667 676 if unfi.obsstore:
668 677 # these messages are defined here for 80-char limit reasons
669 678 mso = _("push includes obsolete changeset: %s!")
670 679 mst = {"unstable": _("push includes unstable changeset: %s!"),
671 680 "bumped": _("push includes bumped changeset: %s!"),
672 681 "divergent": _("push includes divergent changeset: %s!")}
673 682 # If there is at least one obsolete or unstable
674 683 # changeset in missing, at least one of the
675 684 # missing heads will be obsolete or unstable.
676 685 # So checking heads only is ok.
677 686 for node in outgoing.missingheads:
678 687 ctx = unfi[node]
679 688 if ctx.obsolete():
680 689 raise error.Abort(mso % ctx)
681 690 elif ctx.troubled():
682 691 raise error.Abort(mst[ctx.troubles()[0]] % ctx)
683 692
684 693 discovery.checkheads(pushop)
685 694 return True
686 695
687 696 # List of names of steps to perform for an outgoing bundle2, order matters.
688 697 b2partsgenorder = []
689 698
690 699 # Mapping between step name and function
691 700 #
692 701 # This exists to help extensions wrap steps if necessary
693 702 b2partsgenmapping = {}
694 703
695 704 def b2partsgenerator(stepname, idx=None):
696 705 """decorator for function generating bundle2 part
697 706
698 707 The function is added to the step -> function mapping and appended to the
699 708 list of steps. Beware that decorated functions will be added in order
700 709 (this may matter).
701 710
702 711 You can only use this decorator for new steps; if you want to wrap a step
703 712 from an extension, change the b2partsgenmapping dictionary directly."""
704 713 def dec(func):
705 714 assert stepname not in b2partsgenmapping
706 715 b2partsgenmapping[stepname] = func
707 716 if idx is None:
708 717 b2partsgenorder.append(stepname)
709 718 else:
710 719 b2partsgenorder.insert(idx, stepname)
711 720 return func
712 721 return dec
713 722
714 723 def _pushb2ctxcheckheads(pushop, bundler):
715 724 """Generate race condition checking parts
716 725
717 726 Exists as an independent function to aid extensions
718 727 """
719 728 if not pushop.force:
720 729 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
721 730
722 731 @b2partsgenerator('changeset')
723 732 def _pushb2ctx(pushop, bundler):
724 733 """handle changegroup push through bundle2
725 734
726 735 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
727 736 """
728 737 if 'changesets' in pushop.stepsdone:
729 738 return
730 739 pushop.stepsdone.add('changesets')
731 740 # Send known heads to the server for race detection.
732 741 if not _pushcheckoutgoing(pushop):
733 742 return
734 743 pushop.repo.prepushoutgoinghooks(pushop)
735 744
736 745 _pushb2ctxcheckheads(pushop, bundler)
737 746
738 747 b2caps = bundle2.bundle2caps(pushop.remote)
739 748 version = '01'
740 749 cgversions = b2caps.get('changegroup')
741 750 if cgversions: # 3.1 and 3.2 ship with an empty value
742 751 cgversions = [v for v in cgversions
743 752 if v in changegroup.supportedoutgoingversions(
744 753 pushop.repo)]
745 754 if not cgversions:
746 755 raise ValueError(_('no common changegroup version'))
747 756 version = max(cgversions)
748 757 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
749 758 pushop.outgoing,
750 759 version=version)
751 760 cgpart = bundler.newpart('changegroup', data=cg)
752 761 if cgversions:
753 762 cgpart.addparam('version', version)
754 763 if 'treemanifest' in pushop.repo.requirements:
755 764 cgpart.addparam('treemanifest', '1')
756 765 def handlereply(op):
757 766 """extract addchangegroup returns from server reply"""
758 767 cgreplies = op.records.getreplies(cgpart.id)
759 768 assert len(cgreplies['changegroup']) == 1
760 769 pushop.cgresult = cgreplies['changegroup'][0]['return']
761 770 return handlereply
762 771
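
The changegroup version negotiation above intersects what the remote advertises with what this repository can emit and picks the highest; 3.1/3.2 servers advertise an empty value, which keeps the default '01'. In miniature:

local = set(['01', '02'])    # stand-in for supportedoutgoingversions(repo)
remote = ['02', '01']        # stand-in for b2caps.get('changegroup')

version = '01'
if remote:
    common = [v for v in remote if v in local]
    if not common:
        raise ValueError('no common changegroup version')
    version = max(common)
assert version == '02'
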
763 772 @b2partsgenerator('phase')
764 773 def _pushb2phases(pushop, bundler):
765 774 """handle phase push through bundle2"""
766 775 if 'phases' in pushop.stepsdone:
767 776 return
768 777 b2caps = bundle2.bundle2caps(pushop.remote)
769 778 if not 'pushkey' in b2caps:
770 779 return
771 780 pushop.stepsdone.add('phases')
772 781 part2node = []
773 782
774 783 def handlefailure(pushop, exc):
775 784 targetid = int(exc.partid)
776 785 for partid, node in part2node:
777 786 if partid == targetid:
778 787 raise error.Abort(_('updating %s to public failed') % node)
779 788
780 789 enc = pushkey.encode
781 790 for newremotehead in pushop.outdatedphases:
782 791 part = bundler.newpart('pushkey')
783 792 part.addparam('namespace', enc('phases'))
784 793 part.addparam('key', enc(newremotehead.hex()))
785 794 part.addparam('old', enc(str(phases.draft)))
786 795 part.addparam('new', enc(str(phases.public)))
787 796 part2node.append((part.id, newremotehead))
788 797 pushop.pkfailcb[part.id] = handlefailure
789 798
790 799 def handlereply(op):
791 800 for partid, node in part2node:
792 801 partrep = op.records.getreplies(partid)
793 802 results = partrep['pushkey']
794 803 assert len(results) <= 1
795 804 msg = None
796 805 if not results:
797 806 msg = _('server ignored update of %s to public!\n') % node
798 807 elif not int(results[0]['return']):
799 808 msg = _('updating %s to public failed!\n') % node
800 809 if msg is not None:
801 810 pushop.ui.warn(msg)
802 811 return handlereply
803 812
804 813 @b2partsgenerator('obsmarkers')
805 814 def _pushb2obsmarkers(pushop, bundler):
806 815 if 'obsmarkers' in pushop.stepsdone:
807 816 return
808 817 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
809 818 if obsolete.commonversion(remoteversions) is None:
810 819 return
811 820 pushop.stepsdone.add('obsmarkers')
812 821 if pushop.outobsmarkers:
813 822 markers = sorted(pushop.outobsmarkers)
814 823 buildobsmarkerspart(bundler, markers)
815 824
816 825 @b2partsgenerator('bookmarks')
817 826 def _pushb2bookmarks(pushop, bundler):
818 827 """handle bookmark push through bundle2"""
819 828 if 'bookmarks' in pushop.stepsdone:
820 829 return
821 830 b2caps = bundle2.bundle2caps(pushop.remote)
822 831 if 'pushkey' not in b2caps:
823 832 return
824 833 pushop.stepsdone.add('bookmarks')
825 834 part2book = []
826 835 enc = pushkey.encode
827 836
828 837 def handlefailure(pushop, exc):
829 838 targetid = int(exc.partid)
830 839 for partid, book, action in part2book:
831 840 if partid == targetid:
832 841 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
833 842 # we should not be called for parts we did not generate
834 843 assert False
835 844
836 845 for book, old, new in pushop.outbookmarks:
837 846 part = bundler.newpart('pushkey')
838 847 part.addparam('namespace', enc('bookmarks'))
839 848 part.addparam('key', enc(book))
840 849 part.addparam('old', enc(old))
841 850 part.addparam('new', enc(new))
842 851 action = 'update'
843 852 if not old:
844 853 action = 'export'
845 854 elif not new:
846 855 action = 'delete'
847 856 part2book.append((part.id, book, action))
848 857 pushop.pkfailcb[part.id] = handlefailure
849 858
850 859 def handlereply(op):
851 860 ui = pushop.ui
852 861 for partid, book, action in part2book:
853 862 partrep = op.records.getreplies(partid)
854 863 results = partrep['pushkey']
855 864 assert len(results) <= 1
856 865 if not results:
857 866 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
858 867 else:
859 868 ret = int(results[0]['return'])
860 869 if ret:
861 870 ui.status(bookmsgmap[action][0] % book)
862 871 else:
863 872 ui.warn(bookmsgmap[action][1] % book)
864 873 if pushop.bkresult is not None:
865 874 pushop.bkresult = 1
866 875 return handlereply
867 876
868 877
869 878 def _pushbundle2(pushop):
870 879 """push data to the remote using bundle2
871 880
872 881 The only currently supported type of data is changegroup but this will
873 882 evolve in the future."""
874 883 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
875 884 pushback = (pushop.trmanager
876 885 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
877 886
878 887 # create reply capability
879 888 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
880 889 allowpushback=pushback))
881 890 bundler.newpart('replycaps', data=capsblob)
882 891 replyhandlers = []
883 892 for partgenname in b2partsgenorder:
884 893 partgen = b2partsgenmapping[partgenname]
885 894 ret = partgen(pushop, bundler)
886 895 if callable(ret):
887 896 replyhandlers.append(ret)
888 897 # do not push if nothing to push
889 898 if bundler.nbparts <= 1:
890 899 return
891 900 stream = util.chunkbuffer(bundler.getchunks())
892 901 try:
893 902 try:
894 903 reply = pushop.remote.unbundle(
895 904 stream, ['force'], pushop.remote.url())
896 905 except error.BundleValueError as exc:
897 906 raise error.Abort(_('missing support for %s') % exc)
898 907 try:
899 908 trgetter = None
900 909 if pushback:
901 910 trgetter = pushop.trmanager.transaction
902 911 op = bundle2.processbundle(pushop.repo, reply, trgetter)
903 912 except error.BundleValueError as exc:
904 913 raise error.Abort(_('missing support for %s') % exc)
905 914 except bundle2.AbortFromPart as exc:
906 915 pushop.ui.status(_('remote: %s\n') % exc)
907 916 if exc.hint is not None:
908 917 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
909 918 raise error.Abort(_('push failed on remote'))
910 919 except error.PushkeyFailed as exc:
911 920 partid = int(exc.partid)
912 921 if partid not in pushop.pkfailcb:
913 922 raise
914 923 pushop.pkfailcb[partid](pushop, exc)
915 924 for rephand in replyhandlers:
916 925 rephand(op)
917 926
918 927 def _pushchangeset(pushop):
919 928 """Make the actual push of changeset bundle to remote repo"""
920 929 if 'changesets' in pushop.stepsdone:
921 930 return
922 931 pushop.stepsdone.add('changesets')
923 932 if not _pushcheckoutgoing(pushop):
924 933 return
925 934 pushop.repo.prepushoutgoinghooks(pushop)
926 935 outgoing = pushop.outgoing
927 936 unbundle = pushop.remote.capable('unbundle')
928 937 # TODO: get bundlecaps from remote
929 938 bundlecaps = None
930 939 # create a changegroup from local
931 940 if pushop.revs is None and not (outgoing.excluded
932 941 or pushop.repo.changelog.filteredrevs):
933 942 # push everything,
934 943 # use the fast path, no race possible on push
935 944 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
936 945 cg = changegroup.getsubset(pushop.repo,
937 946 outgoing,
938 947 bundler,
939 948 'push',
940 949 fastpath=True)
941 950 else:
942 951 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
943 952 bundlecaps)
944 953
945 954 # apply changegroup to remote
946 955 if unbundle:
947 956 # local repo finds heads on server, finds out what
948 957 # revs it must push. once revs transferred, if server
949 958 # finds it has different heads (someone else won
950 959 # commit/push race), server aborts.
951 960 if pushop.force:
952 961 remoteheads = ['force']
953 962 else:
954 963 remoteheads = pushop.remoteheads
955 964 # ssh: return remote's addchangegroup()
956 965 # http: return remote's addchangegroup() or 0 for error
957 966 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
958 967 pushop.repo.url())
959 968 else:
960 969 # we return an integer indicating remote head count
961 970 # change
962 971 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
963 972 pushop.repo.url())
964 973
965 974 def _pushsyncphase(pushop):
966 975 """synchronise phase information locally and remotely"""
967 976 cheads = pushop.commonheads
968 977 # even when we don't push, exchanging phase data is useful
969 978 remotephases = pushop.remote.listkeys('phases')
970 979 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
971 980 and remotephases # server supports phases
972 981 and pushop.cgresult is None # nothing was pushed
973 982 and remotephases.get('publishing', False)):
974 983 # When:
975 984 # - this is a subrepo push
976 985 # - and the remote supports phases
977 986 # - and no changeset was pushed
978 987 # - and the remote is publishing
979 988 # we may be in the issue 3871 case!
980 989 # We drop the phase synchronisation done out of courtesy, which
981 990 # would publish changesets that are possibly still draft
982 991 # on the remote.
983 992 remotephases = {'publishing': 'True'}
984 993 if not remotephases: # old server, or public-only reply from a non-publishing one
985 994 _localphasemove(pushop, cheads)
986 995 # don't push any phase data as there is nothing to push
987 996 else:
988 997 ana = phases.analyzeremotephases(pushop.repo, cheads,
989 998 remotephases)
990 999 pheads, droots = ana
991 1000 ### Apply remote phase on local
992 1001 if remotephases.get('publishing', False):
993 1002 _localphasemove(pushop, cheads)
994 1003 else: # publish = False
995 1004 _localphasemove(pushop, pheads)
996 1005 _localphasemove(pushop, cheads, phases.draft)
997 1006 ### Apply local phase on remote
998 1007
999 1008 if pushop.cgresult:
1000 1009 if 'phases' in pushop.stepsdone:
1001 1010 # phases already pushed through bundle2
1002 1011 return
1003 1012 outdated = pushop.outdatedphases
1004 1013 else:
1005 1014 outdated = pushop.fallbackoutdatedphases
1006 1015
1007 1016 pushop.stepsdone.add('phases')
1008 1017
1009 1018 # filter heads already turned public by the push
1010 1019 outdated = [c for c in outdated if c.node() not in pheads]
1011 1020 # fallback to independent pushkey command
1012 1021 for newremotehead in outdated:
1013 1022 r = pushop.remote.pushkey('phases',
1014 1023 newremotehead.hex(),
1015 1024 str(phases.draft),
1016 1025 str(phases.public))
1017 1026 if not r:
1018 1027 pushop.ui.warn(_('updating %s to public failed!\n')
1019 1028 % newremotehead)
1020 1029
1021 1030 def _localphasemove(pushop, nodes, phase=phases.public):
1022 1031 """move <nodes> to <phase> in the local source repo"""
1023 1032 if pushop.trmanager:
1024 1033 phases.advanceboundary(pushop.repo,
1025 1034 pushop.trmanager.transaction(),
1026 1035 phase,
1027 1036 nodes)
1028 1037 else:
1029 1038 # repo is not locked, do not change any phases!
1030 1039 # Inform the user that phases should have been moved when
1031 1040 # applicable.
1032 1041 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1033 1042 phasestr = phases.phasenames[phase]
1034 1043 if actualmoves:
1035 1044 pushop.ui.status(_('cannot lock source repo, skipping '
1036 1045 'local %s phase update\n') % phasestr)
1037 1046
1038 1047 def _pushobsolete(pushop):
1039 1048 """utility function to push obsolete markers to a remote"""
1040 1049 if 'obsmarkers' in pushop.stepsdone:
1041 1050 return
1042 1051 repo = pushop.repo
1043 1052 remote = pushop.remote
1044 1053 pushop.stepsdone.add('obsmarkers')
1045 1054 if pushop.outobsmarkers:
1046 1055 pushop.ui.debug('try to push obsolete markers to remote\n')
1047 1056 rslts = []
1048 1057 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1049 1058 for key in sorted(remotedata, reverse=True):
1050 1059 # reverse sort to ensure we end with dump0
1051 1060 data = remotedata[key]
1052 1061 rslts.append(remote.pushkey('obsolete', key, '', data))
1053 1062 if [r for r in rslts if not r]:
1054 1063 msg = _('failed to push some obsolete markers!\n')
1055 1064 repo.ui.warn(msg)
1056 1065
1057 1066 def _pushbookmark(pushop):
1058 1067 """Update bookmark position on remote"""
1059 1068 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1060 1069 return
1061 1070 pushop.stepsdone.add('bookmarks')
1062 1071 ui = pushop.ui
1063 1072 remote = pushop.remote
1064 1073
1065 1074 for b, old, new in pushop.outbookmarks:
1066 1075 action = 'update'
1067 1076 if not old:
1068 1077 action = 'export'
1069 1078 elif not new:
1070 1079 action = 'delete'
1071 1080 if remote.pushkey('bookmarks', b, old, new):
1072 1081 ui.status(bookmsgmap[action][0] % b)
1073 1082 else:
1074 1083 ui.warn(bookmsgmap[action][1] % b)
1075 1084 # discovery can have set the value from an invalid entry
1076 1085 if pushop.bkresult is not None:
1077 1086 pushop.bkresult = 1
1078 1087
1079 1088 class pulloperation(object):
1080 1089 """A object that represent a single pull operation
1081 1090
1082 1091 It purpose is to carry pull related state and very common operation.
1083 1092
1084 1093 A new should be created at the beginning of each pull and discarded
1085 1094 afterward.
1086 1095 """
1087 1096
1088 1097 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1089 1098 remotebookmarks=None, streamclonerequested=None):
1090 1099 # repo we pull into
1091 1100 self.repo = repo
1092 1101 # repo we pull from
1093 1102 self.remote = remote
1094 1103 # revisions we try to pull (None is "all")
1095 1104 self.heads = heads
1096 1105 # bookmarks pulled explicitly
1097 1106 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1098 1107 for bookmark in bookmarks]
1099 1108 # do we force pull?
1100 1109 self.force = force
1101 1110 # whether a streaming clone was requested
1102 1111 self.streamclonerequested = streamclonerequested
1103 1112 # transaction manager
1104 1113 self.trmanager = None
1105 1114 # set of common changesets between local and remote before pull
1106 1115 self.common = None
1107 1116 # set of pulled heads
1108 1117 self.rheads = None
1109 1118 # list of missing changesets to fetch remotely
1110 1119 self.fetch = None
1111 1120 # remote bookmarks data
1112 1121 self.remotebookmarks = remotebookmarks
1113 1122 # result of changegroup pulling (used as return code by pull)
1114 1123 self.cgresult = None
1115 1124 # list of steps already done
1116 1125 self.stepsdone = set()
1117 1126 # Whether we attempted a clone from pre-generated bundles.
1118 1127 self.clonebundleattempted = False
1119 1128
1120 1129 @util.propertycache
1121 1130 def pulledsubset(self):
1122 1131 """heads of the set of changeset target by the pull"""
1123 1132 # compute target subset
1124 1133 if self.heads is None:
1125 1134 # We pulled every thing possible
1126 1135 # sync on everything common
1127 1136 c = set(self.common)
1128 1137 ret = list(self.common)
1129 1138 for n in self.rheads:
1130 1139 if n not in c:
1131 1140 ret.append(n)
1132 1141 return ret
1133 1142 else:
1134 1143 # We pulled a specific subset
1135 1144 # sync on this subset
1136 1145 return self.heads
1137 1146
1138 1147 @util.propertycache
1139 1148 def canusebundle2(self):
1140 1149 return not _forcebundle1(self)
1141 1150
1142 1151 @util.propertycache
1143 1152 def remotebundle2caps(self):
1144 1153 return bundle2.bundle2caps(self.remote)
1145 1154
1146 1155 def gettransaction(self):
1147 1156 # deprecated; talk to trmanager directly
1148 1157 return self.trmanager.transaction()
1149 1158
1150 1159 class transactionmanager(object):
1151 1160 """An object to manage the life cycle of a transaction
1152 1161
1153 1162 It creates the transaction on demand and calls the appropriate hooks when
1154 1163 closing the transaction."""
1155 1164 def __init__(self, repo, source, url):
1156 1165 self.repo = repo
1157 1166 self.source = source
1158 1167 self.url = url
1159 1168 self._tr = None
1160 1169
1161 1170 def transaction(self):
1162 1171 """Return an open transaction object, constructing if necessary"""
1163 1172 if not self._tr:
1164 1173 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1165 1174 self._tr = self.repo.transaction(trname)
1166 1175 self._tr.hookargs['source'] = self.source
1167 1176 self._tr.hookargs['url'] = self.url
1168 1177 return self._tr
1169 1178
1170 1179 def close(self):
1171 1180 """close transaction if created"""
1172 1181 if self._tr is not None:
1173 1182 self._tr.close()
1174 1183
1175 1184 def release(self):
1176 1185 """release transaction if created"""
1177 1186 if self._tr is not None:
1178 1187 self._tr.release()
1179 1188
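
A sketch of the transactionmanager life cycle with a tiny stand-in repo (real callers pass a localrepository and a peer URL):

class faketransaction(object):
    hookargs = {}
    def close(self):
        pass
    def release(self):
        pass

class fakerepo(object):
    def transaction(self, name):
        return faketransaction()

trmanager = transactionmanager(fakerepo(), 'pull', 'http://example.com/repo')
try:
    trmanager.transaction()   # created lazily on first use
    trmanager.close()         # commits and fires hooks on success
finally:
    trmanager.release()       # aborts if close() was never reached
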
1180 1189 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1181 1190 streamclonerequested=None):
1182 1191 """Fetch repository data from a remote.
1183 1192
1184 1193 This is the main function used to retrieve data from a remote repository.
1185 1194
1186 1195 ``repo`` is the local repository to clone into.
1187 1196 ``remote`` is a peer instance.
1188 1197 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1189 1198 default) means to pull everything from the remote.
1190 1199 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1191 1200 default, all remote bookmarks are pulled.
1192 1201 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1193 1202 initialization.
1194 1203 ``streamclonerequested`` is a boolean indicating whether a "streaming
1195 1204 clone" is requested. A "streaming clone" is essentially a raw file copy
1196 1205 of revlogs from the server. This only works when the local repository is
1197 1206 empty. The default value of ``None`` means to respect the server
1198 1207 configuration for preferring stream clones.
1199 1208
1200 1209 Returns the ``pulloperation`` created for this pull.
1201 1210 """
1202 1211 if opargs is None:
1203 1212 opargs = {}
1204 1213 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1205 1214 streamclonerequested=streamclonerequested, **opargs)
1206 1215 if pullop.remote.local():
1207 1216 missing = set(pullop.remote.requirements) - pullop.repo.supported
1208 1217 if missing:
1209 1218 msg = _("required features are not"
1210 1219 " supported in the destination:"
1211 1220 " %s") % (', '.join(sorted(missing)))
1212 1221 raise error.Abort(msg)
1213 1222
1214 1223 wlock = lock = None
1215 1224 try:
1216 1225 wlock = pullop.repo.wlock()
1217 1226 lock = pullop.repo.lock()
1218 1227 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1219 1228 streamclone.maybeperformlegacystreamclone(pullop)
1220 1229 # This should ideally be in _pullbundle2(). However, it needs to run
1221 1230 # before discovery to avoid extra work.
1222 1231 _maybeapplyclonebundle(pullop)
1223 1232 _pulldiscovery(pullop)
1224 1233 if pullop.canusebundle2:
1225 1234 _pullbundle2(pullop)
1226 1235 _pullchangeset(pullop)
1227 1236 _pullphase(pullop)
1228 1237 _pullbookmarks(pullop)
1229 1238 _pullobsolete(pullop)
1230 1239 pullop.trmanager.close()
1231 1240 finally:
1232 1241 lockmod.release(pullop.trmanager, lock, wlock)
1233 1242
1234 1243 return pullop
1235 1244
1236 1245 # list of steps to perform discovery before pull
1237 1246 pulldiscoveryorder = []
1238 1247
1239 1248 # Mapping between step name and function
1240 1249 #
1241 1250 # This exists to help extensions wrap steps if necessary
1242 1251 pulldiscoverymapping = {}
1243 1252
1244 1253 def pulldiscovery(stepname):
1245 1254 """decorator for function performing discovery before pull
1246 1255
1247 1256 The function is added to the step -> function mapping and appended to the
1248 1257 list of steps. Beware that decorated functions will be added in order (this
1249 1258 may matter).
1250 1259 
1251 1260 You can only use this decorator for a new step; if you want to wrap a step
1252 1261 from an extension, change the pulldiscoverymapping dictionary directly."""
1253 1262 def dec(func):
1254 1263 assert stepname not in pulldiscoverymapping
1255 1264 pulldiscoverymapping[stepname] = func
1256 1265 pulldiscoveryorder.append(stepname)
1257 1266 return func
1258 1267 return dec
1259 1268
1260 1269 def _pulldiscovery(pullop):
1261 1270 """Run all discovery steps"""
1262 1271 for stepname in pulldiscoveryorder:
1263 1272 step = pulldiscoverymapping[stepname]
1264 1273 step(pullop)
1265 1274
1266 1275 @pulldiscovery('b1:bookmarks')
1267 1276 def _pullbookmarkbundle1(pullop):
1268 1277 """fetch bookmark data in bundle1 case
1269 1278
1270 1279 If not using bundle2, we have to fetch bookmarks before changeset
1271 1280 discovery to reduce the chance and impact of race conditions."""
1272 1281 if pullop.remotebookmarks is not None:
1273 1282 return
1274 1283 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1275 1284 # all known bundle2 servers now support listkeys, but let's be nice to
1276 1285 # new implementations.
1277 1286 return
1278 1287 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1279 1288
1280 1289
1281 1290 @pulldiscovery('changegroup')
1282 1291 def _pulldiscoverychangegroup(pullop):
1283 1292 """discovery phase for the pull
1284 1293
1285 1294 Currently handles changeset discovery only; will change to handle all
1286 1295 discovery at some point."""
1287 1296 tmp = discovery.findcommonincoming(pullop.repo,
1288 1297 pullop.remote,
1289 1298 heads=pullop.heads,
1290 1299 force=pullop.force)
1291 1300 common, fetch, rheads = tmp
1292 1301 nm = pullop.repo.unfiltered().changelog.nodemap
1293 1302 if fetch and rheads:
1294 1303 # If a remote head is filtered locally, let's drop it from the unknown
1295 1304 # remote heads and put it back in common.
1296 1305 #
1297 1306 # This is a hackish solution to catch most of the "common but locally
1298 1307 # hidden" situations. We do not perform discovery on an unfiltered
1299 1308 # repository because it ends up doing a pathological number of round
1300 1309 # trips for a huge number of changesets we do not care about.
1301 1310 #
1302 1311 # If a set of such "common but filtered" changesets exists on the server
1303 1312 # but does not include a remote head, we will not be able to detect it.
1304 1313 scommon = set(common)
1305 1314 filteredrheads = []
1306 1315 for n in rheads:
1307 1316 if n in nm:
1308 1317 if n not in scommon:
1309 1318 common.append(n)
1310 1319 else:
1311 1320 filteredrheads.append(n)
1312 1321 if not filteredrheads:
1313 1322 fetch = []
1314 1323 rheads = filteredrheads
1315 1324 pullop.common = common
1316 1325 pullop.fetch = fetch
1317 1326 pullop.rheads = rheads
1318 1327
1319 1328 def _pullbundle2(pullop):
1320 1329 """pull data using bundle2
1321 1330
1322 1331 For now, the only supported data are changegroups."""
1323 1332 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1324 1333
1325 1334 streaming, streamreqs = streamclone.canperformstreamclone(pullop)
1326 1335
1327 1336 # pulling changegroup
1328 1337 pullop.stepsdone.add('changegroup')
1329 1338
1330 1339 kwargs['common'] = pullop.common
1331 1340 kwargs['heads'] = pullop.heads or pullop.rheads
1332 1341 kwargs['cg'] = pullop.fetch
1333 1342 if 'listkeys' in pullop.remotebundle2caps:
1334 1343 kwargs['listkeys'] = ['phases']
1335 1344 if pullop.remotebookmarks is None:
1336 1345 # make sure to always include bookmark data when migrating
1337 1346 # `hg incoming --bundle` to use this function.
1338 1347 kwargs['listkeys'].append('bookmarks')
1339 1348
1340 1349 # If this is a full pull / clone and the server supports the clone bundles
1341 1350 # feature, tell the server whether we attempted a clone bundle. The
1342 1351 # presence of this flag indicates the client supports clone bundles. This
1343 1352 # will enable the server to treat clients that support clone bundles
1344 1353 # differently from those that don't.
1345 1354 if (pullop.remote.capable('clonebundles')
1346 1355 and pullop.heads is None and list(pullop.common) == [nullid]):
1347 1356 kwargs['cbattempted'] = pullop.clonebundleattempted
1348 1357
1349 1358 if streaming:
1350 1359 pullop.repo.ui.status(_('streaming all changes\n'))
1351 1360 elif not pullop.fetch:
1352 1361 pullop.repo.ui.status(_("no changes found\n"))
1353 1362 pullop.cgresult = 0
1354 1363 else:
1355 1364 if pullop.heads is None and list(pullop.common) == [nullid]:
1356 1365 pullop.repo.ui.status(_("requesting all changes\n"))
1357 1366 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1358 1367 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1359 1368 if obsolete.commonversion(remoteversions) is not None:
1360 1369 kwargs['obsmarkers'] = True
1361 1370 pullop.stepsdone.add('obsmarkers')
1362 1371 _pullbundle2extraprepare(pullop, kwargs)
1363 1372 bundle = pullop.remote.getbundle('pull', **kwargs)
1364 1373 try:
1365 1374 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1366 1375 except bundle2.AbortFromPart as exc:
1367 1376 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1368 1377 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1369 1378 except error.BundleValueError as exc:
1370 1379 raise error.Abort(_('missing support for %s') % exc)
1371 1380
1372 1381 if pullop.fetch:
1373 1382 results = [cg['return'] for cg in op.records['changegroup']]
1374 1383 pullop.cgresult = changegroup.combineresults(results)
1375 1384
1376 1385 # processing phases change
1377 1386 for namespace, value in op.records['listkeys']:
1378 1387 if namespace == 'phases':
1379 1388 _pullapplyphases(pullop, value)
1380 1389
1381 1390 # processing bookmark update
1382 1391 for namespace, value in op.records['listkeys']:
1383 1392 if namespace == 'bookmarks':
1384 1393 pullop.remotebookmarks = value
1385 1394
1386 1395 # bookmark data were either already there or pulled in the bundle
1387 1396 if pullop.remotebookmarks is not None:
1388 1397 _pullbookmarks(pullop)
1389 1398
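
By the time getbundle is called above, the kwargs for a typical non-streaming bundle2 pull look roughly like this (values abbreviated, for illustration only):

kwargs = {
    'bundlecaps': set(['HG20', 'bundle2=...']),  # from caps20to10()
    'common': ['<common head nodes>'],
    'heads': ['<requested or remote heads>'],
    'cg': True,                                  # we do want a changegroup
    'listkeys': ['phases', 'bookmarks'],
    # 'obsmarkers': True,   # only when obsolescence exchange is enabled
}
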
1390 1399 def _pullbundle2extraprepare(pullop, kwargs):
1391 1400 """hook function so that extensions can extend the getbundle call"""
1392 1401 pass
1393 1402
1394 1403 def _pullchangeset(pullop):
1395 1404 """pull changeset from unbundle into the local repo"""
1396 1405 # We delay opening the transaction as late as possible so we
1397 1406 # don't open a transaction for nothing and don't break future
1398 1407 # useful rollback calls
1399 1408 if 'changegroup' in pullop.stepsdone:
1400 1409 return
1401 1410 pullop.stepsdone.add('changegroup')
1402 1411 if not pullop.fetch:
1403 1412 pullop.repo.ui.status(_("no changes found\n"))
1404 1413 pullop.cgresult = 0
1405 1414 return
1406 1415 pullop.gettransaction()
1407 1416 if pullop.heads is None and list(pullop.common) == [nullid]:
1408 1417 pullop.repo.ui.status(_("requesting all changes\n"))
1409 1418 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1410 1419 # issue1320, avoid a race if remote changed after discovery
1411 1420 pullop.heads = pullop.rheads
1412 1421
1413 1422 if pullop.remote.capable('getbundle'):
1414 1423 # TODO: get bundlecaps from remote
1415 1424 cg = pullop.remote.getbundle('pull', common=pullop.common,
1416 1425 heads=pullop.heads or pullop.rheads)
1417 1426 elif pullop.heads is None:
1418 1427 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1419 1428 elif not pullop.remote.capable('changegroupsubset'):
1420 1429 raise error.Abort(_("partial pull cannot be done because "
1421 1430 "other repository doesn't support "
1422 1431 "changegroupsubset."))
1423 1432 else:
1424 1433 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1425 1434 pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
1426 1435
1427 1436 def _pullphase(pullop):
1428 1437 # Get remote phases data from remote
1429 1438 if 'phases' in pullop.stepsdone:
1430 1439 return
1431 1440 remotephases = pullop.remote.listkeys('phases')
1432 1441 _pullapplyphases(pullop, remotephases)
1433 1442
1434 1443 def _pullapplyphases(pullop, remotephases):
1435 1444 """apply phase movement from observed remote state"""
1436 1445 if 'phases' in pullop.stepsdone:
1437 1446 return
1438 1447 pullop.stepsdone.add('phases')
1439 1448 publishing = bool(remotephases.get('publishing', False))
1440 1449 if remotephases and not publishing:
1441 1450 # remote is new and non-publishing
1442 1451 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1443 1452 pullop.pulledsubset,
1444 1453 remotephases)
1445 1454 dheads = pullop.pulledsubset
1446 1455 else:
1447 1456 # Remote is old or publishing; all common changesets
1448 1457 # should be seen as public
1449 1458 pheads = pullop.pulledsubset
1450 1459 dheads = []
1451 1460 unfi = pullop.repo.unfiltered()
1452 1461 phase = unfi._phasecache.phase
1453 1462 rev = unfi.changelog.nodemap.get
1454 1463 public = phases.public
1455 1464 draft = phases.draft
1456 1465
1457 1466 # exclude changesets already public locally and update the others
1458 1467 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1459 1468 if pheads:
1460 1469 tr = pullop.gettransaction()
1461 1470 phases.advanceboundary(pullop.repo, tr, public, pheads)
1462 1471
1463 1472 # exclude changesets already draft locally and update the others
1464 1473 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1465 1474 if dheads:
1466 1475 tr = pullop.gettransaction()
1467 1476 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1468 1477
1469 1478 def _pullbookmarks(pullop):
1470 1479 """process the remote bookmark information to update the local one"""
1471 1480 if 'bookmarks' in pullop.stepsdone:
1472 1481 return
1473 1482 pullop.stepsdone.add('bookmarks')
1474 1483 repo = pullop.repo
1475 1484 remotebookmarks = pullop.remotebookmarks
1476 1485 remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
1477 1486 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1478 1487 pullop.remote.url(),
1479 1488 pullop.gettransaction,
1480 1489 explicit=pullop.explicitbookmarks)
1481 1490
1482 1491 def _pullobsolete(pullop):
1483 1492 """utility function to pull obsolete markers from a remote
1484 1493
1485 1494 The `gettransaction` argument is a function that returns the pull
1486 1495 transaction, creating one if necessary. We return the transaction to inform
1487 1496 the calling code that a new transaction has been created (when applicable).
1488 1497
1489 1498 Exists mostly to allow overriding for experimentation purposes"""
1490 1499 if 'obsmarkers' in pullop.stepsdone:
1491 1500 return
1492 1501 pullop.stepsdone.add('obsmarkers')
1493 1502 tr = None
1494 1503 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1495 1504 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1496 1505 remoteobs = pullop.remote.listkeys('obsolete')
1497 1506 if 'dump0' in remoteobs:
1498 1507 tr = pullop.gettransaction()
1499 1508 markers = []
1500 1509 for key in sorted(remoteobs, reverse=True):
1501 1510 if key.startswith('dump'):
1502 1511 data = base85.b85decode(remoteobs[key])
1503 1512 version, newmarks = obsolete._readmarkers(data)
1504 1513 markers += newmarks
1505 1514 if markers:
1506 1515 pullop.repo.obsstore.add(tr, markers)
1507 1516 pullop.repo.invalidatevolatilesets()
1508 1517 return tr
1509 1518
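# Illustrative sketch, not part of the original module: the 'obsolete'
# listkeys namespace read above maps 'dumpN' keys to base85-encoded marker
# payloads, e.g. (hypothetical data):
#     {'dump0': base85.b85encode(rawmarkerdata)}
# The keys are walked in reverse-sorted order and decoded with
# base85.b85decode() and obsolete._readmarkers() as shown above.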
1510 1519 def caps20to10(repo):
1511 1520 """return a set with appropriate options to use bundle20 during getbundle"""
1512 1521 caps = set(['HG20'])
1513 1522 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1514 1523 caps.add('bundle2=' + urlreq.quote(capsblob))
1515 1524 return caps
1516 1525
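# Illustrative example, not part of the original module: the returned set
# has roughly this shape (capability blob abbreviated and hypothetical):
#     set(['HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02'])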
1517 1526 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1518 1527 getbundle2partsorder = []
1519 1528
1520 1529 # Mapping between step name and function
1521 1530 #
1522 1531 # This exists to help extensions wrap steps if necessary
1523 1532 getbundle2partsmapping = {}
1524 1533
1525 1534 def getbundle2partsgenerator(stepname, idx=None):
1526 1535 """decorator for function generating bundle2 part for getbundle
1527 1536
1528 1537 The function is added to the step -> function mapping and appended to the
1529 1538 list of steps. Beware that decorated functions will be added in order
1530 1539 (this may matter).
1531 1540
1532 1541 You can only use this decorator for new steps; if you want to wrap a step
1533 1542 from an extension, modify the getbundle2partsmapping dictionary directly."""
1534 1543 def dec(func):
1535 1544 assert stepname not in getbundle2partsmapping
1536 1545 getbundle2partsmapping[stepname] = func
1537 1546 if idx is None:
1538 1547 getbundle2partsorder.append(stepname)
1539 1548 else:
1540 1549 getbundle2partsorder.insert(idx, stepname)
1541 1550 return func
1542 1551 return dec
1543 1552
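# Illustrative sketch, not part of the original module: registering a new
# step with the decorator above. The step name 'exampledata' and the part
# payload are hypothetical.
@getbundle2partsgenerator('exampledata')
def _getbundleexampledatapart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, **kwargs):
    """hypothetical part advertising the repository root path"""
    if kwargs.get('exampledata', False):
        bundler.newpart('exampledata', data=repo.root, mandatory=False)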
1544 1553 def bundle2requested(bundlecaps):
1545 1554 if bundlecaps is not None:
1546 1555 return any(cap.startswith('HG2') for cap in bundlecaps)
1547 1556 return False
1548 1557
1549 1558 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1550 1559 **kwargs):
1551 1560 """Return chunks constituting a bundle's raw data.
1552 1561
1553 1562 Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
1554 1563 passed.
1555 1564
1556 1565 Returns an iterator over raw chunks (of varying sizes).
1557 1566 """
1558 1567 usebundle2 = bundle2requested(bundlecaps)
1559 1568 # bundle10 case
1560 1569 if not usebundle2:
1561 1570 if bundlecaps and not kwargs.get('cg', True):
1562 1571 raise ValueError(_('request for bundle10 must include changegroup'))
1563 1572
1564 1573 if kwargs:
1565 1574 raise ValueError(_('unsupported getbundle arguments: %s')
1566 1575 % ', '.join(sorted(kwargs.keys())))
1567 1576 outgoing = _computeoutgoing(repo, heads, common)
1568 1577 bundler = changegroup.getbundler('01', repo, bundlecaps)
1569 1578 return changegroup.getsubsetraw(repo, outgoing, bundler, source)
1570 1579
1571 1580 # bundle20 case
1572 1581 b2caps = {}
1573 1582 for bcaps in bundlecaps:
1574 1583 if bcaps.startswith('bundle2='):
1575 1584 blob = urlreq.unquote(bcaps[len('bundle2='):])
1576 1585 b2caps.update(bundle2.decodecaps(blob))
1577 1586 bundler = bundle2.bundle20(repo.ui, b2caps)
1578 1587
1579 1588 kwargs['heads'] = heads
1580 1589 kwargs['common'] = common
1581 1590
1582 1591 for name in getbundle2partsorder:
1583 1592 func = getbundle2partsmapping[name]
1584 1593 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1585 1594 **kwargs)
1586 1595
1587 1596 return bundler.getchunks()
1588 1597
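# Illustrative usage, not part of the original module: a hypothetical
# caller streaming the returned chunks to disk.
#     with open(bundlepath, 'wb') as fh:
#         for chunk in getbundlechunks(repo, 'bundle', heads=heads):
#             fh.write(chunk)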
1589 1598 @getbundle2partsgenerator('changegroup')
1590 1599 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1591 1600 b2caps=None, heads=None, common=None, **kwargs):
1592 1601 """add a changegroup part to the requested bundle"""
1593 1602 cg = None
1594 1603 if kwargs.get('cg', True):
1595 1604 # build changegroup bundle here.
1596 1605 version = '01'
1597 1606 cgversions = b2caps.get('changegroup')
1598 1607 if cgversions: # 3.1 and 3.2 ship with an empty value
1599 1608 cgversions = [v for v in cgversions
1600 1609 if v in changegroup.supportedoutgoingversions(repo)]
1601 1610 if not cgversions:
1602 1611 raise ValueError(_('no common changegroup version'))
1603 1612 version = max(cgversions)
1604 1613 outgoing = _computeoutgoing(repo, heads, common)
1605 1614 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1606 1615 bundlecaps=bundlecaps,
1607 1616 version=version)
1608 1617
1609 1618 if cg:
1610 1619 part = bundler.newpart('changegroup', data=cg)
1611 1620 if cgversions:
1612 1621 part.addparam('version', version)
1613 1622 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1614 1623 if 'treemanifest' in repo.requirements:
1615 1624 part.addparam('treemanifest', '1')
1616 1625
1617 1626 @getbundle2partsgenerator('listkeys')
1618 1627 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1619 1628 b2caps=None, **kwargs):
1620 1629 """add parts containing listkeys namespaces to the requested bundle"""
1621 1630 listkeys = kwargs.get('listkeys', ())
1622 1631 for namespace in listkeys:
1623 1632 part = bundler.newpart('listkeys')
1624 1633 part.addparam('namespace', namespace)
1625 1634 keys = repo.listkeys(namespace).items()
1626 1635 part.data = pushkey.encodekeys(keys)
1627 1636
1628 1637 @getbundle2partsgenerator('obsmarkers')
1629 1638 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1630 1639 b2caps=None, heads=None, **kwargs):
1631 1640 """add an obsolescence markers part to the requested bundle"""
1632 1641 if kwargs.get('obsmarkers', False):
1633 1642 if heads is None:
1634 1643 heads = repo.heads()
1635 1644 subset = [c.node() for c in repo.set('::%ln', heads)]
1636 1645 markers = repo.obsstore.relevantmarkers(subset)
1637 1646 markers = sorted(markers)
1638 1647 buildobsmarkerspart(bundler, markers)
1639 1648
1640 1649 @getbundle2partsgenerator('hgtagsfnodes')
1641 1650 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1642 1651 b2caps=None, heads=None, common=None,
1643 1652 **kwargs):
1644 1653 """Transfer the .hgtags filenodes mapping.
1645 1654
1646 1655 Only values for heads in this bundle will be transferred.
1647 1656
1648 1657 The part data consists of pairs of a 20-byte changeset node and the
1649 1658 raw value of its .hgtags filenode.
1650 1659 """
1651 1660 # Don't send unless:
1652 1661 # - changesets are being exchanged,
1653 1662 # - the client supports it.
1654 1663 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1655 1664 return
1656 1665
1657 1666 outgoing = _computeoutgoing(repo, heads, common)
1658 1667
1659 1668 if not outgoing.missingheads:
1660 1669 return
1661 1670
1662 1671 cache = tags.hgtagsfnodescache(repo.unfiltered())
1663 1672 chunks = []
1664 1673
1665 1674 # .hgtags fnodes are only relevant for head changesets. While we could
1666 1675 # transfer values for all known nodes, there will likely be little to
1667 1676 # no benefit.
1668 1677 #
1669 1678 # We don't bother using a generator to produce output data because
1670 1679 # a) we only have 40 bytes per head and even esoteric numbers of heads
1671 1680 # consume little memory (1M heads is 40MB) b) we don't want to send the
1672 1681 # part if we don't have entries and knowing if we have entries requires
1673 1682 # cache lookups.
1674 1683 for node in outgoing.missingheads:
1675 1684 # Don't compute missing, as this may slow down serving.
1676 1685 fnode = cache.getfnode(node, computemissing=False)
1677 1686 if fnode is not None:
1678 1687 chunks.extend([node, fnode])
1679 1688
1680 1689 if chunks:
1681 1690 bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1682 1691
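# Illustrative sketch, not part of the original module: decoding the raw
# part payload built above. Each record is 40 bytes, a 20-byte changeset
# node followed by its 20-byte .hgtags filenode.
def _iterhgtagsfnodes(data):
    """hypothetical helper yielding (changeset node, filenode) pairs"""
    for offset in xrange(0, len(data), 40):
        record = data[offset:offset + 40]
        yield record[:20], record[20:40]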
1683 1692 def _getbookmarks(repo, **kwargs):
1684 1693 """Returns bookmark to node mapping.
1685 1694
1686 1695 This function is primarily used to generate `bookmarks` bundle2 part.
1687 1696 It is a separate function in order to make it easy to wrap it
1688 1697 in extensions. Passing `kwargs` to the function makes it easy to
1689 1698 add new parameters in extensions.
1690 1699 """
1691 1700
1692 1701 return dict(bookmod.listbinbookmarks(repo))
1693 1702
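# Illustrative sketch, not part of the original module: an extension could
# wrap _getbookmarks() to filter what gets advertised. Everything below,
# including the 'hide-' prefix, is hypothetical and would live in the
# extension's own module:
#     from mercurial import exchange, extensions
#     def _filteredbookmarks(orig, repo, **kwargs):
#         marks = orig(repo, **kwargs)
#         return dict((k, v) for k, v in marks.iteritems()
#                     if not k.startswith('hide-'))
#     def extsetup(ui):
#         extensions.wrapfunction(exchange, '_getbookmarks',
#                                 _filteredbookmarks)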
1694 1703 def check_heads(repo, their_heads, context):
1695 1704 """check if the heads of a repo have been modified
1696 1705
1697 1706 Used by peer for unbundling.
1698 1707 """
1699 1708 heads = repo.heads()
1700 1709 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
1701 1710 if not (their_heads == ['force'] or their_heads == heads or
1702 1711 their_heads == ['hashed', heads_hash]):
1703 1712 # someone else committed/pushed/unbundled while we
1704 1713 # were transferring data
1705 1714 raise error.PushRaced('repository changed while %s - '
1706 1715 'please try again' % context)
1707 1716
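# Illustrative sketch, not part of the original module: how a caller could
# build the ['hashed', ...] form accepted above from the binary head nodes
# it observed during discovery.
def _hashedheads(observedheads):
    """hypothetical helper mirroring the digest used by check_heads()"""
    return ['hashed', hashlib.sha1(''.join(sorted(observedheads))).digest()]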
1708 1717 def unbundle(repo, cg, heads, source, url):
1709 1718 """Apply a bundle to a repo.
1710 1719
1711 1720 This function makes sure the repo is locked during the application and has
1712 1721 a mechanism to check that no push race occurred between the creation of the
1713 1722 bundle and its application.
1714 1723 
1715 1724 If the push was raced, a PushRaced exception is raised."""
1716 1725 r = 0
1717 1726 # need a transaction when processing a bundle2 stream
1718 1727 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1719 1728 lockandtr = [None, None, None]
1720 1729 recordout = None
1721 1730 # quick fix for output mismatch with bundle2 in 3.4
1722 1731 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1723 1732 False)
1724 1733 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1725 1734 captureoutput = True
1726 1735 try:
1727 1736 check_heads(repo, heads, 'uploading changes')
1728 1737 # push can proceed
1729 1738 if util.safehasattr(cg, 'params'):
1730 1739 r = None
1731 1740 try:
1732 1741 def gettransaction():
1733 1742 if not lockandtr[2]:
1734 1743 lockandtr[0] = repo.wlock()
1735 1744 lockandtr[1] = repo.lock()
1736 1745 lockandtr[2] = repo.transaction(source)
1737 1746 lockandtr[2].hookargs['source'] = source
1738 1747 lockandtr[2].hookargs['url'] = url
1739 1748 lockandtr[2].hookargs['bundle2'] = '1'
1740 1749 return lockandtr[2]
1741 1750
1742 1751 # Do greedy locking by default until we're satisfied with lazy
1743 1752 # locking.
1744 1753 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1745 1754 gettransaction()
1746 1755
1747 1756 op = bundle2.bundleoperation(repo, gettransaction,
1748 1757 captureoutput=captureoutput)
1749 1758 try:
1750 1759 op = bundle2.processbundle(repo, cg, op=op)
1751 1760 finally:
1752 1761 r = op.reply
1753 1762 if captureoutput and r is not None:
1754 1763 repo.ui.pushbuffer(error=True, subproc=True)
1755 1764 def recordout(output):
1756 1765 r.newpart('output', data=output, mandatory=False)
1757 1766 if lockandtr[2] is not None:
1758 1767 lockandtr[2].close()
1759 1768 except BaseException as exc:
1760 1769 exc.duringunbundle2 = True
1761 1770 if captureoutput and r is not None:
1762 1771 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1763 1772 def recordout(output):
1764 1773 part = bundle2.bundlepart('output', data=output,
1765 1774 mandatory=False)
1766 1775 parts.append(part)
1767 1776 raise
1768 1777 else:
1769 1778 lockandtr[1] = repo.lock()
1770 1779 r = cg.apply(repo, source, url)
1771 1780 finally:
1772 1781 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1773 1782 if recordout is not None:
1774 1783 recordout(repo.ui.popbuffer())
1775 1784 return r
1776 1785
1777 1786 def _maybeapplyclonebundle(pullop):
1778 1787 """Apply a clone bundle from a remote, if possible."""
1779 1788
1780 1789 repo = pullop.repo
1781 1790 remote = pullop.remote
1782 1791
1783 1792 if not repo.ui.configbool('ui', 'clonebundles', True):
1784 1793 return
1785 1794
1786 1795 # Only run if local repo is empty.
1787 1796 if len(repo):
1788 1797 return
1789 1798
1790 1799 if pullop.heads:
1791 1800 return
1792 1801
1793 1802 if not remote.capable('clonebundles'):
1794 1803 return
1795 1804
1796 1805 res = remote._call('clonebundles')
1797 1806
1798 1807 # If we call the wire protocol command, that's good enough to record the
1799 1808 # attempt.
1800 1809 pullop.clonebundleattempted = True
1801 1810
1802 1811 entries = parseclonebundlesmanifest(repo, res)
1803 1812 if not entries:
1804 1813 repo.ui.note(_('no clone bundles available on remote; '
1805 1814 'falling back to regular clone\n'))
1806 1815 return
1807 1816
1808 1817 entries = filterclonebundleentries(repo, entries)
1809 1818 if not entries:
1810 1819 # There is a thundering herd concern here. However, if a server
1811 1820 # operator doesn't advertise bundles appropriate for its clients,
1812 1821 # they deserve what's coming. Furthermore, from a client's
1813 1822 # perspective, no automatic fallback would mean not being able to
1814 1823 # clone!
1815 1824 repo.ui.warn(_('no compatible clone bundles available on server; '
1816 1825 'falling back to regular clone\n'))
1817 1826 repo.ui.warn(_('(you may want to report this to the server '
1818 1827 'operator)\n'))
1819 1828 return
1820 1829
1821 1830 entries = sortclonebundleentries(repo.ui, entries)
1822 1831
1823 1832 url = entries[0]['URL']
1824 1833 repo.ui.status(_('applying clone bundle from %s\n') % url)
1825 1834 if trypullbundlefromurl(repo.ui, repo, url):
1826 1835 repo.ui.status(_('finished applying clone bundle\n'))
1827 1836 # Bundle failed.
1828 1837 #
1829 1838 # We abort by default to avoid the thundering herd of
1830 1839 # clients flooding a server that was expecting expensive
1831 1840 # clone load to be offloaded.
1832 1841 elif repo.ui.configbool('ui', 'clonebundlefallback', False):
1833 1842 repo.ui.warn(_('falling back to normal clone\n'))
1834 1843 else:
1835 1844 raise error.Abort(_('error applying bundle'),
1836 1845 hint=_('if this error persists, consider contacting '
1837 1846 'the server operator or disable clone '
1838 1847 'bundles via '
1839 1848 '"--config ui.clonebundles=false"'))
1840 1849
1841 1850 def parseclonebundlesmanifest(repo, s):
1842 1851 """Parses the raw text of a clone bundles manifest.
1843 1852
1844 1853 Returns a list of dicts. Each dict has a ``URL`` key corresponding
1845 1854 to the URL; the remaining keys are the entry's attributes.
1846 1855 """
1847 1856 m = []
1848 1857 for line in s.splitlines():
1849 1858 fields = line.split()
1850 1859 if not fields:
1851 1860 continue
1852 1861 attrs = {'URL': fields[0]}
1853 1862 for rawattr in fields[1:]:
1854 1863 key, value = rawattr.split('=', 1)
1855 1864 key = urlreq.unquote(key)
1856 1865 value = urlreq.unquote(value)
1857 1866 attrs[key] = value
1858 1867
1859 1868 # Parse BUNDLESPEC into components. This makes client-side
1860 1869 # preferences easier to specify since you can prefer a single
1861 1870 # component of the BUNDLESPEC.
1862 1871 if key == 'BUNDLESPEC':
1863 1872 try:
1864 1873 comp, version, params = parsebundlespec(repo, value,
1865 1874 externalnames=True)
1866 1875 attrs['COMPRESSION'] = comp
1867 1876 attrs['VERSION'] = version
1868 1877 except error.InvalidBundleSpecification:
1869 1878 pass
1870 1879 except error.UnsupportedBundleSpecification:
1871 1880 pass
1872 1881
1873 1882 m.append(attrs)
1874 1883
1875 1884 return m
1876 1885
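# Illustrative example, not part of the original module: a manifest in the
# format parsed above, one entry per line (URLs hypothetical):
#     https://example.com/full.zstd.hg BUNDLESPEC=zstd-v2 REQUIRESNI=true
#     https://example.com/full.gzip.hg BUNDLESPEC=gzip-v1
# For the second line, the resulting dict would be:
#     {'URL': 'https://example.com/full.gzip.hg', 'BUNDLESPEC': 'gzip-v1',
#      'COMPRESSION': 'gzip', 'VERSION': 'v1'}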
1877 1886 def filterclonebundleentries(repo, entries):
1878 1887 """Remove incompatible clone bundle manifest entries.
1879 1888
1880 1889 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
1881 1890 and returns a new list consisting of only the entries that this client
1882 1891 should be able to apply.
1883 1892
1884 1893 There is no guarantee we'll be able to apply all returned entries because
1885 1894 the metadata we use to filter on may be missing or wrong.
1886 1895 """
1887 1896 newentries = []
1888 1897 for entry in entries:
1889 1898 spec = entry.get('BUNDLESPEC')
1890 1899 if spec:
1891 1900 try:
1892 1901 parsebundlespec(repo, spec, strict=True)
1893 1902 except error.InvalidBundleSpecification as e:
1894 1903 repo.ui.debug(str(e) + '\n')
1895 1904 continue
1896 1905 except error.UnsupportedBundleSpecification as e:
1897 1906 repo.ui.debug('filtering %s because unsupported bundle '
1898 1907 'spec: %s\n' % (entry['URL'], str(e)))
1899 1908 continue
1900 1909
1901 1910 if 'REQUIRESNI' in entry and not sslutil.hassni:
1902 1911 repo.ui.debug('filtering %s because SNI not supported\n' %
1903 1912 entry['URL'])
1904 1913 continue
1905 1914
1906 1915 newentries.append(entry)
1907 1916
1908 1917 return newentries
1909 1918
1910 1919 class clonebundleentry(object):
1911 1920 """Represents an item in a clone bundles manifest.
1912 1921
1913 1922 This rich class is needed to support sorting since sorted() in Python 3
1914 1923 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
1915 1924 won't work.
1916 1925 """
1917 1926
1918 1927 def __init__(self, value, prefers):
1919 1928 self.value = value
1920 1929 self.prefers = prefers
1921 1930
1922 1931 def _cmp(self, other):
1923 1932 for prefkey, prefvalue in self.prefers:
1924 1933 avalue = self.value.get(prefkey)
1925 1934 bvalue = other.value.get(prefkey)
1926 1935
1927 1936 # Special case: b is missing the attribute and a matches exactly.
1928 1937 if avalue is not None and bvalue is None and avalue == prefvalue:
1929 1938 return -1
1930 1939
1931 1940 # Special case: a is missing the attribute and b matches exactly.
1932 1941 if bvalue is not None and avalue is None and bvalue == prefvalue:
1933 1942 return 1
1934 1943
1935 1944 # We can't compare unless the attribute is present on both.
1936 1945 if avalue is None or bvalue is None:
1937 1946 continue
1938 1947
1939 1948 # Same values should fall back to next attribute.
1940 1949 if avalue == bvalue:
1941 1950 continue
1942 1951
1943 1952 # Exact matches come first.
1944 1953 if avalue == prefvalue:
1945 1954 return -1
1946 1955 if bvalue == prefvalue:
1947 1956 return 1
1948 1957
1949 1958 # Fall back to next attribute.
1950 1959 continue
1951 1960
1952 1961 # If we got here we couldn't sort by attributes and prefers. Fall
1953 1962 # back to index order.
1954 1963 return 0
1955 1964
1956 1965 def __lt__(self, other):
1957 1966 return self._cmp(other) < 0
1958 1967
1959 1968 def __gt__(self, other):
1960 1969 return self._cmp(other) > 0
1961 1970
1962 1971 def __eq__(self, other):
1963 1972 return self._cmp(other) == 0
1964 1973
1965 1974 def __le__(self, other):
1966 1975 return self._cmp(other) <= 0
1967 1976
1968 1977 def __ge__(self, other):
1969 1978 return self._cmp(other) >= 0
1970 1979
1971 1980 def __ne__(self, other):
1972 1981 return self._cmp(other) != 0
1973 1982
1974 1983 def sortclonebundleentries(ui, entries):
1975 1984 prefers = ui.configlist('ui', 'clonebundleprefers', default=[])
1976 1985 if not prefers:
1977 1986 return list(entries)
1978 1987
1979 1988 prefers = [p.split('=', 1) for p in prefers]
1980 1989
1981 1990 items = sorted(clonebundleentry(v, prefers) for v in entries)
1982 1991 return [i.value for i in items]
1983 1992
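# Illustrative example, not part of the original module: with a client
# configuration of (hypothetical values)
#     [ui]
#     clonebundleprefers = COMPRESSION=zstd, VERSION=v2
# entries whose COMPRESSION attribute is 'zstd' sort first, ties are then
# broken by VERSION 'v2', and any remaining ties keep their manifest order.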
1984 1993 def trypullbundlefromurl(ui, repo, url):
1985 1994 """Attempt to apply a bundle from a URL."""
1986 1995 lock = repo.lock()
1987 1996 try:
1988 1997 tr = repo.transaction('bundleurl')
1989 1998 try:
1990 1999 try:
1991 2000 fh = urlmod.open(ui, url)
1992 2001 cg = readbundle(ui, fh, 'stream')
1993 2002
1994 2003 if isinstance(cg, bundle2.unbundle20):
1995 2004 bundle2.processbundle(repo, cg, lambda: tr)
1996 2005 elif isinstance(cg, streamclone.streamcloneapplier):
1997 2006 cg.apply(repo)
1998 2007 else:
1999 2008 cg.apply(repo, 'clonebundles', url)
2000 2009 tr.close()
2001 2010 return True
2002 2011 except urlerr.httperror as e:
2003 2012 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
2004 2013 except urlerr.urlerror as e:
2005 2014 ui.warn(_('error fetching bundle: %s\n') % e.reason[1])
2006 2015
2007 2016 return False
2008 2017 finally:
2009 2018 tr.release()
2010 2019 finally:
2011 2020 lock.release()
@@ -1,195 +1,229 b''
1 1
2 2 $ cat << EOF >> $HGRCPATH
3 3 > [format]
4 4 > usegeneraldelta=yes
5 5 > EOF
6 6
7 7 bundle w/o type option
8 8
9 9 $ hg init t1
10 10 $ hg init t2
11 11 $ cd t1
12 12 $ echo blablablablabla > file.txt
13 13 $ hg ci -Ama
14 14 adding file.txt
15 15 $ hg log | grep summary
16 16 summary: a
17 17 $ hg bundle ../b1 ../t2
18 18 searching for changes
19 19 1 changesets found
20 20
21 21 $ cd ../t2
22 22 $ hg pull ../b1
23 23 pulling from ../b1
24 24 requesting all changes
25 25 adding changesets
26 26 adding manifests
27 27 adding file changes
28 28 added 1 changesets with 1 changes to 1 files
29 29 (run 'hg update' to get a working copy)
30 30 $ hg up
31 31 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
32 32 $ hg log | grep summary
33 33 summary: a
34 34 $ cd ..
35 35
36 Unknown compression type is rejected
37
38 $ hg init t3
39 $ cd t3
40 $ hg -q pull ../b1
41 $ hg bundle -a -t unknown out.hg
42 abort: unknown is not a recognized bundle specification
43 (see 'hg help bundle' for supported values for --type)
44 [255]
45
46 $ hg bundle -a -t unknown-v2 out.hg
47 abort: unknown compression is not supported
48 (see 'hg help bundle' for supported values for --type)
49 [255]
50
51 $ cd ..
52
36 53 test bundle types
37 54
38 55 $ testbundle() {
39 56 > echo % test bundle type $1
40 57 > hg init t$1
41 58 > cd t1
42 59 > hg bundle -t $1 ../b$1 ../t$1
43 60 > f -q -B6 -D ../b$1; echo
44 61 > cd ../t$1
45 62 > hg debugbundle ../b$1
46 63 > hg debugbundle --spec ../b$1
47 64 > echo
48 65 > cd ..
49 66 > }
50 67
51 68 $ for t in "None" "bzip2" "gzip" "none-v2" "v2" "v1" "gzip-v1"; do
52 69 > testbundle $t
53 70 > done
54 71 % test bundle type None
55 72 searching for changes
56 73 1 changesets found
57 74 HG20\x00\x00 (esc)
58 75 Stream params: {}
59 76 changegroup -- "sortdict([('version', '02'), ('nbchanges', '1')])"
60 77 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
61 78 none-v2
62 79
63 80 % test bundle type bzip2
64 81 searching for changes
65 82 1 changesets found
66 83 HG20\x00\x00 (esc)
67 84 Stream params: sortdict([('Compression', 'BZ')])
68 85 changegroup -- "sortdict([('version', '02'), ('nbchanges', '1')])"
69 86 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
70 87 bzip2-v2
71 88
72 89 % test bundle type gzip
73 90 searching for changes
74 91 1 changesets found
75 92 HG20\x00\x00 (esc)
76 93 Stream params: sortdict([('Compression', 'GZ')])
77 94 changegroup -- "sortdict([('version', '02'), ('nbchanges', '1')])"
78 95 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
79 96 gzip-v2
80 97
81 98 % test bundle type none-v2
82 99 searching for changes
83 100 1 changesets found
84 101 HG20\x00\x00 (esc)
85 102 Stream params: {}
86 103 changegroup -- "sortdict([('version', '02'), ('nbchanges', '1')])"
87 104 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
88 105 none-v2
89 106
90 107 % test bundle type v2
91 108 searching for changes
92 109 1 changesets found
93 110 HG20\x00\x00 (esc)
94 111 Stream params: sortdict([('Compression', 'BZ')])
95 112 changegroup -- "sortdict([('version', '02'), ('nbchanges', '1')])"
96 113 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
97 114 bzip2-v2
98 115
99 116 % test bundle type v1
100 117 searching for changes
101 118 1 changesets found
102 119 HG10BZ
103 120 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
104 121 bzip2-v1
105 122
106 123 % test bundle type gzip-v1
107 124 searching for changes
108 125 1 changesets found
109 126 HG10GZ
110 127 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
111 128 gzip-v1
112 129
113 130
114 131 Compression level can be adjusted for bundle2 bundles
115 132
116 133 $ hg init test-complevel
117 134 $ cd test-complevel
118 135
119 136 $ cat > file0 << EOF
120 137 > this is a file
121 138 > with some text
122 139 > and some more text
123 140 > and other content
124 141 > EOF
125 142 $ cat > file1 << EOF
126 143 > this is another file
127 144 > with some other content
128 145 > and repeated, repeated, repeated, repeated content
129 146 > EOF
130 147 $ hg -q commit -A -m initial
131 148
132 149 $ hg bundle -a -t gzip-v2 gzip-v2.hg
133 150 1 changesets found
134 151 $ f --size gzip-v2.hg
135 152 gzip-v2.hg: size=427
136 153
137 154 $ hg --config experimental.bundlecomplevel=1 bundle -a -t gzip-v2 gzip-v2-level1.hg
138 155 1 changesets found
139 156 $ f --size gzip-v2-level1.hg
140 157 gzip-v2-level1.hg: size=435
141 158
142 159 $ cd ..
143 160
144 161 #if zstd
145 162
146 163 $ for t in "zstd" "zstd-v2"; do
147 164 > testbundle $t
148 165 > done
149 166 % test bundle type zstd
150 167 searching for changes
151 168 1 changesets found
152 169 HG20\x00\x00 (esc)
153 170 Stream params: sortdict([('Compression', 'ZS')])
154 171 changegroup -- "sortdict([('version', '02'), ('nbchanges', '1')])"
155 172 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
156 173 zstd-v2
157 174
158 175 % test bundle type zstd-v2
159 176 searching for changes
160 177 1 changesets found
161 178 HG20\x00\x00 (esc)
162 179 Stream params: sortdict([('Compression', 'ZS')])
163 180 changegroup -- "sortdict([('version', '02'), ('nbchanges', '1')])"
164 181 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
165 182 zstd-v2
166 183
184
185 Explicit request for zstd on non-generaldelta repos
186
187 $ hg --config format.usegeneraldelta=false init nogd
188 $ hg -q -R nogd pull t1
189 $ hg -R nogd bundle -a -t zstd nogd-zstd
190 abort: compression engine zstd is not supported on v1 bundles
191 (see 'hg help bundle' for supported values for --type)
192 [255]
193
194 zstd-v1 always fails
195
196 $ hg -R tzstd bundle -a -t zstd-v1 zstd-v1
197 abort: compression engine zstd is not supported on v1 bundles
198 (see 'hg help bundle' for supported values for --type)
199 [255]
200
167 201 #else
168 202
169 203 zstd is a valid engine but isn't available
170 204
171 205 $ hg -R t1 bundle -a -t zstd irrelevant.hg
172 206 abort: compression engine zstd could not be loaded
173 207 [255]
174 208
175 209 #endif
176 210
177 211 test garbage file
178 212
179 213 $ echo garbage > bgarbage
180 214 $ hg init tgarbage
181 215 $ cd tgarbage
182 216 $ hg pull ../bgarbage
183 217 pulling from ../bgarbage
184 218 abort: ../bgarbage: not a Mercurial bundle
185 219 [255]
186 220 $ cd ..
187 221
188 222 test invalid bundle type
189 223
190 224 $ cd t1
191 225 $ hg bundle -a -t garbage ../bgarbage
192 226 abort: garbage is not a recognized bundle specification
193 227 (see 'hg help bundle' for supported values for --type)
194 228 [255]
195 229 $ cd ..