py3: use stringutil.forcebytestr() instead of str()...
Pulkit Goyal
r37681:fc114a16 default
@@ -1,2410 +1,2410 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 bin,
17 17 hex,
18 18 nullid,
19 19 )
20 20 from .thirdparty import (
21 21 attr,
22 22 )
23 23 from . import (
24 24 bookmarks as bookmod,
25 25 bundle2,
26 26 changegroup,
27 27 discovery,
28 28 error,
29 29 lock as lockmod,
30 30 logexchange,
31 31 obsolete,
32 32 phases,
33 33 pushkey,
34 34 pycompat,
35 35 scmutil,
36 36 sslutil,
37 37 streamclone,
38 38 url as urlmod,
39 39 util,
40 40 )
41 41 from .utils import (
42 42 stringutil,
43 43 )
44 44
45 45 urlerr = util.urlerr
46 46 urlreq = util.urlreq
47 47
48 48 # Maps bundle version human names to changegroup versions.
49 49 _bundlespeccgversions = {'v1': '01',
50 50 'v2': '02',
51 51 'packed1': 's1',
52 52 'bundle2': '02', #legacy
53 53 }
54 54
55 55 # Maps bundle version with content opts to choose which part to bundle
56 56 _bundlespeccontentopts = {
57 57 'v1': {
58 58 'changegroup': True,
59 59 'cg.version': '01',
60 60 'obsolescence': False,
61 61 'phases': False,
62 62 'tagsfnodescache': False,
63 63 'revbranchcache': False
64 64 },
65 65 'v2': {
66 66 'changegroup': True,
67 67 'cg.version': '02',
68 68 'obsolescence': False,
69 69 'phases': False,
70 70 'tagsfnodescache': True,
71 71 'revbranchcache': True
72 72 },
73 73 'packed1' : {
74 74 'cg.version': 's1'
75 75 }
76 76 }
77 77 _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
78 78
79 79 _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
80 80 "tagsfnodescache": False,
81 81 "revbranchcache": False}}
82 82
83 83 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
84 84 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
85 85
86 86 @attr.s
87 87 class bundlespec(object):
88 88 compression = attr.ib()
89 89 version = attr.ib()
90 90 params = attr.ib()
91 91 contentopts = attr.ib()
92 92
93 93 def parsebundlespec(repo, spec, strict=True, externalnames=False):
94 94 """Parse a bundle string specification into parts.
95 95
96 96 Bundle specifications denote a well-defined bundle/exchange format.
97 97 The content of a given specification should not change over time in
98 98 order to ensure that bundles produced by a newer version of Mercurial are
99 99 readable from an older version.
100 100
101 101 The string currently has the form:
102 102
103 103 <compression>-<type>[;<parameter0>[;<parameter1>]]
104 104
105 105 Where <compression> is one of the supported compression formats
106 106 and <type> is (currently) a version string. A ";" can follow the type and
107 107 all text afterwards is interpreted as URI encoded, ";" delimited key=value
108 108 pairs.
109 109
110 110 If ``strict`` is True (the default) <compression> is required. Otherwise,
111 111 it is optional.
112 112
113 113 If ``externalnames`` is False (the default), the human-centric names will
114 114 be converted to their internal representation.
115 115
116 116 Returns a bundlespec object of (compression, version, parameters).
117 117 Compression will be ``None`` if not in strict mode and a compression isn't
118 118 defined.
119 119
120 120 An ``InvalidBundleSpecification`` is raised when the specification is
121 121 not syntactically well formed.
122 122
123 123 An ``UnsupportedBundleSpecification`` is raised when the compression or
124 124 bundle type/version is not recognized.
125 125
126 126 Note: this function will likely eventually return a more complex data
127 127 structure, including bundle2 part information.
128 128 """
129 129 def parseparams(s):
130 130 if ';' not in s:
131 131 return s, {}
132 132
133 133 params = {}
134 134 version, paramstr = s.split(';', 1)
135 135
136 136 for p in paramstr.split(';'):
137 137 if '=' not in p:
138 138 raise error.InvalidBundleSpecification(
139 139 _('invalid bundle specification: '
140 140 'missing "=" in parameter: %s') % p)
141 141
142 142 key, value = p.split('=', 1)
143 143 key = urlreq.unquote(key)
144 144 value = urlreq.unquote(value)
145 145 params[key] = value
146 146
147 147 return version, params
148 148
149 149
150 150 if strict and '-' not in spec:
151 151 raise error.InvalidBundleSpecification(
152 152 _('invalid bundle specification; '
153 153 'must be prefixed with compression: %s') % spec)
154 154
155 155 if '-' in spec:
156 156 compression, version = spec.split('-', 1)
157 157
158 158 if compression not in util.compengines.supportedbundlenames:
159 159 raise error.UnsupportedBundleSpecification(
160 160 _('%s compression is not supported') % compression)
161 161
162 162 version, params = parseparams(version)
163 163
164 164 if version not in _bundlespeccgversions:
165 165 raise error.UnsupportedBundleSpecification(
166 166 _('%s is not a recognized bundle version') % version)
167 167 else:
168 168 # Value could be just the compression or just the version, in which
169 169 # case some defaults are assumed (but only when not in strict mode).
170 170 assert not strict
171 171
172 172 spec, params = parseparams(spec)
173 173
174 174 if spec in util.compengines.supportedbundlenames:
175 175 compression = spec
176 176 version = 'v1'
177 177 # Generaldelta repos require v2.
178 178 if 'generaldelta' in repo.requirements:
179 179 version = 'v2'
180 180 # Modern compression engines require v2.
181 181 if compression not in _bundlespecv1compengines:
182 182 version = 'v2'
183 183 elif spec in _bundlespeccgversions:
184 184 if spec == 'packed1':
185 185 compression = 'none'
186 186 else:
187 187 compression = 'bzip2'
188 188 version = spec
189 189 else:
190 190 raise error.UnsupportedBundleSpecification(
191 191 _('%s is not a recognized bundle specification') % spec)
192 192
193 193 # Bundle version 1 only supports a known set of compression engines.
194 194 if version == 'v1' and compression not in _bundlespecv1compengines:
195 195 raise error.UnsupportedBundleSpecification(
196 196 _('compression engine %s is not supported on v1 bundles') %
197 197 compression)
198 198
199 199 # The specification for packed1 can optionally declare the data formats
200 200 # required to apply it. If we see this metadata, compare against what the
201 201 # repo supports and error if the bundle isn't compatible.
202 202 if version == 'packed1' and 'requirements' in params:
203 203 requirements = set(params['requirements'].split(','))
204 204 missingreqs = requirements - repo.supportedformats
205 205 if missingreqs:
206 206 raise error.UnsupportedBundleSpecification(
207 207 _('missing support for repository features: %s') %
208 208 ', '.join(sorted(missingreqs)))
209 209
210 210 # Compute contentopts based on the version
211 211 contentopts = _bundlespeccontentopts.get(version, {}).copy()
212 212
213 213 # Process the variants
214 214 if "stream" in params and params["stream"] == "v2":
215 215 variant = _bundlespecvariants["streamv2"]
216 216 contentopts.update(variant)
217 217
218 218 if not externalnames:
219 219 engine = util.compengines.forbundlename(compression)
220 220 compression = engine.bundletype()[1]
221 221 version = _bundlespeccgversions[version]
222 222
223 223 return bundlespec(compression, version, params, contentopts)
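For illustration, two typical spec strings decompose as follows (a minimal sketch, not part of this module; ``repo`` is assumed to be any local repository object):

    # sketch: internal names are returned by default
    spec = parsebundlespec(repo, 'gzip-v2')
    assert (spec.compression, spec.version, spec.params) == ('GZ', '02', {})

    # sketch: externalnames=True keeps the human-centric names, and the
    # 'stream=v2' parameter triggers the streamv2 content variant
    spec = parsebundlespec(repo, 'none-v2;stream=v2', externalnames=True)
    assert (spec.compression, spec.version) == ('none', 'v2')
    assert spec.contentopts['streamv2'] and not spec.contentopts['changegroup']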
224 224
225 225 def readbundle(ui, fh, fname, vfs=None):
226 226 header = changegroup.readexactly(fh, 4)
227 227
228 228 alg = None
229 229 if not fname:
230 230 fname = "stream"
231 231 if not header.startswith('HG') and header.startswith('\0'):
232 232 fh = changegroup.headerlessfixup(fh, header)
233 233 header = "HG10"
234 234 alg = 'UN'
235 235 elif vfs:
236 236 fname = vfs.join(fname)
237 237
238 238 magic, version = header[0:2], header[2:4]
239 239
240 240 if magic != 'HG':
241 241 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
242 242 if version == '10':
243 243 if alg is None:
244 244 alg = changegroup.readexactly(fh, 2)
245 245 return changegroup.cg1unpacker(fh, alg)
246 246 elif version.startswith('2'):
247 247 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
248 248 elif version == 'S1':
249 249 return streamclone.streamcloneapplier(fh)
250 250 else:
251 251 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
252 252
253 253 def getbundlespec(ui, fh):
254 254 """Infer the bundlespec from a bundle file handle.
255 255
256 256 The input file handle is seeked and the original seek position is not
257 257 restored.
258 258 """
259 259 def speccompression(alg):
260 260 try:
261 261 return util.compengines.forbundletype(alg).bundletype()[0]
262 262 except KeyError:
263 263 return None
264 264
265 265 b = readbundle(ui, fh, None)
266 266 if isinstance(b, changegroup.cg1unpacker):
267 267 alg = b._type
268 268 if alg == '_truncatedBZ':
269 269 alg = 'BZ'
270 270 comp = speccompression(alg)
271 271 if not comp:
272 272 raise error.Abort(_('unknown compression algorithm: %s') % alg)
273 273 return '%s-v1' % comp
274 274 elif isinstance(b, bundle2.unbundle20):
275 275 if 'Compression' in b.params:
276 276 comp = speccompression(b.params['Compression'])
277 277 if not comp:
278 278 raise error.Abort(_('unknown compression algorithm: %s') % comp)
279 279 else:
280 280 comp = 'none'
281 281
282 282 version = None
283 283 for part in b.iterparts():
284 284 if part.type == 'changegroup':
285 285 version = part.params['version']
286 286 if version in ('01', '02'):
287 287 version = 'v2'
288 288 else:
289 289 raise error.Abort(_('changegroup version %s does not have '
290 290 'a known bundlespec') % version,
291 291 hint=_('try upgrading your Mercurial '
292 292 'client'))
293 293 elif part.type == 'stream2' and version is None:
294 294 # A stream2 part must be part of a v2 bundle
295 295 version = "v2"
296 296 requirements = urlreq.unquote(part.params['requirements'])
297 297 splitted = requirements.split()
298 298 params = bundle2._formatrequirementsparams(splitted)
299 299 return 'none-v2;stream=v2;%s' % params
300 300
301 301 if not version:
302 302 raise error.Abort(_('could not identify changegroup version in '
303 303 'bundle'))
304 304
305 305 return '%s-%s' % (comp, version)
306 306 elif isinstance(b, streamclone.streamcloneapplier):
307 307 requirements = streamclone.readbundle1header(fh)[2]
308 308 formatted = bundle2._formatrequirementsparams(requirements)
309 309 return 'none-packed1;%s' % formatted
310 310 else:
311 311 raise error.Abort(_('unknown bundle type: %s') % b)
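A typical use is inferring the spec of an on-disk bundle so an equivalent one can be regenerated later (a hypothetical sketch; ``ui`` and the file name are assumptions):

    # sketch: read the bundle header and parts to derive a spec string
    with open('dump.hg', 'rb') as fh:
        spec = getbundlespec(ui, fh)
    # e.g. 'bzip2-v1', 'gzip-v2', 'none-v2;stream=v2;...' or 'none-packed1;...'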
312 312
313 313 def _computeoutgoing(repo, heads, common):
314 314 """Computes which revs are outgoing given a set of common
315 315 and a set of heads.
316 316
317 317 This is a separate function so extensions can have access to
318 318 the logic.
319 319
320 320 Returns a discovery.outgoing object.
321 321 """
322 322 cl = repo.changelog
323 323 if common:
324 324 hasnode = cl.hasnode
325 325 common = [n for n in common if hasnode(n)]
326 326 else:
327 327 common = [nullid]
328 328 if not heads:
329 329 heads = cl.heads()
330 330 return discovery.outgoing(repo, common, heads)
331 331
332 332 def _forcebundle1(op):
333 333 """return true if a pull/push must use bundle1
334 334
335 335 This function is used to allow testing of the older bundle version"""
336 336 ui = op.repo.ui
337 337 # The goal of this config is to allow developers to choose the bundle
338 338 # version used during exchange. This is especially handy during tests.
339 339 # Value is a list of bundle versions to be picked from; the highest
340 340 # version should be used.
341 341 #
342 342 # developer config: devel.legacy.exchange
343 343 exchange = ui.configlist('devel', 'legacy.exchange')
344 344 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
345 345 return forcebundle1 or not op.remote.capable('bundle2')
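For example, a test can force the legacy exchange format with this hgrc snippet ('bundle1' and 'bundle2' are the values this function inspects):

    [devel]
    legacy.exchange = bundle1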
346 346
347 347 class pushoperation(object):
348 348 """A object that represent a single push operation
349 349
350 350 Its purpose is to carry push related state and very common operations.
351 351
352 352 A new pushoperation should be created at the beginning of each push and
353 353 discarded afterward.
354 354 """
355 355
356 356 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
357 357 bookmarks=(), pushvars=None):
358 358 # repo we push from
359 359 self.repo = repo
360 360 self.ui = repo.ui
361 361 # repo we push to
362 362 self.remote = remote
363 363 # force option provided
364 364 self.force = force
365 365 # revs to be pushed (None is "all")
366 366 self.revs = revs
367 367 # bookmark explicitly pushed
368 368 self.bookmarks = bookmarks
369 369 # allow push of new branch
370 370 self.newbranch = newbranch
371 371 # steps already performed
372 372 # (used to check what steps have already been performed through bundle2)
373 373 self.stepsdone = set()
374 374 # Integer version of the changegroup push result
375 375 # - None means nothing to push
376 376 # - 0 means HTTP error
377 377 # - 1 means we pushed and remote head count is unchanged *or*
378 378 # we have outgoing changesets but refused to push
379 379 # - other values as described by addchangegroup()
380 380 self.cgresult = None
381 381 # Boolean value for the bookmark push
382 382 self.bkresult = None
383 383 # discover.outgoing object (contains common and outgoing data)
384 384 self.outgoing = None
385 385 # all remote topological heads before the push
386 386 self.remoteheads = None
387 387 # Details of the remote branch pre and post push
388 388 #
389 389 # mapping: {'branch': ([remoteheads],
390 390 # [newheads],
391 391 # [unsyncedheads],
392 392 # [discardedheads])}
393 393 # - branch: the branch name
394 394 # - remoteheads: the list of remote heads known locally
395 395 # None if the branch is new
396 396 # - newheads: the new remote heads (known locally) with outgoing pushed
397 397 # - unsyncedheads: the list of remote heads unknown locally.
398 398 # - discardedheads: the list of remote heads made obsolete by the push
399 399 self.pushbranchmap = None
400 400 # testable as a boolean indicating if any nodes are missing locally.
401 401 self.incoming = None
402 402 # summary of the remote phase situation
403 403 self.remotephases = None
404 404 # phase changes that must be pushed alongside the changesets
405 405 self.outdatedphases = None
406 406 # phase changes that must be pushed if changeset push fails
407 407 self.fallbackoutdatedphases = None
408 408 # outgoing obsmarkers
409 409 self.outobsmarkers = set()
410 410 # outgoing bookmarks
411 411 self.outbookmarks = []
412 412 # transaction manager
413 413 self.trmanager = None
414 414 # map { pushkey partid -> callback handling failure}
415 415 # used to handle exception from mandatory pushkey part failure
416 416 self.pkfailcb = {}
417 417 # an iterable of pushvars or None
418 418 self.pushvars = pushvars
419 419
420 420 @util.propertycache
421 421 def futureheads(self):
422 422 """future remote heads if the changeset push succeeds"""
423 423 return self.outgoing.missingheads
424 424
425 425 @util.propertycache
426 426 def fallbackheads(self):
427 427 """future remote heads if the changeset push fails"""
428 428 if self.revs is None:
429 429 # no target to push, all common heads are relevant
430 430 return self.outgoing.commonheads
431 431 unfi = self.repo.unfiltered()
432 432 # I want cheads = heads(::missingheads and ::commonheads)
433 433 # (missingheads is revs with secret changeset filtered out)
434 434 #
435 435 # This can be expressed as:
436 436 # cheads = ( (missingheads and ::commonheads)
437 437 #          + (commonheads and ::missingheads)
438 438 #          )
439 439 #
440 440 # while trying to push we already computed the following:
441 441 # common = (::commonheads)
442 442 # missing = ((commonheads::missingheads) - commonheads)
443 443 #
444 444 # We can pick:
445 445 # * missingheads part of common (::commonheads)
446 446 common = self.outgoing.common
447 447 nm = self.repo.changelog.nodemap
448 448 cheads = [node for node in self.revs if nm[node] in common]
449 449 # and
450 450 # * commonheads parents on missing
451 451 revset = unfi.set('%ln and parents(roots(%ln))',
452 452 self.outgoing.commonheads,
453 453 self.outgoing.missing)
454 454 cheads.extend(c.node() for c in revset)
455 455 return cheads
456 456
457 457 @property
458 458 def commonheads(self):
459 459 """set of all common heads after changeset bundle push"""
460 460 if self.cgresult:
461 461 return self.futureheads
462 462 else:
463 463 return self.fallbackheads
464 464
465 465 # mapping of messages used when pushing bookmarks
466 466 bookmsgmap = {'update': (_("updating bookmark %s\n"),
467 467 _('updating bookmark %s failed!\n')),
468 468 'export': (_("exporting bookmark %s\n"),
469 469 _('exporting bookmark %s failed!\n')),
470 470 'delete': (_("deleting remote bookmark %s\n"),
471 471 _('deleting remote bookmark %s failed!\n')),
472 472 }
473 473
474 474
475 475 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
476 476 opargs=None):
477 477 '''Push outgoing changesets (limited by revs) from a local
478 478 repository to remote. Return an integer:
479 479 - None means nothing to push
480 480 - 0 means HTTP error
481 481 - 1 means we pushed and remote head count is unchanged *or*
482 482 we have outgoing changesets but refused to push
483 483 - other values as described by addchangegroup()
484 484 '''
485 485 if opargs is None:
486 486 opargs = {}
487 487 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
488 488 **pycompat.strkwargs(opargs))
489 489 if pushop.remote.local():
490 490 missing = (set(pushop.repo.requirements)
491 491 - pushop.remote.local().supported)
492 492 if missing:
493 493 msg = _("required features are not"
494 494 " supported in the destination:"
495 495 " %s") % (', '.join(sorted(missing)))
496 496 raise error.Abort(msg)
497 497
498 498 if not pushop.remote.canpush():
499 499 raise error.Abort(_("destination does not support push"))
500 500
501 501 if not pushop.remote.capable('unbundle'):
502 502 raise error.Abort(_('cannot push: destination does not support the '
503 503 'unbundle wire protocol command'))
504 504
505 505 # get lock as we might write phase data
506 506 wlock = lock = None
507 507 try:
508 508 # bundle2 push may receive a reply bundle touching bookmarks or other
509 509 # things requiring the wlock. Take it now to ensure proper ordering.
510 510 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
511 511 if (not _forcebundle1(pushop)) and maypushback:
512 512 wlock = pushop.repo.wlock()
513 513 lock = pushop.repo.lock()
514 514 pushop.trmanager = transactionmanager(pushop.repo,
515 515 'push-response',
516 516 pushop.remote.url())
517 517 except IOError as err:
518 518 if err.errno != errno.EACCES:
519 519 raise
520 520 # source repo cannot be locked.
521 521 # We do not abort the push, but just disable the local phase
522 522 # synchronisation.
523 523 msg = 'cannot lock source repository: %s\n' % err
524 524 pushop.ui.debug(msg)
525 525
526 526 with wlock or util.nullcontextmanager(), \
527 527 lock or util.nullcontextmanager(), \
528 528 pushop.trmanager or util.nullcontextmanager():
529 529 pushop.repo.checkpush(pushop)
530 530 _pushdiscovery(pushop)
531 531 if not _forcebundle1(pushop):
532 532 _pushbundle2(pushop)
533 533 _pushchangeset(pushop)
534 534 _pushsyncphase(pushop)
535 535 _pushobsolete(pushop)
536 536 _pushbookmark(pushop)
537 537
538 538 return pushop
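A minimal caller looks like this (a hypothetical sketch; the peer URL is an assumption and ``hg.peer`` is the usual way to obtain a remote peer):

    # sketch: push all outgoing changesets and check the result code
    from mercurial import exchange, hg

    other = hg.peer(repo.ui, {}, 'ssh://example.com/repo')
    pushop = exchange.push(repo, other)
    if pushop.cgresult == 0:
        repo.ui.warn('push failed\n')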
539 539
540 540 # list of steps to perform discovery before push
541 541 pushdiscoveryorder = []
542 542
543 543 # Mapping between step name and function
544 544 #
545 545 # This exists to help extensions wrap steps if necessary
546 546 pushdiscoverymapping = {}
547 547
548 548 def pushdiscovery(stepname):
549 549 """decorator for function performing discovery before push
550 550
551 551 The function is added to the step -> function mapping and appended to the
552 552 list of steps. Beware that decorated functions will be added in order
553 553 (this may matter).
554 554
555 555 You can only use this decorator for a new step; if you want to wrap a step
556 556 from an extension, change the pushdiscoverymapping dictionary directly."""
557 557 def dec(func):
558 558 assert stepname not in pushdiscoverymapping
559 559 pushdiscoverymapping[stepname] = func
560 560 pushdiscoveryorder.append(stepname)
561 561 return func
562 562 return dec
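An extension registering its own discovery step would use the decorator like this (hypothetical step name and body):

    # sketch: register an extra discovery step, run after the built-in
    # ones in registration order
    @pushdiscovery('mystep')
    def _pushdiscoverymystep(pushop):
        pushop.ui.debug('running mystep discovery\n')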
563 563
564 564 def _pushdiscovery(pushop):
565 565 """Run all discovery steps"""
566 566 for stepname in pushdiscoveryorder:
567 567 step = pushdiscoverymapping[stepname]
568 568 step(pushop)
569 569
570 570 @pushdiscovery('changeset')
571 571 def _pushdiscoverychangeset(pushop):
572 572 """discover the changeset that need to be pushed"""
573 573 fci = discovery.findcommonincoming
574 574 if pushop.revs:
575 575 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
576 576 ancestorsof=pushop.revs)
577 577 else:
578 578 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
579 579 common, inc, remoteheads = commoninc
580 580 fco = discovery.findcommonoutgoing
581 581 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
582 582 commoninc=commoninc, force=pushop.force)
583 583 pushop.outgoing = outgoing
584 584 pushop.remoteheads = remoteheads
585 585 pushop.incoming = inc
586 586
587 587 @pushdiscovery('phase')
588 588 def _pushdiscoveryphase(pushop):
589 589 """discover the phase that needs to be pushed
590 590
591 591 (computed for both success and failure case for changesets push)"""
592 592 outgoing = pushop.outgoing
593 593 unfi = pushop.repo.unfiltered()
594 594 remotephases = pushop.remote.listkeys('phases')
595 595 if (pushop.ui.configbool('ui', '_usedassubrepo')
596 596 and remotephases # server supports phases
597 597 and not pushop.outgoing.missing # no changesets to be pushed
598 598 and remotephases.get('publishing', False)):
599 599 # When:
600 600 # - this is a subrepo push
601 601 # - and remote supports phases
602 602 # - and no changesets are to be pushed
603 603 # - and remote is publishing
604 604 # We may be in issue 3871 case!
605 605 # We drop the possible phase synchronisation done as a
606 606 # courtesy to publish changesets possibly locally draft
607 607 # on the remote.
608 608 pushop.outdatedphases = []
609 609 pushop.fallbackoutdatedphases = []
610 610 return
611 611
612 612 pushop.remotephases = phases.remotephasessummary(pushop.repo,
613 613 pushop.fallbackheads,
614 614 remotephases)
615 615 droots = pushop.remotephases.draftroots
616 616
617 617 extracond = ''
618 618 if not pushop.remotephases.publishing:
619 619 extracond = ' and public()'
620 620 revset = 'heads((%%ln::%%ln) %s)' % extracond
621 621 # Get the list of all revs draft on remote but public here.
622 622 # XXX Beware that the revset breaks if droots is not strictly
623 623 # XXX roots; we may want to ensure it is, but that is costly
624 624 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
625 625 if not outgoing.missing:
626 626 future = fallback
627 627 else:
628 628 # add changesets we are going to push as draft
629 629 #
630 630 # should not be necessary for a publishing server, but because of an
631 631 # issue fixed in xxxxx we have to do it anyway.
632 632 fdroots = list(unfi.set('roots(%ln + %ln::)',
633 633 outgoing.missing, droots))
634 634 fdroots = [f.node() for f in fdroots]
635 635 future = list(unfi.set(revset, fdroots, pushop.futureheads))
636 636 pushop.outdatedphases = future
637 637 pushop.fallbackoutdatedphases = fallback
638 638
639 639 @pushdiscovery('obsmarker')
640 640 def _pushdiscoveryobsmarkers(pushop):
641 641 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
642 642 and pushop.repo.obsstore
643 643 and 'obsolete' in pushop.remote.listkeys('namespaces')):
644 644 repo = pushop.repo
645 645 # very naive computation that can be quite expensive on big repos.
646 646 # However, evolution is currently slow on them anyway.
647 647 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
648 648 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
649 649
650 650 @pushdiscovery('bookmarks')
651 651 def _pushdiscoverybookmarks(pushop):
652 652 ui = pushop.ui
653 653 repo = pushop.repo.unfiltered()
654 654 remote = pushop.remote
655 655 ui.debug("checking for updated bookmarks\n")
656 656 ancestors = ()
657 657 if pushop.revs:
658 658 revnums = map(repo.changelog.rev, pushop.revs)
659 659 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
660 660 remotebookmark = remote.listkeys('bookmarks')
661 661
662 662 explicit = set([repo._bookmarks.expandname(bookmark)
663 663 for bookmark in pushop.bookmarks])
664 664
665 665 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
666 666 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
667 667
668 668 def safehex(x):
669 669 if x is None:
670 670 return x
671 671 return hex(x)
672 672
673 673 def hexifycompbookmarks(bookmarks):
674 674 return [(b, safehex(scid), safehex(dcid))
675 675 for (b, scid, dcid) in bookmarks]
676 676
677 677 comp = [hexifycompbookmarks(marks) for marks in comp]
678 678 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
679 679
680 680 def _processcompared(pushop, pushed, explicit, remotebms, comp):
681 681 """take decision on bookmark to pull from the remote bookmark
682 682
683 683 Exist to help extensions who want to alter this behavior.
684 684 """
685 685 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
686 686
687 687 repo = pushop.repo
688 688
689 689 for b, scid, dcid in advsrc:
690 690 if b in explicit:
691 691 explicit.remove(b)
692 692 if not pushed or repo[scid].rev() in pushed:
693 693 pushop.outbookmarks.append((b, dcid, scid))
694 694 # search for added bookmarks
695 695 for b, scid, dcid in addsrc:
696 696 if b in explicit:
697 697 explicit.remove(b)
698 698 pushop.outbookmarks.append((b, '', scid))
699 699 # search for overwritten bookmarks
700 700 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
701 701 if b in explicit:
702 702 explicit.remove(b)
703 703 pushop.outbookmarks.append((b, dcid, scid))
704 704 # search for bookmarks to delete
705 705 for b, scid, dcid in adddst:
706 706 if b in explicit:
707 707 explicit.remove(b)
708 708 # treat as "deleted locally"
709 709 pushop.outbookmarks.append((b, dcid, ''))
710 710 # identical bookmarks shouldn't get reported
711 711 for b, scid, dcid in same:
712 712 if b in explicit:
713 713 explicit.remove(b)
714 714
715 715 if explicit:
716 716 explicit = sorted(explicit)
717 717 # we should probably list all of them
718 718 pushop.ui.warn(_('bookmark %s does not exist on the local '
719 719 'or remote repository!\n') % explicit[0])
720 720 pushop.bkresult = 2
721 721
722 722 pushop.outbookmarks.sort()
723 723
724 724 def _pushcheckoutgoing(pushop):
725 725 outgoing = pushop.outgoing
726 726 unfi = pushop.repo.unfiltered()
727 727 if not outgoing.missing:
728 728 # nothing to push
729 729 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
730 730 return False
731 731 # something to push
732 732 if not pushop.force:
733 733 # if repo.obsstore == False --> no obsolete
734 734 # then, save the iteration
735 735 if unfi.obsstore:
736 736 # these messages are here for 80 char limit reasons
737 737 mso = _("push includes obsolete changeset: %s!")
738 738 mspd = _("push includes phase-divergent changeset: %s!")
739 739 mscd = _("push includes content-divergent changeset: %s!")
740 740 mst = {"orphan": _("push includes orphan changeset: %s!"),
741 741 "phase-divergent": mspd,
742 742 "content-divergent": mscd}
743 743 # If there is at least one obsolete or unstable
744 744 # changeset in missing, at least one of the
745 745 # missingheads will be obsolete or unstable.
746 746 # So checking heads only is ok.
747 747 for node in outgoing.missingheads:
748 748 ctx = unfi[node]
749 749 if ctx.obsolete():
750 750 raise error.Abort(mso % ctx)
751 751 elif ctx.isunstable():
752 752 # TODO print more than one instability in the abort
753 753 # message
754 754 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
755 755
756 756 discovery.checkheads(pushop)
757 757 return True
758 758
759 759 # List of names of steps to perform for an outgoing bundle2, order matters.
760 760 b2partsgenorder = []
761 761
762 762 # Mapping between step name and function
763 763 #
764 764 # This exists to help extensions wrap steps if necessary
765 765 b2partsgenmapping = {}
766 766
767 767 def b2partsgenerator(stepname, idx=None):
768 768 """decorator for function generating bundle2 part
769 769
770 770 The function is added to the step -> function mapping and appended to the
771 771 list of steps. Beware that decorated functions will be added in order
772 772 (this may matter).
773 773
774 774 You can only use this decorator for new steps; if you want to wrap a step
775 775 from an extension, change the b2partsgenmapping dictionary directly."""
776 776 def dec(func):
777 777 assert stepname not in b2partsgenmapping
778 778 b2partsgenmapping[stepname] = func
779 779 if idx is None:
780 780 b2partsgenorder.append(stepname)
781 781 else:
782 782 b2partsgenorder.insert(idx, stepname)
783 783 return func
784 784 return dec
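Likewise, an extension can contribute a bundle2 part, optionally forcing its position in the generation order with ``idx`` (hypothetical part name):

    # sketch: emit an extra bundle2 part before all other parts
    @b2partsgenerator('mypart', idx=0)
    def _pushb2mypart(pushop, bundler):
        if 'mypart' in pushop.stepsdone:
            return
        pushop.stepsdone.add('mypart')
        bundler.newpart('mypart', data='some payload')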
785 785
786 786 def _pushb2ctxcheckheads(pushop, bundler):
787 787 """Generate race condition checking parts
788 788
789 789 Exists as an independent function to aid extensions
790 790 """
791 791 # * 'force' does not check for push races,
792 792 # * if we don't push anything, there is nothing to check.
793 793 if not pushop.force and pushop.outgoing.missingheads:
794 794 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
795 795 emptyremote = pushop.pushbranchmap is None
796 796 if not allowunrelated or emptyremote:
797 797 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
798 798 else:
799 799 affected = set()
800 800 for branch, heads in pushop.pushbranchmap.iteritems():
801 801 remoteheads, newheads, unsyncedheads, discardedheads = heads
802 802 if remoteheads is not None:
803 803 remote = set(remoteheads)
804 804 affected |= set(discardedheads) & remote
805 805 affected |= remote - set(newheads)
806 806 if affected:
807 807 data = iter(sorted(affected))
808 808 bundler.newpart('check:updated-heads', data=data)
809 809
810 810 def _pushing(pushop):
811 811 """return True if we are pushing anything"""
812 812 return bool(pushop.outgoing.missing
813 813 or pushop.outdatedphases
814 814 or pushop.outobsmarkers
815 815 or pushop.outbookmarks)
816 816
817 817 @b2partsgenerator('check-bookmarks')
818 818 def _pushb2checkbookmarks(pushop, bundler):
819 819 """insert bookmark move checking"""
820 820 if not _pushing(pushop) or pushop.force:
821 821 return
822 822 b2caps = bundle2.bundle2caps(pushop.remote)
823 823 hasbookmarkcheck = 'bookmarks' in b2caps
824 824 if not (pushop.outbookmarks and hasbookmarkcheck):
825 825 return
826 826 data = []
827 827 for book, old, new in pushop.outbookmarks:
828 828 old = bin(old)
829 829 data.append((book, old))
830 830 checkdata = bookmod.binaryencode(data)
831 831 bundler.newpart('check:bookmarks', data=checkdata)
832 832
833 833 @b2partsgenerator('check-phases')
834 834 def _pushb2checkphases(pushop, bundler):
835 835 """insert phase move checking"""
836 836 if not _pushing(pushop) or pushop.force:
837 837 return
838 838 b2caps = bundle2.bundle2caps(pushop.remote)
839 839 hasphaseheads = 'heads' in b2caps.get('phases', ())
840 840 if pushop.remotephases is not None and hasphaseheads:
841 841 # check that the remote phase has not changed
842 842 checks = [[] for p in phases.allphases]
843 843 checks[phases.public].extend(pushop.remotephases.publicheads)
844 844 checks[phases.draft].extend(pushop.remotephases.draftroots)
845 845 if any(checks):
846 846 for nodes in checks:
847 847 nodes.sort()
848 848 checkdata = phases.binaryencode(checks)
849 849 bundler.newpart('check:phases', data=checkdata)
850 850
851 851 @b2partsgenerator('changeset')
852 852 def _pushb2ctx(pushop, bundler):
853 853 """handle changegroup push through bundle2
854 854
855 855 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
856 856 """
857 857 if 'changesets' in pushop.stepsdone:
858 858 return
859 859 pushop.stepsdone.add('changesets')
860 860 # Send known heads to the server for race detection.
861 861 if not _pushcheckoutgoing(pushop):
862 862 return
863 863 pushop.repo.prepushoutgoinghooks(pushop)
864 864
865 865 _pushb2ctxcheckheads(pushop, bundler)
866 866
867 867 b2caps = bundle2.bundle2caps(pushop.remote)
868 868 version = '01'
869 869 cgversions = b2caps.get('changegroup')
870 870 if cgversions: # 3.1 and 3.2 ship with an empty value
871 871 cgversions = [v for v in cgversions
872 872 if v in changegroup.supportedoutgoingversions(
873 873 pushop.repo)]
874 874 if not cgversions:
875 875 raise ValueError(_('no common changegroup version'))
876 876 version = max(cgversions)
877 877 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
878 878 'push')
879 879 cgpart = bundler.newpart('changegroup', data=cgstream)
880 880 if cgversions:
881 881 cgpart.addparam('version', version)
882 882 if 'treemanifest' in pushop.repo.requirements:
883 883 cgpart.addparam('treemanifest', '1')
884 884 def handlereply(op):
885 885 """extract addchangegroup returns from server reply"""
886 886 cgreplies = op.records.getreplies(cgpart.id)
887 887 assert len(cgreplies['changegroup']) == 1
888 888 pushop.cgresult = cgreplies['changegroup'][0]['return']
889 889 return handlereply
890 890
891 891 @b2partsgenerator('phase')
892 892 def _pushb2phases(pushop, bundler):
893 893 """handle phase push through bundle2"""
894 894 if 'phases' in pushop.stepsdone:
895 895 return
896 896 b2caps = bundle2.bundle2caps(pushop.remote)
897 897 ui = pushop.repo.ui
898 898
899 899 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
900 900 haspushkey = 'pushkey' in b2caps
901 901 hasphaseheads = 'heads' in b2caps.get('phases', ())
902 902
903 903 if hasphaseheads and not legacyphase:
904 904 return _pushb2phaseheads(pushop, bundler)
905 905 elif haspushkey:
906 906 return _pushb2phasespushkey(pushop, bundler)
907 907
908 908 def _pushb2phaseheads(pushop, bundler):
909 909 """push phase information through a bundle2 - binary part"""
910 910 pushop.stepsdone.add('phases')
911 911 if pushop.outdatedphases:
912 912 updates = [[] for p in phases.allphases]
913 913 updates[0].extend(h.node() for h in pushop.outdatedphases)
914 914 phasedata = phases.binaryencode(updates)
915 915 bundler.newpart('phase-heads', data=phasedata)
916 916
917 917 def _pushb2phasespushkey(pushop, bundler):
918 918 """push phase information through a bundle2 - pushkey part"""
919 919 pushop.stepsdone.add('phases')
920 920 part2node = []
921 921
922 922 def handlefailure(pushop, exc):
923 923 targetid = int(exc.partid)
924 924 for partid, node in part2node:
925 925 if partid == targetid:
926 926 raise error.Abort(_('updating %s to public failed') % node)
927 927
928 928 enc = pushkey.encode
929 929 for newremotehead in pushop.outdatedphases:
930 930 part = bundler.newpart('pushkey')
931 931 part.addparam('namespace', enc('phases'))
932 932 part.addparam('key', enc(newremotehead.hex()))
933 933 part.addparam('old', enc('%d' % phases.draft))
934 934 part.addparam('new', enc('%d' % phases.public))
935 935 part2node.append((part.id, newremotehead))
936 936 pushop.pkfailcb[part.id] = handlefailure
937 937
938 938 def handlereply(op):
939 939 for partid, node in part2node:
940 940 partrep = op.records.getreplies(partid)
941 941 results = partrep['pushkey']
942 942 assert len(results) <= 1
943 943 msg = None
944 944 if not results:
945 945 msg = _('server ignored update of %s to public!\n') % node
946 946 elif not int(results[0]['return']):
947 947 msg = _('updating %s to public failed!\n') % node
948 948 if msg is not None:
949 949 pushop.ui.warn(msg)
950 950 return handlereply
951 951
952 952 @b2partsgenerator('obsmarkers')
953 953 def _pushb2obsmarkers(pushop, bundler):
954 954 if 'obsmarkers' in pushop.stepsdone:
955 955 return
956 956 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
957 957 if obsolete.commonversion(remoteversions) is None:
958 958 return
959 959 pushop.stepsdone.add('obsmarkers')
960 960 if pushop.outobsmarkers:
961 961 markers = sorted(pushop.outobsmarkers)
962 962 bundle2.buildobsmarkerspart(bundler, markers)
963 963
964 964 @b2partsgenerator('bookmarks')
965 965 def _pushb2bookmarks(pushop, bundler):
966 966 """handle bookmark push through bundle2"""
967 967 if 'bookmarks' in pushop.stepsdone:
968 968 return
969 969 b2caps = bundle2.bundle2caps(pushop.remote)
970 970
971 971 legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
972 972 legacybooks = 'bookmarks' in legacy
973 973
974 974 if not legacybooks and 'bookmarks' in b2caps:
975 975 return _pushb2bookmarkspart(pushop, bundler)
976 976 elif 'pushkey' in b2caps:
977 977 return _pushb2bookmarkspushkey(pushop, bundler)
978 978
979 979 def _bmaction(old, new):
980 980 """small utility for bookmark pushing"""
981 981 if not old:
982 982 return 'export'
983 983 elif not new:
984 984 return 'delete'
985 985 return 'update'
986 986
987 987 def _pushb2bookmarkspart(pushop, bundler):
988 988 pushop.stepsdone.add('bookmarks')
989 989 if not pushop.outbookmarks:
990 990 return
991 991
992 992 allactions = []
993 993 data = []
994 994 for book, old, new in pushop.outbookmarks:
995 995 new = bin(new)
996 996 data.append((book, new))
997 997 allactions.append((book, _bmaction(old, new)))
998 998 checkdata = bookmod.binaryencode(data)
999 999 bundler.newpart('bookmarks', data=checkdata)
1000 1000
1001 1001 def handlereply(op):
1002 1002 ui = pushop.ui
1003 1003 # if success
1004 1004 for book, action in allactions:
1005 1005 ui.status(bookmsgmap[action][0] % book)
1006 1006
1007 1007 return handlereply
1008 1008
1009 1009 def _pushb2bookmarkspushkey(pushop, bundler):
1010 1010 pushop.stepsdone.add('bookmarks')
1011 1011 part2book = []
1012 1012 enc = pushkey.encode
1013 1013
1014 1014 def handlefailure(pushop, exc):
1015 1015 targetid = int(exc.partid)
1016 1016 for partid, book, action in part2book:
1017 1017 if partid == targetid:
1018 1018 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1019 1019 # we should not be called for a part we did not generate
1020 1020 assert False
1021 1021
1022 1022 for book, old, new in pushop.outbookmarks:
1023 1023 part = bundler.newpart('pushkey')
1024 1024 part.addparam('namespace', enc('bookmarks'))
1025 1025 part.addparam('key', enc(book))
1026 1026 part.addparam('old', enc(old))
1027 1027 part.addparam('new', enc(new))
1028 1028 action = 'update'
1029 1029 if not old:
1030 1030 action = 'export'
1031 1031 elif not new:
1032 1032 action = 'delete'
1033 1033 part2book.append((part.id, book, action))
1034 1034 pushop.pkfailcb[part.id] = handlefailure
1035 1035
1036 1036 def handlereply(op):
1037 1037 ui = pushop.ui
1038 1038 for partid, book, action in part2book:
1039 1039 partrep = op.records.getreplies(partid)
1040 1040 results = partrep['pushkey']
1041 1041 assert len(results) <= 1
1042 1042 if not results:
1043 1043 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
1044 1044 else:
1045 1045 ret = int(results[0]['return'])
1046 1046 if ret:
1047 1047 ui.status(bookmsgmap[action][0] % book)
1048 1048 else:
1049 1049 ui.warn(bookmsgmap[action][1] % book)
1050 1050 if pushop.bkresult is not None:
1051 1051 pushop.bkresult = 1
1052 1052 return handlereply
1053 1053
1054 1054 @b2partsgenerator('pushvars', idx=0)
1055 1055 def _getbundlesendvars(pushop, bundler):
1056 1056 '''send shellvars via bundle2'''
1057 1057 pushvars = pushop.pushvars
1058 1058 if pushvars:
1059 1059 shellvars = {}
1060 1060 for raw in pushvars:
1061 1061 if '=' not in raw:
1062 1062 msg = ("unable to parse variable '%s', should follow "
1063 1063 "'KEY=VALUE' or 'KEY=' format")
1064 1064 raise error.Abort(msg % raw)
1065 1065 k, v = raw.split('=', 1)
1066 1066 shellvars[k] = v
1067 1067
1068 1068 part = bundler.newpart('pushvars')
1069 1069
1070 1070 for key, value in shellvars.iteritems():
1071 1071 part.addparam(key, value, mandatory=False)
1072 1072
1073 1073 def _pushbundle2(pushop):
1074 1074 """push data to the remote using bundle2
1075 1075
1076 1076 The only currently supported type of data is changegroup but this will
1077 1077 evolve in the future."""
1078 1078 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1079 1079 pushback = (pushop.trmanager
1080 1080 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
1081 1081
1082 1082 # create reply capability
1083 1083 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
1084 1084 allowpushback=pushback,
1085 1085 role='client'))
1086 1086 bundler.newpart('replycaps', data=capsblob)
1087 1087 replyhandlers = []
1088 1088 for partgenname in b2partsgenorder:
1089 1089 partgen = b2partsgenmapping[partgenname]
1090 1090 ret = partgen(pushop, bundler)
1091 1091 if callable(ret):
1092 1092 replyhandlers.append(ret)
1093 1093 # do not push if nothing to push
1094 1094 if bundler.nbparts <= 1:
1095 1095 return
1096 1096 stream = util.chunkbuffer(bundler.getchunks())
1097 1097 try:
1098 1098 try:
1099 1099 with pushop.remote.commandexecutor() as e:
1100 1100 reply = e.callcommand('unbundle', {
1101 1101 'bundle': stream,
1102 1102 'heads': ['force'],
1103 1103 'url': pushop.remote.url(),
1104 1104 }).result()
1105 1105 except error.BundleValueError as exc:
1106 1106 raise error.Abort(_('missing support for %s') % exc)
1107 1107 try:
1108 1108 trgetter = None
1109 1109 if pushback:
1110 1110 trgetter = pushop.trmanager.transaction
1111 1111 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1112 1112 except error.BundleValueError as exc:
1113 1113 raise error.Abort(_('missing support for %s') % exc)
1114 1114 except bundle2.AbortFromPart as exc:
1115 1115 pushop.ui.status(_('remote: %s\n') % exc)
1116 1116 if exc.hint is not None:
1117 1117 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
1118 1118 raise error.Abort(_('push failed on remote'))
1119 1119 except error.PushkeyFailed as exc:
1120 1120 partid = int(exc.partid)
1121 1121 if partid not in pushop.pkfailcb:
1122 1122 raise
1123 1123 pushop.pkfailcb[partid](pushop, exc)
1124 1124 for rephand in replyhandlers:
1125 1125 rephand(op)
1126 1126
1127 1127 def _pushchangeset(pushop):
1128 1128 """Make the actual push of changeset bundle to remote repo"""
1129 1129 if 'changesets' in pushop.stepsdone:
1130 1130 return
1131 1131 pushop.stepsdone.add('changesets')
1132 1132 if not _pushcheckoutgoing(pushop):
1133 1133 return
1134 1134
1135 1135 # Should have verified this in push().
1136 1136 assert pushop.remote.capable('unbundle')
1137 1137
1138 1138 pushop.repo.prepushoutgoinghooks(pushop)
1139 1139 outgoing = pushop.outgoing
1140 1140 # TODO: get bundlecaps from remote
1141 1141 bundlecaps = None
1142 1142 # create a changegroup from local
1143 1143 if pushop.revs is None and not (outgoing.excluded
1144 1144 or pushop.repo.changelog.filteredrevs):
1145 1145 # push everything,
1146 1146 # use the fast path, no race possible on push
1147 1147 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1148 1148 fastpath=True, bundlecaps=bundlecaps)
1149 1149 else:
1150 1150 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1151 1151 'push', bundlecaps=bundlecaps)
1152 1152
1153 1153 # apply changegroup to remote
1154 1154 # local repo finds heads on server, finds out what
1155 1155 # revs it must push. once revs transferred, if server
1156 1156 # finds it has different heads (someone else won
1157 1157 # commit/push race), server aborts.
1158 1158 if pushop.force:
1159 1159 remoteheads = ['force']
1160 1160 else:
1161 1161 remoteheads = pushop.remoteheads
1162 1162 # ssh: return remote's addchangegroup()
1163 1163 # http: return remote's addchangegroup() or 0 for error
1164 1164 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1165 1165 pushop.repo.url())
1166 1166
1167 1167 def _pushsyncphase(pushop):
1168 1168 """synchronise phase information locally and remotely"""
1169 1169 cheads = pushop.commonheads
1170 1170 # even when we don't push, exchanging phase data is useful
1171 1171 remotephases = pushop.remote.listkeys('phases')
1172 1172 if (pushop.ui.configbool('ui', '_usedassubrepo')
1173 1173 and remotephases # server supports phases
1174 1174 and pushop.cgresult is None # nothing was pushed
1175 1175 and remotephases.get('publishing', False)):
1176 1176 # When:
1177 1177 # - this is a subrepo push
1178 1178 # - and remote supports phases
1179 1179 # - and no changesets were pushed
1180 1180 # - and remote is publishing
1181 1181 # We may be in issue 3871 case!
1182 1182 # We drop the possible phase synchronisation done by
1183 1183 # courtesy to publish changesets possibly locally draft
1184 1184 # on the remote.
1185 1185 remotephases = {'publishing': 'True'}
1186 1186 if not remotephases: # old server or public only reply from non-publishing
1187 1187 _localphasemove(pushop, cheads)
1188 1188 # don't push any phase data as there is nothing to push
1189 1189 else:
1190 1190 ana = phases.analyzeremotephases(pushop.repo, cheads,
1191 1191 remotephases)
1192 1192 pheads, droots = ana
1193 1193 ### Apply remote phase on local
1194 1194 if remotephases.get('publishing', False):
1195 1195 _localphasemove(pushop, cheads)
1196 1196 else: # publish = False
1197 1197 _localphasemove(pushop, pheads)
1198 1198 _localphasemove(pushop, cheads, phases.draft)
1199 1199 ### Apply local phase on remote
1200 1200
1201 1201 if pushop.cgresult:
1202 1202 if 'phases' in pushop.stepsdone:
1203 1203 # phases already pushed through bundle2
1204 1204 return
1205 1205 outdated = pushop.outdatedphases
1206 1206 else:
1207 1207 outdated = pushop.fallbackoutdatedphases
1208 1208
1209 1209 pushop.stepsdone.add('phases')
1210 1210
1211 1211 # filter heads already turned public by the push
1212 1212 outdated = [c for c in outdated if c.node() not in pheads]
1213 1213 # fallback to independent pushkey command
1214 1214 for newremotehead in outdated:
1215 1215 with pushop.remote.commandexecutor() as e:
1216 1216 r = e.callcommand('pushkey', {
1217 1217 'namespace': 'phases',
1218 1218 'key': newremotehead.hex(),
1219 1219 'old': '%d' % phases.draft,
1220 1220 'new': '%d' % phases.public
1221 1221 }).result()
1222 1222
1223 1223 if not r:
1224 1224 pushop.ui.warn(_('updating %s to public failed!\n')
1225 1225 % newremotehead)
1226 1226
1227 1227 def _localphasemove(pushop, nodes, phase=phases.public):
1228 1228 """move <nodes> to <phase> in the local source repo"""
1229 1229 if pushop.trmanager:
1230 1230 phases.advanceboundary(pushop.repo,
1231 1231 pushop.trmanager.transaction(),
1232 1232 phase,
1233 1233 nodes)
1234 1234 else:
1235 1235 # repo is not locked, do not change any phases!
1236 1236 # Informs the user that phases should have been moved when
1237 1237 # applicable.
1238 1238 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1239 1239 phasestr = phases.phasenames[phase]
1240 1240 if actualmoves:
1241 1241 pushop.ui.status(_('cannot lock source repo, skipping '
1242 1242 'local %s phase update\n') % phasestr)
1243 1243
1244 1244 def _pushobsolete(pushop):
1245 1245 """utility function to push obsolete markers to a remote"""
1246 1246 if 'obsmarkers' in pushop.stepsdone:
1247 1247 return
1248 1248 repo = pushop.repo
1249 1249 remote = pushop.remote
1250 1250 pushop.stepsdone.add('obsmarkers')
1251 1251 if pushop.outobsmarkers:
1252 1252 pushop.ui.debug('try to push obsolete markers to remote\n')
1253 1253 rslts = []
1254 1254 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1255 1255 for key in sorted(remotedata, reverse=True):
1256 1256 # reverse sort to ensure we end with dump0
1257 1257 data = remotedata[key]
1258 1258 rslts.append(remote.pushkey('obsolete', key, '', data))
1259 1259 if [r for r in rslts if not r]:
1260 1260 msg = _('failed to push some obsolete markers!\n')
1261 1261 repo.ui.warn(msg)
1262 1262
1263 1263 def _pushbookmark(pushop):
1264 1264 """Update bookmark position on remote"""
1265 1265 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1266 1266 return
1267 1267 pushop.stepsdone.add('bookmarks')
1268 1268 ui = pushop.ui
1269 1269 remote = pushop.remote
1270 1270
1271 1271 for b, old, new in pushop.outbookmarks:
1272 1272 action = 'update'
1273 1273 if not old:
1274 1274 action = 'export'
1275 1275 elif not new:
1276 1276 action = 'delete'
1277 1277
1278 1278 with remote.commandexecutor() as e:
1279 1279 r = e.callcommand('pushkey', {
1280 1280 'namespace': 'bookmarks',
1281 1281 'key': b,
1282 1282 'old': old,
1283 1283 'new': new,
1284 1284 }).result()
1285 1285
1286 1286 if r:
1287 1287 ui.status(bookmsgmap[action][0] % b)
1288 1288 else:
1289 1289 ui.warn(bookmsgmap[action][1] % b)
1290 1290 # discovery can have set the value from an invalid entry
1291 1291 if pushop.bkresult is not None:
1292 1292 pushop.bkresult = 1
1293 1293
1294 1294 class pulloperation(object):
1295 1295 """A object that represent a single pull operation
1296 1296
1297 1297 It purpose is to carry pull related state and very common operation.
1298 1298
1299 1299 A new should be created at the beginning of each pull and discarded
1300 1300 afterward.
1301 1301 """
1302 1302
1303 1303 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1304 1304 remotebookmarks=None, streamclonerequested=None):
1305 1305 # repo we pull into
1306 1306 self.repo = repo
1307 1307 # repo we pull from
1308 1308 self.remote = remote
1309 1309 # revision we try to pull (None is "all")
1310 1310 self.heads = heads
1311 1311 # bookmark pulled explicitly
1312 1312 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1313 1313 for bookmark in bookmarks]
1314 1314 # do we force pull?
1315 1315 self.force = force
1316 1316 # whether a streaming clone was requested
1317 1317 self.streamclonerequested = streamclonerequested
1318 1318 # transaction manager
1319 1319 self.trmanager = None
1320 1320 # set of common changeset between local and remote before pull
1321 1321 self.common = None
1322 1322 # set of pulled heads
1323 1323 self.rheads = None
1324 1324 # list of missing changesets to fetch remotely
1325 1325 self.fetch = None
1326 1326 # remote bookmarks data
1327 1327 self.remotebookmarks = remotebookmarks
1328 1328 # result of changegroup pulling (used as return code by pull)
1329 1329 self.cgresult = None
1330 1330 # list of steps already done
1331 1331 self.stepsdone = set()
1332 1332 # Whether we attempted a clone from pre-generated bundles.
1333 1333 self.clonebundleattempted = False
1334 1334
1335 1335 @util.propertycache
1336 1336 def pulledsubset(self):
1337 1337 """heads of the set of changeset target by the pull"""
1338 1338 # compute target subset
1339 1339 if self.heads is None:
1340 1340 # We pulled everything possible
1341 1341 # sync on everything common
1342 1342 c = set(self.common)
1343 1343 ret = list(self.common)
1344 1344 for n in self.rheads:
1345 1345 if n not in c:
1346 1346 ret.append(n)
1347 1347 return ret
1348 1348 else:
1349 1349 # We pulled a specific subset
1350 1350 # sync on this subset
1351 1351 return self.heads
1352 1352
1353 1353 @util.propertycache
1354 1354 def canusebundle2(self):
1355 1355 return not _forcebundle1(self)
1356 1356
1357 1357 @util.propertycache
1358 1358 def remotebundle2caps(self):
1359 1359 return bundle2.bundle2caps(self.remote)
1360 1360
1361 1361 def gettransaction(self):
1362 1362 # deprecated; talk to trmanager directly
1363 1363 return self.trmanager.transaction()
1364 1364
1365 1365 class transactionmanager(util.transactional):
1366 1366 """An object to manage the life cycle of a transaction
1367 1367
1368 1368 It creates the transaction on demand and calls the appropriate hooks when
1369 1369 closing the transaction."""
1370 1370 def __init__(self, repo, source, url):
1371 1371 self.repo = repo
1372 1372 self.source = source
1373 1373 self.url = url
1374 1374 self._tr = None
1375 1375
1376 1376 def transaction(self):
1377 1377 """Return an open transaction object, constructing if necessary"""
1378 1378 if not self._tr:
1379 1379 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1380 1380 self._tr = self.repo.transaction(trname)
1381 1381 self._tr.hookargs['source'] = self.source
1382 1382 self._tr.hookargs['url'] = self.url
1383 1383 return self._tr
1384 1384
1385 1385 def close(self):
1386 1386 """close transaction if created"""
1387 1387 if self._tr is not None:
1388 1388 self._tr.close()
1389 1389
1390 1390 def release(self):
1391 1391 """release transaction if created"""
1392 1392 if self._tr is not None:
1393 1393 self._tr.release()
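Because it subclasses ``util.transactional``, a manager can be driven as a context manager, creating the transaction lazily and closing or releasing it automatically (a sketch mirroring how pull() uses it below):

    # sketch: transaction opens on first use, close() on success,
    # release() on error
    trmanager = transactionmanager(repo, 'pull', remote.url())
    with trmanager:
        tr = trmanager.transaction()
        # ... apply changegroup, phases and bookmarks under tr ...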
1394 1394
1395 1395 def _fullpullbundle2(repo, pullop):
1396 1396 # The server may send a partial reply, i.e. when inlining
1397 1397 # pre-computed bundles. In that case, update the common
1398 1398 # set based on the results and pull another bundle.
1399 1399 #
1400 1400 # There are two indicators that the process is finished:
1401 1401 # - no changeset has been added, or
1402 1402 # - all remote heads are known locally.
1403 1403 # The head check must use the unfiltered view as obsoletion
1404 1404 # markers can hide heads.
1405 1405 unfi = repo.unfiltered()
1406 1406 unficl = unfi.changelog
1407 1407 def headsofdiff(h1, h2):
1408 1408 """Returns heads(h1 % h2)"""
1409 1409 res = unfi.set('heads(%ln %% %ln)', h1, h2)
1410 1410 return set(ctx.node() for ctx in res)
1411 1411 def headsofunion(h1, h2):
1412 1412 """Returns heads((h1 + h2) - null)"""
1413 1413 res = unfi.set('heads((%ln + %ln - null))', h1, h2)
1414 1414 return set(ctx.node() for ctx in res)
1415 1415 while True:
1416 1416 old_heads = unficl.heads()
1417 1417 clstart = len(unficl)
1418 1418 _pullbundle2(pullop)
1419 1419 if changegroup.NARROW_REQUIREMENT in repo.requirements:
1420 1420 # XXX narrow clones filter the heads on the server side during
1421 1421 # XXX getbundle and result in partial replies as well.
1422 1422 # XXX Disable pull bundles in this case as band aid to avoid
1423 1423 # XXX extra round trips.
1424 1424 break
1425 1425 if clstart == len(unficl):
1426 1426 break
1427 1427 if all(unficl.hasnode(n) for n in pullop.rheads):
1428 1428 break
1429 1429 new_heads = headsofdiff(unficl.heads(), old_heads)
1430 1430 pullop.common = headsofunion(new_heads, pullop.common)
1431 1431 pullop.rheads = set(pullop.rheads) - pullop.common
1432 1432
1433 1433 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1434 1434 streamclonerequested=None):
1435 1435 """Fetch repository data from a remote.
1436 1436
1437 1437 This is the main function used to retrieve data from a remote repository.
1438 1438
1439 1439 ``repo`` is the local repository to clone into.
1440 1440 ``remote`` is a peer instance.
1441 1441 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1442 1442 default) means to pull everything from the remote.
1443 1443 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1444 1444 default, all remote bookmarks are pulled.
1445 1445 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1446 1446 initialization.
1447 1447 ``streamclonerequested`` is a boolean indicating whether a "streaming
1448 1448 clone" is requested. A "streaming clone" is essentially a raw file copy
1449 1449 of revlogs from the server. This only works when the local repository is
1450 1450 empty. The default value of ``None`` means to respect the server
1451 1451 configuration for preferring stream clones.
1452 1452
1453 1453 Returns the ``pulloperation`` created for this pull.
1454 1454 """
1455 1455 if opargs is None:
1456 1456 opargs = {}
1457 1457 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1458 1458 streamclonerequested=streamclonerequested,
1459 1459 **pycompat.strkwargs(opargs))
1460 1460
1461 1461 peerlocal = pullop.remote.local()
1462 1462 if peerlocal:
1463 1463 missing = set(peerlocal.requirements) - pullop.repo.supported
1464 1464 if missing:
1465 1465 msg = _("required features are not"
1466 1466 " supported in the destination:"
1467 1467 " %s") % (', '.join(sorted(missing)))
1468 1468 raise error.Abort(msg)
1469 1469
1470 1470 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1471 1471 with repo.wlock(), repo.lock(), pullop.trmanager:
1472 1472 # This should ideally be in _pullbundle2(). However, it needs to run
1473 1473 # before discovery to avoid extra work.
1474 1474 _maybeapplyclonebundle(pullop)
1475 1475 streamclone.maybeperformlegacystreamclone(pullop)
1476 1476 _pulldiscovery(pullop)
1477 1477 if pullop.canusebundle2:
1478 1478 _fullpullbundle2(repo, pullop)
1479 1479 _pullchangeset(pullop)
1480 1480 _pullphase(pullop)
1481 1481 _pullbookmarks(pullop)
1482 1482 _pullobsolete(pullop)
1483 1483
1484 1484 # storing remotenames
1485 1485 if repo.ui.configbool('experimental', 'remotenames'):
1486 1486 logexchange.pullremotenames(repo, remote)
1487 1487
1488 1488 return pullop
1489 1489
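# Example (illustrative sketch, not part of the original source): invoking
# ``pull`` from an extension command. ``hg.peer`` is the standard way to get
# a peer; the command name ``fetchall`` and default source are hypothetical.
#
#     from mercurial import exchange, hg
#
#     def fetchall(ui, repo, source='default'):
#         other = hg.peer(repo, {}, source)
#         pullop = exchange.pull(repo, other, heads=None)
#         return pullop.cgresult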
1490 1490 # list of steps to perform discovery before pull
1491 1491 pulldiscoveryorder = []
1492 1492
1493 1493 # Mapping between step name and function
1494 1494 #
1495 1495 # This exists to help extensions wrap steps if necessary
1496 1496 pulldiscoverymapping = {}
1497 1497
1498 1498 def pulldiscovery(stepname):
1499 1499 """decorator for function performing discovery before pull
1500 1500
1501 1501 The function is added to the step -> function mapping and appended to the
1502 1502 list of steps. Beware that decorated function will be added in order (this
1503 1503 may matter).
1504 1504
1505 1505 You can only use this decorator for a new step, if you want to wrap a step
1506 1506 from an extension, change the pulldiscovery dictionary directly."""
1507 1507 def dec(func):
1508 1508 assert stepname not in pulldiscoverymapping
1509 1509 pulldiscoverymapping[stepname] = func
1510 1510 pulldiscoveryorder.append(stepname)
1511 1511 return func
1512 1512 return dec
1513 1513
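# Example (illustrative): an extension registering its own discovery step via
# the decorator above. The step name 'myext:ping' is hypothetical.
#
#     from mercurial import exchange
#
#     @exchange.pulldiscovery('myext:ping')
#     def _pingdiscovery(pullop):
#         pullop.repo.ui.debug('myext: discovery step ran\n')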
1514 1514 def _pulldiscovery(pullop):
1515 1515 """Run all discovery steps"""
1516 1516 for stepname in pulldiscoveryorder:
1517 1517 step = pulldiscoverymapping[stepname]
1518 1518 step(pullop)
1519 1519
1520 1520 @pulldiscovery('b1:bookmarks')
1521 1521 def _pullbookmarkbundle1(pullop):
1522 1522 """fetch bookmark data in bundle1 case
1523 1523
1524 1524 If not using bundle2, we have to fetch bookmarks before changeset
1525 1525 discovery to reduce the chance and impact of race conditions."""
1526 1526 if pullop.remotebookmarks is not None:
1527 1527 return
1528 1528 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1529 1529 # all known bundle2 servers now support listkeys, but let's be nice to
1530 1530 # new implementations.
1531 1531 return
1532 1532 books = pullop.remote.listkeys('bookmarks')
1533 1533 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1534 1534
1535 1535
1536 1536 @pulldiscovery('changegroup')
1537 1537 def _pulldiscoverychangegroup(pullop):
1538 1538 """discovery phase for the pull
1539 1539
1540 1540 Currently handles changeset discovery only; will change to handle all
1541 1541 discovery at some point."""
1542 1542 tmp = discovery.findcommonincoming(pullop.repo,
1543 1543 pullop.remote,
1544 1544 heads=pullop.heads,
1545 1545 force=pullop.force)
1546 1546 common, fetch, rheads = tmp
1547 1547 nm = pullop.repo.unfiltered().changelog.nodemap
1548 1548 if fetch and rheads:
1549 1549 # If a remote head is filtered locally, put it back in common.
1550 1550 #
1551 1551 # This is a hackish solution to catch most of the "common but locally
1552 1552 # hidden" situations. We do not perform discovery on the unfiltered
1553 1553 # repository because it ends up doing a pathological number of round
1554 1554 # trips for a huge number of changesets we do not care about.
1555 1555 #
1556 1556 # If a set of such "common but filtered" changesets exists on the server
1557 1557 # but does not include a remote head, we will not be able to detect it.
1558 1558 scommon = set(common)
1559 1559 for n in rheads:
1560 1560 if n in nm:
1561 1561 if n not in scommon:
1562 1562 common.append(n)
1563 1563 if set(rheads).issubset(set(common)):
1564 1564 fetch = []
1565 1565 pullop.common = common
1566 1566 pullop.fetch = fetch
1567 1567 pullop.rheads = rheads
1568 1568
1569 1569 def _pullbundle2(pullop):
1570 1570 """pull data using bundle2
1571 1571
1572 1572 For now, the only supported data is the changegroup."""
1573 1573 kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
1574 1574
1575 1575 # make ui easier to access
1576 1576 ui = pullop.repo.ui
1577 1577
1578 1578 # At the moment we don't do stream clones over bundle2. If that is
1579 1579 # implemented then here's where the check for that will go.
1580 1580 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1581 1581
1582 1582 # declare pull parameters
1583 1583 kwargs['common'] = pullop.common
1584 1584 kwargs['heads'] = pullop.heads or pullop.rheads
1585 1585
1586 1586 if streaming:
1587 1587 kwargs['cg'] = False
1588 1588 kwargs['stream'] = True
1589 1589 pullop.stepsdone.add('changegroup')
1590 1590 pullop.stepsdone.add('phases')
1591 1591
1592 1592 else:
1593 1593 # pulling changegroup
1594 1594 pullop.stepsdone.add('changegroup')
1595 1595
1596 1596 kwargs['cg'] = pullop.fetch
1597 1597
1598 1598 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1599 1599 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1600 1600 if (not legacyphase and hasbinaryphase):
1601 1601 kwargs['phases'] = True
1602 1602 pullop.stepsdone.add('phases')
1603 1603
1604 1604 if 'listkeys' in pullop.remotebundle2caps:
1605 1605 if 'phases' not in pullop.stepsdone:
1606 1606 kwargs['listkeys'] = ['phases']
1607 1607
1608 1608 bookmarksrequested = False
1609 1609 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1610 1610 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1611 1611
1612 1612 if pullop.remotebookmarks is not None:
1613 1613 pullop.stepsdone.add('request-bookmarks')
1614 1614
1615 1615 if ('request-bookmarks' not in pullop.stepsdone
1616 1616 and pullop.remotebookmarks is None
1617 1617 and not legacybookmark and hasbinarybook):
1618 1618 kwargs['bookmarks'] = True
1619 1619 bookmarksrequested = True
1620 1620
1621 1621 if 'listkeys' in pullop.remotebundle2caps:
1622 1622 if 'request-bookmarks' not in pullop.stepsdone:
1623 1623 # make sure to always include bookmark data when migrating
1624 1624 # `hg incoming --bundle` to using this function.
1625 1625 pullop.stepsdone.add('request-bookmarks')
1626 1626 kwargs.setdefault('listkeys', []).append('bookmarks')
1627 1627
1628 1628 # If this is a full pull / clone and the server supports the clone bundles
1629 1629 # feature, tell the server whether we attempted a clone bundle. The
1630 1630 # presence of this flag indicates the client supports clone bundles. This
1631 1631 # will enable the server to treat clients that support clone bundles
1632 1632 # differently from those that don't.
1633 1633 if (pullop.remote.capable('clonebundles')
1634 1634 and pullop.heads is None and list(pullop.common) == [nullid]):
1635 1635 kwargs['cbattempted'] = pullop.clonebundleattempted
1636 1636
1637 1637 if streaming:
1638 1638 pullop.repo.ui.status(_('streaming all changes\n'))
1639 1639 elif not pullop.fetch:
1640 1640 pullop.repo.ui.status(_("no changes found\n"))
1641 1641 pullop.cgresult = 0
1642 1642 else:
1643 1643 if pullop.heads is None and list(pullop.common) == [nullid]:
1644 1644 pullop.repo.ui.status(_("requesting all changes\n"))
1645 1645 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1646 1646 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1647 1647 if obsolete.commonversion(remoteversions) is not None:
1648 1648 kwargs['obsmarkers'] = True
1649 1649 pullop.stepsdone.add('obsmarkers')
1650 1650 _pullbundle2extraprepare(pullop, kwargs)
1651 1651
1652 1652 with pullop.remote.commandexecutor() as e:
1653 1653 args = dict(kwargs)
1654 1654 args['source'] = 'pull'
1655 1655 bundle = e.callcommand('getbundle', args).result()
1656 1656
1657 1657 try:
1658 1658 op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
1659 1659 source='pull')
1660 1660 op.modes['bookmarks'] = 'records'
1661 1661 bundle2.processbundle(pullop.repo, bundle, op=op)
1662 1662 except bundle2.AbortFromPart as exc:
1663 1663 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1664 1664 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1665 1665 except error.BundleValueError as exc:
1666 1666 raise error.Abort(_('missing support for %s') % exc)
1667 1667
1668 1668 if pullop.fetch:
1669 1669 pullop.cgresult = bundle2.combinechangegroupresults(op)
1670 1670
1671 1671 # processing phases change
1672 1672 for namespace, value in op.records['listkeys']:
1673 1673 if namespace == 'phases':
1674 1674 _pullapplyphases(pullop, value)
1675 1675
1676 1676 # processing bookmark update
1677 1677 if bookmarksrequested:
1678 1678 books = {}
1679 1679 for record in op.records['bookmarks']:
1680 1680 books[record['bookmark']] = record["node"]
1681 1681 pullop.remotebookmarks = books
1682 1682 else:
1683 1683 for namespace, value in op.records['listkeys']:
1684 1684 if namespace == 'bookmarks':
1685 1685 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1686 1686
1687 1687 # bookmark data were either already there or pulled in the bundle
1688 1688 if pullop.remotebookmarks is not None:
1689 1689 _pullbookmarks(pullop)
1690 1690
1691 1691 def _pullbundle2extraprepare(pullop, kwargs):
1692 1692 """hook function so that extensions can extend the getbundle call"""
1693 1693
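# Example (illustrative): because ``_pullbundle2extraprepare`` is a no-op
# hook, extensions typically wrap it to inject extra getbundle arguments.
# The argument name 'myext' below is hypothetical.
#
#     from mercurial import exchange, extensions
#
#     def _extraprepare(orig, pullop, kwargs):
#         kwargs['myext'] = True
#         return orig(pullop, kwargs)
#
#     def extsetup(ui):
#         extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
#                                 _extraprepare)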
1694 1694 def _pullchangeset(pullop):
1695 1695 """pull changeset from unbundle into the local repo"""
1696 1696 # We delay opening the transaction as late as possible so we
1697 1697 # don't open a transaction for nothing and don't break future useful
1698 1698 # rollback calls
1699 1699 if 'changegroup' in pullop.stepsdone:
1700 1700 return
1701 1701 pullop.stepsdone.add('changegroup')
1702 1702 if not pullop.fetch:
1703 1703 pullop.repo.ui.status(_("no changes found\n"))
1704 1704 pullop.cgresult = 0
1705 1705 return
1706 1706 tr = pullop.gettransaction()
1707 1707 if pullop.heads is None and list(pullop.common) == [nullid]:
1708 1708 pullop.repo.ui.status(_("requesting all changes\n"))
1709 1709 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1710 1710 # issue1320, avoid a race if remote changed after discovery
1711 1711 pullop.heads = pullop.rheads
1712 1712
1713 1713 if pullop.remote.capable('getbundle'):
1714 1714 # TODO: get bundlecaps from remote
1715 1715 cg = pullop.remote.getbundle('pull', common=pullop.common,
1716 1716 heads=pullop.heads or pullop.rheads)
1717 1717 elif pullop.heads is None:
1718 1718 with pullop.remote.commandexecutor() as e:
1719 1719 cg = e.callcommand('changegroup', {
1720 1720 'nodes': pullop.fetch,
1721 1721 'source': 'pull',
1722 1722 }).result()
1723 1723
1724 1724 elif not pullop.remote.capable('changegroupsubset'):
1725 1725 raise error.Abort(_("partial pull cannot be done because "
1726 1726 "other repository doesn't support "
1727 1727 "changegroupsubset."))
1728 1728 else:
1729 1729 with pullop.remote.commandexecutor() as e:
1730 1730 cg = e.callcommand('changegroupsubset', {
1731 1731 'bases': pullop.fetch,
1732 1732 'heads': pullop.heads,
1733 1733 'source': 'pull',
1734 1734 }).result()
1735 1735
1736 1736 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1737 1737 pullop.remote.url())
1738 1738 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1739 1739
1740 1740 def _pullphase(pullop):
1741 1741 # Get remote phases data from remote
1742 1742 if 'phases' in pullop.stepsdone:
1743 1743 return
1744 1744 remotephases = pullop.remote.listkeys('phases')
1745 1745 _pullapplyphases(pullop, remotephases)
1746 1746
1747 1747 def _pullapplyphases(pullop, remotephases):
1748 1748 """apply phase movement from observed remote state"""
1749 1749 if 'phases' in pullop.stepsdone:
1750 1750 return
1751 1751 pullop.stepsdone.add('phases')
1752 1752 publishing = bool(remotephases.get('publishing', False))
1753 1753 if remotephases and not publishing:
1754 1754 # remote is new and non-publishing
1755 1755 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1756 1756 pullop.pulledsubset,
1757 1757 remotephases)
1758 1758 dheads = pullop.pulledsubset
1759 1759 else:
1760 1760 # Remote is old or publishing; all common changesets
1761 1761 # should be seen as public
1762 1762 pheads = pullop.pulledsubset
1763 1763 dheads = []
1764 1764 unfi = pullop.repo.unfiltered()
1765 1765 phase = unfi._phasecache.phase
1766 1766 rev = unfi.changelog.nodemap.get
1767 1767 public = phases.public
1768 1768 draft = phases.draft
1769 1769
1770 1770 # exclude changesets already public locally and update the others
1771 1771 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1772 1772 if pheads:
1773 1773 tr = pullop.gettransaction()
1774 1774 phases.advanceboundary(pullop.repo, tr, public, pheads)
1775 1775
1776 1776 # exclude changesets already draft locally and update the others
1777 1777 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1778 1778 if dheads:
1779 1779 tr = pullop.gettransaction()
1780 1780 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1781 1781
1782 1782 def _pullbookmarks(pullop):
1783 1783 """process the remote bookmark information to update the local one"""
1784 1784 if 'bookmarks' in pullop.stepsdone:
1785 1785 return
1786 1786 pullop.stepsdone.add('bookmarks')
1787 1787 repo = pullop.repo
1788 1788 remotebookmarks = pullop.remotebookmarks
1789 1789 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1790 1790 pullop.remote.url(),
1791 1791 pullop.gettransaction,
1792 1792 explicit=pullop.explicitbookmarks)
1793 1793
1794 1794 def _pullobsolete(pullop):
1795 1795 """utility function to pull obsolete markers from a remote
1796 1796
1797 1797 The `gettransaction` argument is a function that returns the pull
1798 1798 transaction, creating one if necessary. We return the transaction to inform
1799 1799 the calling code that a new transaction has been created (when applicable).
1800 1800
1801 1801 Exists mostly to allow overriding for experimentation purposes."""
1802 1802 if 'obsmarkers' in pullop.stepsdone:
1803 1803 return
1804 1804 pullop.stepsdone.add('obsmarkers')
1805 1805 tr = None
1806 1806 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1807 1807 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1808 1808 remoteobs = pullop.remote.listkeys('obsolete')
1809 1809 if 'dump0' in remoteobs:
1810 1810 tr = pullop.gettransaction()
1811 1811 markers = []
1812 1812 for key in sorted(remoteobs, reverse=True):
1813 1813 if key.startswith('dump'):
1814 1814 data = util.b85decode(remoteobs[key])
1815 1815 version, newmarks = obsolete._readmarkers(data)
1816 1816 markers += newmarks
1817 1817 if markers:
1818 1818 pullop.repo.obsstore.add(tr, markers)
1819 1819 pullop.repo.invalidatevolatilesets()
1820 1820 return tr
1821 1821
1822 1822 def caps20to10(repo, role):
1823 1823 """return a set with appropriate options to use bundle20 during getbundle"""
1824 1824 caps = {'HG20'}
1825 1825 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
1826 1826 caps.add('bundle2=' + urlreq.quote(capsblob))
1827 1827 return caps
1828 1828
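# For reference (values abbreviated and illustrative): the returned set looks
# like ``{'HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02...'}``, i.e. the
# URL-quoted bundle2 capability blob piggy-backed on the legacy bundlecaps
# argument.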
1829 1829 # List of names of steps to perform for a bundle2 getbundle; order matters.
1830 1830 getbundle2partsorder = []
1831 1831
1832 1832 # Mapping between step name and function
1833 1833 #
1834 1834 # This exists to help extensions wrap steps if necessary
1835 1835 getbundle2partsmapping = {}
1836 1836
1837 1837 def getbundle2partsgenerator(stepname, idx=None):
1838 1838 """decorator for function generating bundle2 part for getbundle
1839 1839
1840 1840 The function is added to the step -> function mapping and appended to the
1841 1841 list of steps. Beware that decorated functions will be added in order
1842 1842 (this may matter).
1843 1843
1844 1844 You can only use this decorator for new steps; if you want to wrap a step
1845 1845 from an extension, change the getbundle2partsmapping dictionary directly."""
1846 1846 def dec(func):
1847 1847 assert stepname not in getbundle2partsmapping
1848 1848 getbundle2partsmapping[stepname] = func
1849 1849 if idx is None:
1850 1850 getbundle2partsorder.append(stepname)
1851 1851 else:
1852 1852 getbundle2partsorder.insert(idx, stepname)
1853 1853 return func
1854 1854 return dec
1855 1855
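# Example (illustrative): an extension contributing its own bundle2 part to
# getbundle responses. The step, part, and capability name 'myext:data' are
# hypothetical; lowercase part names are advisory, so old clients can
# ignore the part.
#
#     from mercurial import exchange
#
#     @exchange.getbundle2partsgenerator('myext:data')
#     def _getbundlemyextpart(bundler, repo, source, bundlecaps=None,
#                             b2caps=None, **kwargs):
#         if 'myext:data' in b2caps:
#             bundler.newpart('myext:data', data=b'payload')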
1856 1856 def bundle2requested(bundlecaps):
1857 1857 if bundlecaps is not None:
1858 1858 return any(cap.startswith('HG2') for cap in bundlecaps)
1859 1859 return False
1860 1860
1861 1861 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1862 1862 **kwargs):
1863 1863 """Return chunks constituting a bundle's raw data.
1864 1864
1865 1865 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1866 1866 passed.
1867 1867
1868 1868 Returns a 2-tuple of a dict with metadata about the generated bundle
1869 1869 and an iterator over raw chunks (of varying sizes).
1870 1870 """
1871 1871 kwargs = pycompat.byteskwargs(kwargs)
1872 1872 info = {}
1873 1873 usebundle2 = bundle2requested(bundlecaps)
1874 1874 # bundle10 case
1875 1875 if not usebundle2:
1876 1876 if bundlecaps and not kwargs.get('cg', True):
1877 1877 raise ValueError(_('request for bundle10 must include changegroup'))
1878 1878
1879 1879 if kwargs:
1880 1880 raise ValueError(_('unsupported getbundle arguments: %s')
1881 1881 % ', '.join(sorted(kwargs.keys())))
1882 1882 outgoing = _computeoutgoing(repo, heads, common)
1883 1883 info['bundleversion'] = 1
1884 1884 return info, changegroup.makestream(repo, outgoing, '01', source,
1885 1885 bundlecaps=bundlecaps)
1886 1886
1887 1887 # bundle20 case
1888 1888 info['bundleversion'] = 2
1889 1889 b2caps = {}
1890 1890 for bcaps in bundlecaps:
1891 1891 if bcaps.startswith('bundle2='):
1892 1892 blob = urlreq.unquote(bcaps[len('bundle2='):])
1893 1893 b2caps.update(bundle2.decodecaps(blob))
1894 1894 bundler = bundle2.bundle20(repo.ui, b2caps)
1895 1895
1896 1896 kwargs['heads'] = heads
1897 1897 kwargs['common'] = common
1898 1898
1899 1899 for name in getbundle2partsorder:
1900 1900 func = getbundle2partsmapping[name]
1901 1901 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1902 1902 **pycompat.strkwargs(kwargs))
1903 1903
1904 1904 info['prefercompressed'] = bundler.prefercompressed
1905 1905
1906 1906 return info, bundler.getchunks()
1907 1907
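# Example (illustrative): producing the raw wire data for a full pull.
# ``heads`` and ``common`` would normally come from discovery; ``nullid`` is
# imported at the top of this module.
#
#     info, chunks = getbundlechunks(repo, 'serve', heads=repo.heads(),
#                                    common=[nullid])
#     data = b''.join(chunks)   # raw chunks exactly as sent on the wire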
1908 1908 @getbundle2partsgenerator('stream2')
1909 1909 def _getbundlestream2(bundler, repo, *args, **kwargs):
1910 1910 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
1911 1911
1912 1912 @getbundle2partsgenerator('changegroup')
1913 1913 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1914 1914 b2caps=None, heads=None, common=None, **kwargs):
1915 1915 """add a changegroup part to the requested bundle"""
1916 1916 cgstream = None
1917 1917 if kwargs.get(r'cg', True):
1918 1918 # build changegroup bundle here.
1919 1919 version = '01'
1920 1920 cgversions = b2caps.get('changegroup')
1921 1921 if cgversions: # 3.1 and 3.2 ship with an empty value
1922 1922 cgversions = [v for v in cgversions
1923 1923 if v in changegroup.supportedoutgoingversions(repo)]
1924 1924 if not cgversions:
1925 1925 raise ValueError(_('no common changegroup version'))
1926 1926 version = max(cgversions)
1927 1927 outgoing = _computeoutgoing(repo, heads, common)
1928 1928 if outgoing.missing:
1929 1929 cgstream = changegroup.makestream(repo, outgoing, version, source,
1930 1930 bundlecaps=bundlecaps)
1931 1931
1932 1932 if cgstream:
1933 1933 part = bundler.newpart('changegroup', data=cgstream)
1934 1934 if cgversions:
1935 1935 part.addparam('version', version)
1936 1936 part.addparam('nbchanges', '%d' % len(outgoing.missing),
1937 1937 mandatory=False)
1938 1938 if 'treemanifest' in repo.requirements:
1939 1939 part.addparam('treemanifest', '1')
1940 1940
1941 1941 @getbundle2partsgenerator('bookmarks')
1942 1942 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
1943 1943 b2caps=None, **kwargs):
1944 1944 """add a bookmark part to the requested bundle"""
1945 1945 if not kwargs.get(r'bookmarks', False):
1946 1946 return
1947 1947 if 'bookmarks' not in b2caps:
1948 1948 raise ValueError(_('no common bookmarks exchange method'))
1949 1949 books = bookmod.listbinbookmarks(repo)
1950 1950 data = bookmod.binaryencode(books)
1951 1951 if data:
1952 1952 bundler.newpart('bookmarks', data=data)
1953 1953
1954 1954 @getbundle2partsgenerator('listkeys')
1955 1955 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1956 1956 b2caps=None, **kwargs):
1957 1957 """add parts containing listkeys namespaces to the requested bundle"""
1958 1958 listkeys = kwargs.get(r'listkeys', ())
1959 1959 for namespace in listkeys:
1960 1960 part = bundler.newpart('listkeys')
1961 1961 part.addparam('namespace', namespace)
1962 1962 keys = repo.listkeys(namespace).items()
1963 1963 part.data = pushkey.encodekeys(keys)
1964 1964
1965 1965 @getbundle2partsgenerator('obsmarkers')
1966 1966 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1967 1967 b2caps=None, heads=None, **kwargs):
1968 1968 """add an obsolescence markers part to the requested bundle"""
1969 1969 if kwargs.get(r'obsmarkers', False):
1970 1970 if heads is None:
1971 1971 heads = repo.heads()
1972 1972 subset = [c.node() for c in repo.set('::%ln', heads)]
1973 1973 markers = repo.obsstore.relevantmarkers(subset)
1974 1974 markers = sorted(markers)
1975 1975 bundle2.buildobsmarkerspart(bundler, markers)
1976 1976
1977 1977 @getbundle2partsgenerator('phases')
1978 1978 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
1979 1979 b2caps=None, heads=None, **kwargs):
1980 1980 """add phase heads part to the requested bundle"""
1981 1981 if kwargs.get(r'phases', False):
1982 1982 if 'heads' not in b2caps.get('phases'):
1983 1983 raise ValueError(_('no common phases exchange method'))
1984 1984 if heads is None:
1985 1985 heads = repo.heads()
1986 1986
1987 1987 headsbyphase = collections.defaultdict(set)
1988 1988 if repo.publishing():
1989 1989 headsbyphase[phases.public] = heads
1990 1990 else:
1991 1991 # find the appropriate heads to move
1992 1992
1993 1993 phase = repo._phasecache.phase
1994 1994 node = repo.changelog.node
1995 1995 rev = repo.changelog.rev
1996 1996 for h in heads:
1997 1997 headsbyphase[phase(repo, rev(h))].add(h)
1998 1998 seenphases = list(headsbyphase.keys())
1999 1999
2000 2000 # We do not handle anything but public and draft phases for now
2001 2001 if seenphases:
2002 2002 assert max(seenphases) <= phases.draft
2003 2003
2004 2004 # if client is pulling non-public changesets, we need to find
2005 2005 # intermediate public heads.
2006 2006 draftheads = headsbyphase.get(phases.draft, set())
2007 2007 if draftheads:
2008 2008 publicheads = headsbyphase.get(phases.public, set())
2009 2009
2010 2010 revset = 'heads(only(%ln, %ln) and public())'
2011 2011 extraheads = repo.revs(revset, draftheads, publicheads)
2012 2012 for r in extraheads:
2013 2013 headsbyphase[phases.public].add(node(r))
2014 2014
2015 2015 # transform data in a format used by the encoding function
2016 2016 phasemapping = []
2017 2017 for phase in phases.allphases:
2018 2018 phasemapping.append(sorted(headsbyphase[phase]))
2019 2019
2020 2020 # generate the actual part
2021 2021 phasedata = phases.binaryencode(phasemapping)
2022 2022 bundler.newpart('phase-heads', data=phasedata)
2023 2023
2024 2024 @getbundle2partsgenerator('hgtagsfnodes')
2025 2025 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
2026 2026 b2caps=None, heads=None, common=None,
2027 2027 **kwargs):
2028 2028 """Transfer the .hgtags filenodes mapping.
2029 2029
2030 2030 Only values for heads in this bundle will be transferred.
2031 2031
2032 2032 The part data consists of pairs of 20-byte changeset nodes and raw
2033 2033 .hgtags filenode values.
2034 2034 """
2035 2035 # Don't send unless:
2036 2036 # - changesets are being exchanged,
2037 2037 # - the client supports it.
2038 2038 if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
2039 2039 return
2040 2040
2041 2041 outgoing = _computeoutgoing(repo, heads, common)
2042 2042 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2043 2043
2044 2044 @getbundle2partsgenerator('cache:rev-branch-cache')
2045 2045 def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
2046 2046 b2caps=None, heads=None, common=None,
2047 2047 **kwargs):
2048 2048 """Transfer the rev-branch-cache mapping
2049 2049
2050 2050 The payload is a series of records, one per branch:
2051 2051
2052 2052 1) branch name length
2053 2053 2) number of open heads
2054 2054 3) number of closed heads
2055 2055 4) open heads nodes
2056 2056 5) closed heads nodes
2057 2057 """
2058 2058 # Don't send unless:
2059 2059 # - changesets are being exchanged,
2060 2060 # - the client supports it.
2061 2061 if not (kwargs.get(r'cg', True)) or 'rev-branch-cache' not in b2caps:
2062 2062 return
2063 2063 outgoing = _computeoutgoing(repo, heads, common)
2064 2064 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2065 2065
2066 2066 def check_heads(repo, their_heads, context):
2067 2067 """check if the heads of a repo have been modified
2068 2068
2069 2069 Used by peer for unbundling.
2070 2070 """
2071 2071 heads = repo.heads()
2072 2072 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
2073 2073 if not (their_heads == ['force'] or their_heads == heads or
2074 2074 their_heads == ['hashed', heads_hash]):
2075 2075 # someone else committed/pushed/unbundled while we
2076 2076 # were transferring data
2077 2077 raise error.PushRaced('repository changed while %s - '
2078 2078 'please try again' % context)
2079 2079
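# Illustrative sketch: the pushing client computes the same hash over the
# heads it observed during discovery so the server can detect a race
# (``observedheads`` is a hypothetical list of 20-byte nodes):
#
#     heads_hash = hashlib.sha1(''.join(sorted(observedheads))).digest()
#     their_heads = ['hashed', heads_hash]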
2080 2080 def unbundle(repo, cg, heads, source, url):
2081 2081 """Apply a bundle to a repo.
2082 2082
2083 2083 This function makes sure the repo is locked during the application and has
2084 2084 a mechanism to check that no push race occurred between the creation of the
2085 2085 bundle and its application.
2086 2086
2087 2087 If the push was raced, a PushRaced exception is raised."""
2088 2088 r = 0
2089 2089 # need a transaction when processing a bundle2 stream
2090 2090 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2091 2091 lockandtr = [None, None, None]
2092 2092 recordout = None
2093 2093 # quick fix for output mismatch with bundle2 in 3.4
2094 2094 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
2095 2095 if url.startswith('remote:http:') or url.startswith('remote:https:'):
2096 2096 captureoutput = True
2097 2097 try:
2098 2098 # note: outside bundle1, 'heads' is expected to be empty and this
2099 2099 # 'check_heads' call will be a no-op
2100 2100 check_heads(repo, heads, 'uploading changes')
2101 2101 # push can proceed
2102 2102 if not isinstance(cg, bundle2.unbundle20):
2103 2103 # legacy case: bundle1 (changegroup 01)
2104 2104 txnname = "\n".join([source, util.hidepassword(url)])
2105 2105 with repo.lock(), repo.transaction(txnname) as tr:
2106 2106 op = bundle2.applybundle(repo, cg, tr, source, url)
2107 2107 r = bundle2.combinechangegroupresults(op)
2108 2108 else:
2109 2109 r = None
2110 2110 try:
2111 2111 def gettransaction():
2112 2112 if not lockandtr[2]:
2113 2113 lockandtr[0] = repo.wlock()
2114 2114 lockandtr[1] = repo.lock()
2115 2115 lockandtr[2] = repo.transaction(source)
2116 2116 lockandtr[2].hookargs['source'] = source
2117 2117 lockandtr[2].hookargs['url'] = url
2118 2118 lockandtr[2].hookargs['bundle2'] = '1'
2119 2119 return lockandtr[2]
2120 2120
2121 2121 # Do greedy locking by default until we're satisfied with lazy
2122 2122 # locking.
2123 2123 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
2124 2124 gettransaction()
2125 2125
2126 2126 op = bundle2.bundleoperation(repo, gettransaction,
2127 2127 captureoutput=captureoutput,
2128 2128 source='push')
2129 2129 try:
2130 2130 op = bundle2.processbundle(repo, cg, op=op)
2131 2131 finally:
2132 2132 r = op.reply
2133 2133 if captureoutput and r is not None:
2134 2134 repo.ui.pushbuffer(error=True, subproc=True)
2135 2135 def recordout(output):
2136 2136 r.newpart('output', data=output, mandatory=False)
2137 2137 if lockandtr[2] is not None:
2138 2138 lockandtr[2].close()
2139 2139 except BaseException as exc:
2140 2140 exc.duringunbundle2 = True
2141 2141 if captureoutput and r is not None:
2142 2142 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2143 2143 def recordout(output):
2144 2144 part = bundle2.bundlepart('output', data=output,
2145 2145 mandatory=False)
2146 2146 parts.append(part)
2147 2147 raise
2148 2148 finally:
2149 2149 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2150 2150 if recordout is not None:
2151 2151 recordout(repo.ui.popbuffer())
2152 2152 return r
2153 2153
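# Example (illustrative): applying a bundle file through ``unbundle``, using
# the 'force' sentinel to skip the push-race check. ``readbundle`` is defined
# earlier in this module; the file name is hypothetical.
#
#     with open('incoming.hg', 'rb') as fh:
#         gen = readbundle(repo.ui, fh, 'incoming.hg')
#         unbundle(repo, gen, ['force'], 'push', 'bundle:incoming.hg')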
2154 2154 def _maybeapplyclonebundle(pullop):
2155 2155 """Apply a clone bundle from a remote, if possible."""
2156 2156
2157 2157 repo = pullop.repo
2158 2158 remote = pullop.remote
2159 2159
2160 2160 if not repo.ui.configbool('ui', 'clonebundles'):
2161 2161 return
2162 2162
2163 2163 # Only run if local repo is empty.
2164 2164 if len(repo):
2165 2165 return
2166 2166
2167 2167 if pullop.heads:
2168 2168 return
2169 2169
2170 2170 if not remote.capable('clonebundles'):
2171 2171 return
2172 2172
2173 2173 with remote.commandexecutor() as e:
2174 2174 res = e.callcommand('clonebundles', {}).result()
2175 2175
2176 2176 # If we call the wire protocol command, that's good enough to record the
2177 2177 # attempt.
2178 2178 pullop.clonebundleattempted = True
2179 2179
2180 2180 entries = parseclonebundlesmanifest(repo, res)
2181 2181 if not entries:
2182 2182 repo.ui.note(_('no clone bundles available on remote; '
2183 2183 'falling back to regular clone\n'))
2184 2184 return
2185 2185
2186 2186 entries = filterclonebundleentries(
2187 2187 repo, entries, streamclonerequested=pullop.streamclonerequested)
2188 2188
2189 2189 if not entries:
2190 2190 # There is a thundering herd concern here. However, if a server
2191 2191 # operator doesn't advertise bundles appropriate for its clients,
2192 2192 # they deserve what's coming. Furthermore, from a client's
2193 2193 # perspective, no automatic fallback would mean not being able to
2194 2194 # clone!
2195 2195 repo.ui.warn(_('no compatible clone bundles available on server; '
2196 2196 'falling back to regular clone\n'))
2197 2197 repo.ui.warn(_('(you may want to report this to the server '
2198 2198 'operator)\n'))
2199 2199 return
2200 2200
2201 2201 entries = sortclonebundleentries(repo.ui, entries)
2202 2202
2203 2203 url = entries[0]['URL']
2204 2204 repo.ui.status(_('applying clone bundle from %s\n') % url)
2205 2205 if trypullbundlefromurl(repo.ui, repo, url):
2206 2206 repo.ui.status(_('finished applying clone bundle\n'))
2207 2207 # Bundle failed.
2208 2208 #
2209 2209 # We abort by default to avoid the thundering herd of
2210 2210 # clients flooding a server that was expecting expensive
2211 2211 # clone load to be offloaded.
2212 2212 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2213 2213 repo.ui.warn(_('falling back to normal clone\n'))
2214 2214 else:
2215 2215 raise error.Abort(_('error applying bundle'),
2216 2216 hint=_('if this error persists, consider contacting '
2217 2217 'the server operator or disable clone '
2218 2218 'bundles via '
2219 2219 '"--config ui.clonebundles=false"'))
2220 2220
2221 2221 def parseclonebundlesmanifest(repo, s):
2222 2222 """Parses the raw text of a clone bundles manifest.
2223 2223
2224 2224 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2225 2225 to the URL; the other keys are the attributes for the entry.
2226 2226 """
2227 2227 m = []
2228 2228 for line in s.splitlines():
2229 2229 fields = line.split()
2230 2230 if not fields:
2231 2231 continue
2232 2232 attrs = {'URL': fields[0]}
2233 2233 for rawattr in fields[1:]:
2234 2234 key, value = rawattr.split('=', 1)
2235 2235 key = urlreq.unquote(key)
2236 2236 value = urlreq.unquote(value)
2237 2237 attrs[key] = value
2238 2238
2239 2239 # Parse BUNDLESPEC into components. This makes client-side
2240 2240 # preferences easier to specify since you can prefer a single
2241 2241 # component of the BUNDLESPEC.
2242 2242 if key == 'BUNDLESPEC':
2243 2243 try:
2244 2244 bundlespec = parsebundlespec(repo, value,
2245 2245 externalnames=True)
2246 2246 attrs['COMPRESSION'] = bundlespec.compression
2247 2247 attrs['VERSION'] = bundlespec.version
2248 2248 except error.InvalidBundleSpecification:
2249 2249 pass
2250 2250 except error.UnsupportedBundleSpecification:
2251 2251 pass
2252 2252
2253 2253 m.append(attrs)
2254 2254
2255 2255 return m
2256 2256
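# An illustrative manifest (hypothetical URLs): each line is a URL optionally
# followed by URI-encoded ``key=value`` attributes, e.g.
#
#     https://example.com/full.hg BUNDLESPEC=gzip-v2
#     https://example.com/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1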
2257 2257 def isstreamclonespec(bundlespec):
2258 2258 # Stream clone v1
2259 2259 if (bundlespec.compression == 'UN' and bundlespec.version == 's1'):
2260 2260 return True
2261 2261
2262 2262 # Stream clone v2
2263 2263 if (bundlespec.compression == 'UN' and bundlespec.version == '02' and
2264 2264 bundlespec.contentopts.get('streamv2')):
2265 2265 return True
2266 2266
2267 2267 return False
2268 2268
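# For reference (illustrative): a stream clone v1 spec parses to compression
# 'UN' with version 's1' (external name ``none-packed1``); a v2 spec parses
# to compression 'UN' and version '02' with the ``streamv2`` content option.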
2269 2269 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2270 2270 """Remove incompatible clone bundle manifest entries.
2271 2271
2272 2272 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2273 2273 and returns a new list consisting of only the entries that this client
2274 2274 should be able to apply.
2275 2275
2276 2276 There is no guarantee we'll be able to apply all returned entries because
2277 2277 the metadata we use to filter on may be missing or wrong.
2278 2278 """
2279 2279 newentries = []
2280 2280 for entry in entries:
2281 2281 spec = entry.get('BUNDLESPEC')
2282 2282 if spec:
2283 2283 try:
2284 2284 bundlespec = parsebundlespec(repo, spec, strict=True)
2285 2285
2286 2286 # If a stream clone was requested, filter out non-streamclone
2287 2287 # entries.
2288 2288 if streamclonerequested and not isstreamclonespec(bundlespec):
2289 2289 repo.ui.debug('filtering %s because not a stream clone\n' %
2290 2290 entry['URL'])
2291 2291 continue
2292 2292
2293 2293 except error.InvalidBundleSpecification as e:
2294 repo.ui.debug(str(e) + '\n')
2294 repo.ui.debug(stringutil.forcebytestr(e) + '\n')
2295 2295 continue
2296 2296 except error.UnsupportedBundleSpecification as e:
2297 2297 repo.ui.debug('filtering %s because unsupported bundle '
2298 2298 'spec: %s\n' % (
2299 2299 entry['URL'], stringutil.forcebytestr(e)))
2300 2300 continue
2301 2301 # If we don't have a spec and requested a stream clone, we don't know
2302 2302 # what the entry is so don't attempt to apply it.
2303 2303 elif streamclonerequested:
2304 2304 repo.ui.debug('filtering %s because cannot determine if a stream '
2305 2305 'clone bundle\n' % entry['URL'])
2306 2306 continue
2307 2307
2308 2308 if 'REQUIRESNI' in entry and not sslutil.hassni:
2309 2309 repo.ui.debug('filtering %s because SNI not supported\n' %
2310 2310 entry['URL'])
2311 2311 continue
2312 2312
2313 2313 newentries.append(entry)
2314 2314
2315 2315 return newentries
2316 2316
2317 2317 class clonebundleentry(object):
2318 2318 """Represents an item in a clone bundles manifest.
2319 2319
2320 2320 This rich class is needed to support sorting since sorted() in Python 3
2321 2321 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2322 2322 won't work.
2323 2323 """
2324 2324
2325 2325 def __init__(self, value, prefers):
2326 2326 self.value = value
2327 2327 self.prefers = prefers
2328 2328
2329 2329 def _cmp(self, other):
2330 2330 for prefkey, prefvalue in self.prefers:
2331 2331 avalue = self.value.get(prefkey)
2332 2332 bvalue = other.value.get(prefkey)
2333 2333
2334 2334 # Special case: b is missing the attribute and a matches exactly.
2335 2335 if avalue is not None and bvalue is None and avalue == prefvalue:
2336 2336 return -1
2337 2337
2338 2338 # Special case: a is missing the attribute and b matches exactly.
2339 2339 if bvalue is not None and avalue is None and bvalue == prefvalue:
2340 2340 return 1
2341 2341
2342 2342 # We can't compare unless the attribute is present on both.
2343 2343 if avalue is None or bvalue is None:
2344 2344 continue
2345 2345
2346 2346 # Same values should fall back to next attribute.
2347 2347 if avalue == bvalue:
2348 2348 continue
2349 2349
2350 2350 # Exact matches come first.
2351 2351 if avalue == prefvalue:
2352 2352 return -1
2353 2353 if bvalue == prefvalue:
2354 2354 return 1
2355 2355
2356 2356 # Fall back to next attribute.
2357 2357 continue
2358 2358
2359 2359 # If we got here we couldn't sort by attributes and prefers. Fall
2360 2360 # back to index order.
2361 2361 return 0
2362 2362
2363 2363 def __lt__(self, other):
2364 2364 return self._cmp(other) < 0
2365 2365
2366 2366 def __gt__(self, other):
2367 2367 return self._cmp(other) > 0
2368 2368
2369 2369 def __eq__(self, other):
2370 2370 return self._cmp(other) == 0
2371 2371
2372 2372 def __le__(self, other):
2373 2373 return self._cmp(other) <= 0
2374 2374
2375 2375 def __ge__(self, other):
2376 2376 return self._cmp(other) >= 0
2377 2377
2378 2378 def __ne__(self, other):
2379 2379 return self._cmp(other) != 0
2380 2380
2381 2381 def sortclonebundleentries(ui, entries):
2382 2382 prefers = ui.configlist('ui', 'clonebundleprefers')
2383 2383 if not prefers:
2384 2384 return list(entries)
2385 2385
2386 2386 prefers = [p.split('=', 1) for p in prefers]
2387 2387
2388 2388 items = sorted(clonebundleentry(v, prefers) for v in entries)
2389 2389 return [i.value for i in items]
2390 2390
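# Illustrative configuration: a client preferring v2 bundles, then gzip
# compression, over anything else. Attribute names match the manifest keys
# set by ``parseclonebundlesmanifest`` above (e.g. COMPRESSION, VERSION).
#
#     [ui]
#     clonebundleprefers = VERSION=v2, COMPRESSION=gzip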
2391 2391 def trypullbundlefromurl(ui, repo, url):
2392 2392 """Attempt to apply a bundle from a URL."""
2393 2393 with repo.lock(), repo.transaction('bundleurl') as tr:
2394 2394 try:
2395 2395 fh = urlmod.open(ui, url)
2396 2396 cg = readbundle(ui, fh, 'stream')
2397 2397
2398 2398 if isinstance(cg, streamclone.streamcloneapplier):
2399 2399 cg.apply(repo)
2400 2400 else:
2401 2401 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2402 2402 return True
2403 2403 except urlerr.httperror as e:
2404 2404 ui.warn(_('HTTP error fetching bundle: %s\n') %
2405 2405 stringutil.forcebytestr(e))
2406 2406 except urlerr.urlerror as e:
2407 2407 ui.warn(_('error fetching bundle: %s\n') %
2408 2408 stringutil.forcebytestr(e.reason))
2409 2409
2410 2410 return False