exchange: use command executor interface for calling listkeys...
Gregory Szorc
r37775:2a8ad00b default
@@ -1,2410 +1,2422 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 bin,
17 17 hex,
18 18 nullid,
19 19 )
20 20 from .thirdparty import (
21 21 attr,
22 22 )
23 23 from . import (
24 24 bookmarks as bookmod,
25 25 bundle2,
26 26 changegroup,
27 27 discovery,
28 28 error,
29 29 lock as lockmod,
30 30 logexchange,
31 31 obsolete,
32 32 phases,
33 33 pushkey,
34 34 pycompat,
35 35 scmutil,
36 36 sslutil,
37 37 streamclone,
38 38 url as urlmod,
39 39 util,
40 40 )
41 41 from .utils import (
42 42 stringutil,
43 43 )
44 44
45 45 urlerr = util.urlerr
46 46 urlreq = util.urlreq
47 47
48 48 # Maps bundle version human names to changegroup versions.
49 49 _bundlespeccgversions = {'v1': '01',
50 50 'v2': '02',
51 51 'packed1': 's1',
52 52 'bundle2': '02', #legacy
53 53 }
54 54
55 55 # Maps bundle version with content opts to choose which part to bundle
56 56 _bundlespeccontentopts = {
57 57 'v1': {
58 58 'changegroup': True,
59 59 'cg.version': '01',
60 60 'obsolescence': False,
61 61 'phases': False,
62 62 'tagsfnodescache': False,
63 63 'revbranchcache': False
64 64 },
65 65 'v2': {
66 66 'changegroup': True,
67 67 'cg.version': '02',
68 68 'obsolescence': False,
69 69 'phases': False,
70 70 'tagsfnodescache': True,
71 71 'revbranchcache': True
72 72 },
73 73 'packed1' : {
74 74 'cg.version': 's1'
75 75 }
76 76 }
77 77 _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
78 78
79 79 _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
80 80 "tagsfnodescache": False,
81 81 "revbranchcache": False}}
82 82
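A note on how these tables combine: the version string selects a base dict from _bundlespeccontentopts, and a "stream=v2" parameter overlays the streamv2 variant on top (see parsebundlespec below). A quick illustration using the module's own tables:

    contentopts = dict(_bundlespeccontentopts['v2'])
    contentopts.update(_bundlespecvariants['streamv2'])
    # changegroup is now False, streamv2 is True, and the cache parts
    # (tagsfnodescache, revbranchcache) are disabled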
83 83 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
84 84 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
85 85
86 86 @attr.s
87 87 class bundlespec(object):
88 88 compression = attr.ib()
89 89 version = attr.ib()
90 90 params = attr.ib()
91 91 contentopts = attr.ib()
92 92
93 93 def parsebundlespec(repo, spec, strict=True, externalnames=False):
94 94 """Parse a bundle string specification into parts.
95 95
96 96 Bundle specifications denote a well-defined bundle/exchange format.
97 97 The content of a given specification should not change over time in
98 98 order to ensure that bundles produced by a newer version of Mercurial are
99 99 readable from an older version.
100 100
101 101 The string currently has the form:
102 102
103 103 <compression>-<type>[;<parameter0>[;<parameter1>]]
104 104
105 105 Where <compression> is one of the supported compression formats
106 106 and <type> is (currently) a version string. A ";" can follow the type and
107 107 all text afterwards is interpreted as URI encoded, ";" delimited key=value
108 108 pairs.
109 109
110 110 If ``strict`` is True (the default) <compression> is required. Otherwise,
111 111 it is optional.
112 112
113 113 If ``externalnames`` is False (the default), the human-centric names will
114 114 be converted to their internal representation.
115 115
116 116 Returns a bundlespec object of (compression, version, parameters).
117 117 Compression will be ``None`` if not in strict mode and a compression isn't
118 118 defined.
119 119
120 120 An ``InvalidBundleSpecification`` is raised when the specification is
121 121 not syntactically well formed.
122 122
123 123 An ``UnsupportedBundleSpecification`` is raised when the compression or
124 124 bundle type/version is not recognized.
125 125
126 126 Note: this function will likely eventually return a more complex data
127 127 structure, including bundle2 part information.
128 128 """
129 129 def parseparams(s):
130 130 if ';' not in s:
131 131 return s, {}
132 132
133 133 params = {}
134 134 version, paramstr = s.split(';', 1)
135 135
136 136 for p in paramstr.split(';'):
137 137 if '=' not in p:
138 138 raise error.InvalidBundleSpecification(
139 139 _('invalid bundle specification: '
140 140 'missing "=" in parameter: %s') % p)
141 141
142 142 key, value = p.split('=', 1)
143 143 key = urlreq.unquote(key)
144 144 value = urlreq.unquote(value)
145 145 params[key] = value
146 146
147 147 return version, params
148 148
149 149
150 150 if strict and '-' not in spec:
151 151 raise error.InvalidBundleSpecification(
152 152 _('invalid bundle specification; '
153 153 'must be prefixed with compression: %s') % spec)
154 154
155 155 if '-' in spec:
156 156 compression, version = spec.split('-', 1)
157 157
158 158 if compression not in util.compengines.supportedbundlenames:
159 159 raise error.UnsupportedBundleSpecification(
160 160 _('%s compression is not supported') % compression)
161 161
162 162 version, params = parseparams(version)
163 163
164 164 if version not in _bundlespeccgversions:
165 165 raise error.UnsupportedBundleSpecification(
166 166 _('%s is not a recognized bundle version') % version)
167 167 else:
168 168 # Value could be just the compression or just the version, in which
169 169 # case some defaults are assumed (but only when not in strict mode).
170 170 assert not strict
171 171
172 172 spec, params = parseparams(spec)
173 173
174 174 if spec in util.compengines.supportedbundlenames:
175 175 compression = spec
176 176 version = 'v1'
177 177 # Generaldelta repos require v2.
178 178 if 'generaldelta' in repo.requirements:
179 179 version = 'v2'
180 180 # Modern compression engines require v2.
181 181 if compression not in _bundlespecv1compengines:
182 182 version = 'v2'
183 183 elif spec in _bundlespeccgversions:
184 184 if spec == 'packed1':
185 185 compression = 'none'
186 186 else:
187 187 compression = 'bzip2'
188 188 version = spec
189 189 else:
190 190 raise error.UnsupportedBundleSpecification(
191 191 _('%s is not a recognized bundle specification') % spec)
192 192
193 193 # Bundle version 1 only supports a known set of compression engines.
194 194 if version == 'v1' and compression not in _bundlespecv1compengines:
195 195 raise error.UnsupportedBundleSpecification(
196 196 _('compression engine %s is not supported on v1 bundles') %
197 197 compression)
198 198
199 199 # The specification for packed1 can optionally declare the data formats
200 200 # required to apply it. If we see this metadata, compare against what the
201 201 # repo supports and error if the bundle isn't compatible.
202 202 if version == 'packed1' and 'requirements' in params:
203 203 requirements = set(params['requirements'].split(','))
204 204 missingreqs = requirements - repo.supportedformats
205 205 if missingreqs:
206 206 raise error.UnsupportedBundleSpecification(
207 207 _('missing support for repository features: %s') %
208 208 ', '.join(sorted(missingreqs)))
209 209
210 210 # Compute contentopts based on the version
211 211 contentopts = _bundlespeccontentopts.get(version, {}).copy()
212 212
213 213 # Process the variants
214 214 if "stream" in params and params["stream"] == "v2":
215 215 variant = _bundlespecvariants["streamv2"]
216 216 contentopts.update(variant)
217 217
218 218 if not externalnames:
219 219 engine = util.compengines.forbundlename(compression)
220 220 compression = engine.bundletype()[1]
221 221 version = _bundlespeccgversions[version]
222 222
223 223 return bundlespec(compression, version, params, contentopts)
224 224
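To make the grammar above concrete, here is a minimal standalone sketch of splitting a spec string (a hypothetical helper, not part of this module; it omits the URI unquoting, validation, and repo-dependent defaulting that parsebundlespec performs):

    def splitbundlespec(spec):
        # 'gzip-v2;stream=v2' -> ('gzip', 'v2', {'stream': 'v2'})
        compression, _, rest = spec.partition('-')
        version, _, paramstr = rest.partition(';')
        params = {}
        if paramstr:
            for p in paramstr.split(';'):
                key, _, value = p.partition('=')
                params[key] = value
        return compression, version, params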
225 225 def readbundle(ui, fh, fname, vfs=None):
226 226 header = changegroup.readexactly(fh, 4)
227 227
228 228 alg = None
229 229 if not fname:
230 230 fname = "stream"
231 231 if not header.startswith('HG') and header.startswith('\0'):
232 232 fh = changegroup.headerlessfixup(fh, header)
233 233 header = "HG10"
234 234 alg = 'UN'
235 235 elif vfs:
236 236 fname = vfs.join(fname)
237 237
238 238 magic, version = header[0:2], header[2:4]
239 239
240 240 if magic != 'HG':
241 241 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
242 242 if version == '10':
243 243 if alg is None:
244 244 alg = changegroup.readexactly(fh, 2)
245 245 return changegroup.cg1unpacker(fh, alg)
246 246 elif version.startswith('2'):
247 247 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
248 248 elif version == 'S1':
249 249 return streamclone.streamcloneapplier(fh)
250 250 else:
251 251 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
252 252
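For reference, the 4-byte header dispatched on above packs a 2-byte magic and a 2-byte version; the mapping implied by the branches (with 'HG10' reading its compression name from the following 2 bytes) looks like:

    header = changegroup.readexactly(fh, 4)   # e.g. 'HG20'
    magic, version = header[:2], header[2:4]
    # 'HG10' -> changegroup v1 (cg1unpacker, compression name follows)
    # 'HG2x' -> bundle2 (getunbundler)
    # 'HGS1' -> stream clone bundle (streamcloneapplier)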
253 253 def getbundlespec(ui, fh):
254 254 """Infer the bundlespec from a bundle file handle.
255 255
256 256 The input file handle is seeked and the original seek position is not
257 257 restored.
258 258 """
259 259 def speccompression(alg):
260 260 try:
261 261 return util.compengines.forbundletype(alg).bundletype()[0]
262 262 except KeyError:
263 263 return None
264 264
265 265 b = readbundle(ui, fh, None)
266 266 if isinstance(b, changegroup.cg1unpacker):
267 267 alg = b._type
268 268 if alg == '_truncatedBZ':
269 269 alg = 'BZ'
270 270 comp = speccompression(alg)
271 271 if not comp:
272 272 raise error.Abort(_('unknown compression algorithm: %s') % alg)
273 273 return '%s-v1' % comp
274 274 elif isinstance(b, bundle2.unbundle20):
275 275 if 'Compression' in b.params:
276 276 comp = speccompression(b.params['Compression'])
277 277 if not comp:
278 278 raise error.Abort(_('unknown compression algorithm: %s') % comp)
279 279 else:
280 280 comp = 'none'
281 281
282 282 version = None
283 283 for part in b.iterparts():
284 284 if part.type == 'changegroup':
285 285 version = part.params['version']
286 286 if version in ('01', '02'):
287 287 version = 'v2'
288 288 else:
289 289 raise error.Abort(_('changegroup version %s does not have '
290 290 'a known bundlespec') % version,
291 291 hint=_('try upgrading your Mercurial '
292 292 'client'))
293 293 elif part.type == 'stream2' and version is None:
294 294 # A stream2 part is required to be part of a v2 bundle
295 295 version = "v2"
296 296 requirements = urlreq.unquote(part.params['requirements'])
297 297 splitted = requirements.split()
298 298 params = bundle2._formatrequirementsparams(splitted)
299 299 return 'none-v2;stream=v2;%s' % params
300 300
301 301 if not version:
302 302 raise error.Abort(_('could not identify changegroup version in '
303 303 'bundle'))
304 304
305 305 return '%s-%s' % (comp, version)
306 306 elif isinstance(b, streamclone.streamcloneapplier):
307 307 requirements = streamclone.readbundle1header(fh)[2]
308 308 formatted = bundle2._formatrequirementsparams(requirements)
309 309 return 'none-packed1;%s' % formatted
310 310 else:
311 311 raise error.Abort(_('unknown bundle type: %s') % b)
312 312
313 313 def _computeoutgoing(repo, heads, common):
314 314 """Computes which revs are outgoing given a set of common
315 315 and a set of heads.
316 316
317 317 This is a separate function so extensions can have access to
318 318 the logic.
319 319
320 320 Returns a discovery.outgoing object.
321 321 """
322 322 cl = repo.changelog
323 323 if common:
324 324 hasnode = cl.hasnode
325 325 common = [n for n in common if hasnode(n)]
326 326 else:
327 327 common = [nullid]
328 328 if not heads:
329 329 heads = cl.heads()
330 330 return discovery.outgoing(repo, common, heads)
331 331
332 332 def _forcebundle1(op):
333 333 """return true if a pull/push must use bundle1
334 334
335 335 This function is used to allow testing of the older bundle version"""
336 336 ui = op.repo.ui
337 337 # The goal of this config is to allow developers to choose the bundle
338 338 # version used during exchange. This is especially handy during tests.
339 339 # Value is a list of bundle versions to pick from; the highest version
340 340 # should be used.
341 341 #
342 342 # developer config: devel.legacy.exchange
343 343 exchange = ui.configlist('devel', 'legacy.exchange')
344 344 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
345 345 return forcebundle1 or not op.remote.capable('bundle2')
346 346
347 347 class pushoperation(object):
348 348 """A object that represent a single push operation
349 349
350 350 Its purpose is to carry push related state and very common operations.
351 351
352 352 A new pushoperation should be created at the beginning of each push and
353 353 discarded afterward.
354 354 """
355 355
356 356 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
357 357 bookmarks=(), pushvars=None):
358 358 # repo we push from
359 359 self.repo = repo
360 360 self.ui = repo.ui
361 361 # repo we push to
362 362 self.remote = remote
363 363 # force option provided
364 364 self.force = force
365 365 # revs to be pushed (None is "all")
366 366 self.revs = revs
367 367 # bookmarks explicitly pushed
368 368 self.bookmarks = bookmarks
369 369 # allow push of new branch
370 370 self.newbranch = newbranch
371 371 # steps already performed
372 372 # (used to check what steps have already been performed through bundle2)
373 373 self.stepsdone = set()
374 374 # Integer version of the changegroup push result
375 375 # - None means nothing to push
376 376 # - 0 means HTTP error
377 377 # - 1 means we pushed and remote head count is unchanged *or*
378 378 # we have outgoing changesets but refused to push
379 379 # - other values as described by addchangegroup()
380 380 self.cgresult = None
381 381 # Boolean value for the bookmark push
382 382 self.bkresult = None
383 383 # discover.outgoing object (contains common and outgoing data)
384 384 self.outgoing = None
385 385 # all remote topological heads before the push
386 386 self.remoteheads = None
387 387 # Details of the remote branch pre and post push
388 388 #
389 389 # mapping: {'branch': ([remoteheads],
390 390 # [newheads],
391 391 # [unsyncedheads],
392 392 # [discardedheads])}
393 393 # - branch: the branch name
394 394 # - remoteheads: the list of remote heads known locally
395 395 # None if the branch is new
396 396 # - newheads: the new remote heads (known locally) with outgoing pushed
397 397 # - unsyncedheads: the list of remote heads unknown locally.
398 398 # - discardedheads: the list of remote heads made obsolete by the push
399 399 self.pushbranchmap = None
400 400 # testable as a boolean indicating if any nodes are missing locally.
401 401 self.incoming = None
402 402 # summary of the remote phase situation
403 403 self.remotephases = None
404 404 # phases changes that must be pushed along side the changesets
405 405 self.outdatedphases = None
406 406 # phases changes that must be pushed if changeset push fails
407 407 self.fallbackoutdatedphases = None
408 408 # outgoing obsmarkers
409 409 self.outobsmarkers = set()
410 410 # outgoing bookmarks
411 411 self.outbookmarks = []
412 412 # transaction manager
413 413 self.trmanager = None
414 414 # map { pushkey partid -> callback handling failure}
415 415 # used to handle exception from mandatory pushkey part failure
416 416 self.pkfailcb = {}
417 417 # an iterable of pushvars or None
418 418 self.pushvars = pushvars
419 419
420 420 @util.propertycache
421 421 def futureheads(self):
422 422 """future remote heads if the changeset push succeeds"""
423 423 return self.outgoing.missingheads
424 424
425 425 @util.propertycache
426 426 def fallbackheads(self):
427 427 """future remote heads if the changeset push fails"""
428 428 if self.revs is None:
429 429 # no target to push, all common heads are relevant
430 430 return self.outgoing.commonheads
431 431 unfi = self.repo.unfiltered()
432 432 # I want cheads = heads(::missingheads and ::commonheads)
433 433 # (missingheads is revs with secret changeset filtered out)
434 434 #
435 435 # This can be expressed as:
436 436 # cheads = ( (missingheads and ::commonheads)
437 437 # + (commonheads and ::missingheads)
438 438 # )
439 439 #
440 440 # while trying to push we already computed the following:
441 441 # common = (::commonheads)
442 442 # missing = ((commonheads::missingheads) - commonheads)
443 443 #
444 444 # We can pick:
445 445 # * missingheads part of common (::commonheads)
446 446 common = self.outgoing.common
447 447 nm = self.repo.changelog.nodemap
448 448 cheads = [node for node in self.revs if nm[node] in common]
449 449 # and
450 450 # * commonheads parents on missing
451 451 revset = unfi.set('%ln and parents(roots(%ln))',
452 452 self.outgoing.commonheads,
453 453 self.outgoing.missing)
454 454 cheads.extend(c.node() for c in revset)
455 455 return cheads
456 456
457 457 @property
458 458 def commonheads(self):
459 459 """set of all common heads after changeset bundle push"""
460 460 if self.cgresult:
461 461 return self.futureheads
462 462 else:
463 463 return self.fallbackheads
464 464
465 465 # mapping of messages used when pushing bookmarks
466 466 bookmsgmap = {'update': (_("updating bookmark %s\n"),
467 467 _('updating bookmark %s failed!\n')),
468 468 'export': (_("exporting bookmark %s\n"),
469 469 _('exporting bookmark %s failed!\n')),
470 470 'delete': (_("deleting remote bookmark %s\n"),
471 471 _('deleting remote bookmark %s failed!\n')),
472 472 }
473 473
474 474
475 475 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
476 476 opargs=None):
477 477 '''Push outgoing changesets (limited by revs) from a local
478 478 repository to remote. Return an integer:
479 479 - None means nothing to push
480 480 - 0 means HTTP error
481 481 - 1 means we pushed and remote head count is unchanged *or*
482 482 we have outgoing changesets but refused to push
483 483 - other values as described by addchangegroup()
484 484 '''
485 485 if opargs is None:
486 486 opargs = {}
487 487 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
488 488 **pycompat.strkwargs(opargs))
489 489 if pushop.remote.local():
490 490 missing = (set(pushop.repo.requirements)
491 491 - pushop.remote.local().supported)
492 492 if missing:
493 493 msg = _("required features are not"
494 494 " supported in the destination:"
495 495 " %s") % (', '.join(sorted(missing)))
496 496 raise error.Abort(msg)
497 497
498 498 if not pushop.remote.canpush():
499 499 raise error.Abort(_("destination does not support push"))
500 500
501 501 if not pushop.remote.capable('unbundle'):
502 502 raise error.Abort(_('cannot push: destination does not support the '
503 503 'unbundle wire protocol command'))
504 504
505 505 # get lock as we might write phase data
506 506 wlock = lock = None
507 507 try:
508 508 # bundle2 push may receive a reply bundle touching bookmarks or other
509 509 # things requiring the wlock. Take it now to ensure proper ordering.
510 510 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
511 511 if (not _forcebundle1(pushop)) and maypushback:
512 512 wlock = pushop.repo.wlock()
513 513 lock = pushop.repo.lock()
514 514 pushop.trmanager = transactionmanager(pushop.repo,
515 515 'push-response',
516 516 pushop.remote.url())
517 517 except IOError as err:
518 518 if err.errno != errno.EACCES:
519 519 raise
520 520 # source repo cannot be locked.
521 521 # We do not abort the push, but just disable the local phase
522 522 # synchronisation.
523 523 msg = 'cannot lock source repository: %s\n' % err
524 524 pushop.ui.debug(msg)
525 525
526 526 with wlock or util.nullcontextmanager(), \
527 527 lock or util.nullcontextmanager(), \
528 528 pushop.trmanager or util.nullcontextmanager():
529 529 pushop.repo.checkpush(pushop)
530 530 _pushdiscovery(pushop)
531 531 if not _forcebundle1(pushop):
532 532 _pushbundle2(pushop)
533 533 _pushchangeset(pushop)
534 534 _pushsyncphase(pushop)
535 535 _pushobsolete(pushop)
536 536 _pushbookmark(pushop)
537 537
538 538 return pushop
539 539
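The `with wlock or util.nullcontextmanager(), ...` construction above is what lets the push proceed even when the source repository could not be locked: a failed lock stays None and is substituted with a no-op context manager. A minimal sketch of the same pattern using the standard library (contextlib.nullcontext is the Python 3 analogue of util.nullcontextmanager):

    import contextlib

    def guarded(lock=None):
        # run the body under the lock when one was acquired, and
        # unprotected (without duplicating the body) otherwise
        with lock or contextlib.nullcontext():
            pass  # ... work that may or may not be protected ...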
540 540 # list of steps to perform discovery before push
541 541 pushdiscoveryorder = []
542 542
543 543 # Mapping between step name and function
544 544 #
545 545 # This exists to help extensions wrap steps if necessary
546 546 pushdiscoverymapping = {}
547 547
548 548 def pushdiscovery(stepname):
549 549 """decorator for function performing discovery before push
550 550
551 551 The function is added to the step -> function mapping and appended to the
552 552 list of steps. Beware that decorated functions will be added in order (this
553 553 may matter).
554 554
555 555 You can only use this decorator for a new step; if you want to wrap a step
556 556 from an extension, change the pushdiscovery dictionary directly."""
557 557 def dec(func):
558 558 assert stepname not in pushdiscoverymapping
559 559 pushdiscoverymapping[stepname] = func
560 560 pushdiscoveryorder.append(stepname)
561 561 return func
562 562 return dec
563 563
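The decorator above implements a small ordered step registry, a pattern this module reuses for pull discovery and bundle2 part generation. The same idea in isolation, as a self-contained sketch:

    steporder = []
    stepmapping = {}

    def registerstep(name):
        def dec(func):
            assert name not in stepmapping
            stepmapping[name] = func
            steporder.append(name)
            return func
        return dec

    @registerstep('changeset')
    def dochangesets(state):
        state.append('changeset done')

    def runsteps(state):
        # run every registered step in registration order
        for name in steporder:
            stepmapping[name](state)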
564 564 def _pushdiscovery(pushop):
565 565 """Run all discovery steps"""
566 566 for stepname in pushdiscoveryorder:
567 567 step = pushdiscoverymapping[stepname]
568 568 step(pushop)
569 569
570 570 @pushdiscovery('changeset')
571 571 def _pushdiscoverychangeset(pushop):
572 572 """discover the changeset that need to be pushed"""
573 573 fci = discovery.findcommonincoming
574 574 if pushop.revs:
575 575 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
576 576 ancestorsof=pushop.revs)
577 577 else:
578 578 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
579 579 common, inc, remoteheads = commoninc
580 580 fco = discovery.findcommonoutgoing
581 581 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
582 582 commoninc=commoninc, force=pushop.force)
583 583 pushop.outgoing = outgoing
584 584 pushop.remoteheads = remoteheads
585 585 pushop.incoming = inc
586 586
587 587 @pushdiscovery('phase')
588 588 def _pushdiscoveryphase(pushop):
589 589 """discover the phase that needs to be pushed
590 590
591 591 (computed for both success and failure case for changesets push)"""
592 592 outgoing = pushop.outgoing
593 593 unfi = pushop.repo.unfiltered()
594 remotephases = pushop.remote.listkeys('phases')
594 remotephases = listkeys(pushop.remote, 'phases')
595
595 596 if (pushop.ui.configbool('ui', '_usedassubrepo')
596 597 and remotephases # server supports phases
597 598 and not pushop.outgoing.missing # no changesets to be pushed
598 599 and remotephases.get('publishing', False)):
599 600 # When:
600 601 # - this is a subrepo push
601 602 # - and remote supports phases
602 603 # - and no changesets are to be pushed
603 604 # - and remote is publishing
604 605 # We may be in issue 3781 case!
605 606 # We drop the phase synchronisation that is usually done as a
606 607 # courtesy; it would publish changesets that are possibly still
607 608 # draft locally though present on the remote.
608 609 pushop.outdatedphases = []
609 610 pushop.fallbackoutdatedphases = []
610 611 return
611 612
612 613 pushop.remotephases = phases.remotephasessummary(pushop.repo,
613 614 pushop.fallbackheads,
614 615 remotephases)
615 616 droots = pushop.remotephases.draftroots
616 617
617 618 extracond = ''
618 619 if not pushop.remotephases.publishing:
619 620 extracond = ' and public()'
620 621 revset = 'heads((%%ln::%%ln) %s)' % extracond
621 622 # Get the list of all revs draft on remote but public here.
622 623 # XXX Beware that this revset breaks if droots are not strictly
623 624 # XXX roots; we may want to ensure they are, but that is costly.
624 625 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
625 626 if not outgoing.missing:
626 627 future = fallback
627 628 else:
628 629 # add the changesets we are going to push as draft
629 630 #
630 631 # should not be necessary for a publishing server, but because of an
631 632 # issue fixed in xxxxx we have to do it anyway.
632 633 fdroots = list(unfi.set('roots(%ln + %ln::)',
633 634 outgoing.missing, droots))
634 635 fdroots = [f.node() for f in fdroots]
635 636 future = list(unfi.set(revset, fdroots, pushop.futureheads))
636 637 pushop.outdatedphases = future
637 638 pushop.fallbackoutdatedphases = fallback
638 639
639 640 @pushdiscovery('obsmarker')
640 641 def _pushdiscoveryobsmarkers(pushop):
641 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
642 and pushop.repo.obsstore
643 and 'obsolete' in pushop.remote.listkeys('namespaces')):
644 repo = pushop.repo
645 # very naive computation that can be quite expensive on a big repo.
646 # However, evolution is currently slow on them anyway.
647 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
648 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
642 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
643 return
644
645 if not pushop.repo.obsstore:
646 return
647
648 if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
649 return
650
651 repo = pushop.repo
652 # very naive computation that can be quite expensive on a big repo.
653 # However, evolution is currently slow on them anyway.
654 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
655 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
649 656
650 657 @pushdiscovery('bookmarks')
651 658 def _pushdiscoverybookmarks(pushop):
652 659 ui = pushop.ui
653 660 repo = pushop.repo.unfiltered()
654 661 remote = pushop.remote
655 662 ui.debug("checking for updated bookmarks\n")
656 663 ancestors = ()
657 664 if pushop.revs:
658 665 revnums = map(repo.changelog.rev, pushop.revs)
659 666 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
660 remotebookmark = remote.listkeys('bookmarks')
667
668 remotebookmark = listkeys(remote, 'bookmarks')
661 669
662 670 explicit = set([repo._bookmarks.expandname(bookmark)
663 671 for bookmark in pushop.bookmarks])
664 672
665 673 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
666 674 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
667 675
668 676 def safehex(x):
669 677 if x is None:
670 678 return x
671 679 return hex(x)
672 680
673 681 def hexifycompbookmarks(bookmarks):
674 682 return [(b, safehex(scid), safehex(dcid))
675 683 for (b, scid, dcid) in bookmarks]
676 684
677 685 comp = [hexifycompbookmarks(marks) for marks in comp]
678 686 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
679 687
680 688 def _processcompared(pushop, pushed, explicit, remotebms, comp):
681 689 """take decision on bookmark to pull from the remote bookmark
682 690
683 691 Exist to help extensions who want to alter this behavior.
684 692 """
685 693 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
686 694
687 695 repo = pushop.repo
688 696
689 697 for b, scid, dcid in advsrc:
690 698 if b in explicit:
691 699 explicit.remove(b)
692 700 if not pushed or repo[scid].rev() in pushed:
693 701 pushop.outbookmarks.append((b, dcid, scid))
694 702 # search added bookmark
695 703 for b, scid, dcid in addsrc:
696 704 if b in explicit:
697 705 explicit.remove(b)
698 706 pushop.outbookmarks.append((b, '', scid))
699 707 # search for overwritten bookmark
700 708 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
701 709 if b in explicit:
702 710 explicit.remove(b)
703 711 pushop.outbookmarks.append((b, dcid, scid))
704 712 # search for bookmark to delete
705 713 for b, scid, dcid in adddst:
706 714 if b in explicit:
707 715 explicit.remove(b)
708 716 # treat as "deleted locally"
709 717 pushop.outbookmarks.append((b, dcid, ''))
710 718 # identical bookmarks shouldn't get reported
711 719 for b, scid, dcid in same:
712 720 if b in explicit:
713 721 explicit.remove(b)
714 722
715 723 if explicit:
716 724 explicit = sorted(explicit)
717 725 # we should probably list all of them
718 726 pushop.ui.warn(_('bookmark %s does not exist on the local '
719 727 'or remote repository!\n') % explicit[0])
720 728 pushop.bkresult = 2
721 729
722 730 pushop.outbookmarks.sort()
723 731
724 732 def _pushcheckoutgoing(pushop):
725 733 outgoing = pushop.outgoing
726 734 unfi = pushop.repo.unfiltered()
727 735 if not outgoing.missing:
728 736 # nothing to push
729 737 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
730 738 return False
731 739 # something to push
732 740 if not pushop.force:
733 741 # if repo.obsstore == False --> no obsolete
734 742 # then, save the iteration
735 743 if unfi.obsstore:
736 744 # these messages are defined here for 80-char-limit reasons
737 745 mso = _("push includes obsolete changeset: %s!")
738 746 mspd = _("push includes phase-divergent changeset: %s!")
739 747 mscd = _("push includes content-divergent changeset: %s!")
740 748 mst = {"orphan": _("push includes orphan changeset: %s!"),
741 749 "phase-divergent": mspd,
742 750 "content-divergent": mscd}
743 751 # If we are to push and there is at least one
744 752 # obsolete or unstable changeset in missing, at
745 753 # least one of the missing heads will be obsolete or
746 754 # unstable. So checking heads only is ok.
747 755 for node in outgoing.missingheads:
748 756 ctx = unfi[node]
749 757 if ctx.obsolete():
750 758 raise error.Abort(mso % ctx)
751 759 elif ctx.isunstable():
752 760 # TODO print more than one instability in the abort
753 761 # message
754 762 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
755 763
756 764 discovery.checkheads(pushop)
757 765 return True
758 766
759 767 # List of names of steps to perform for an outgoing bundle2, order matters.
760 768 b2partsgenorder = []
761 769
762 770 # Mapping between step name and function
763 771 #
764 772 # This exists to help extensions wrap steps if necessary
765 773 b2partsgenmapping = {}
766 774
767 775 def b2partsgenerator(stepname, idx=None):
768 776 """decorator for function generating bundle2 part
769 777
770 778 The function is added to the step -> function mapping and appended to the
771 779 list of steps. Beware that decorated functions will be added in order
772 780 (this may matter).
773 781
774 782 You can only use this decorator for new steps; if you want to wrap a step
775 783 from an extension, change the b2partsgenmapping dictionary directly."""
776 784 def dec(func):
777 785 assert stepname not in b2partsgenmapping
778 786 b2partsgenmapping[stepname] = func
779 787 if idx is None:
780 788 b2partsgenorder.append(stepname)
781 789 else:
782 790 b2partsgenorder.insert(idx, stepname)
783 791 return func
784 792 return dec
785 793
786 794 def _pushb2ctxcheckheads(pushop, bundler):
787 795 """Generate race condition checking parts
788 796
789 797 Exists as an independent function to aid extensions
790 798 """
791 799 # * 'force' does not check for push races,
792 800 # * if we don't push anything, there is nothing to check.
793 801 if not pushop.force and pushop.outgoing.missingheads:
794 802 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
795 803 emptyremote = pushop.pushbranchmap is None
796 804 if not allowunrelated or emptyremote:
797 805 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
798 806 else:
799 807 affected = set()
800 808 for branch, heads in pushop.pushbranchmap.iteritems():
801 809 remoteheads, newheads, unsyncedheads, discardedheads = heads
802 810 if remoteheads is not None:
803 811 remote = set(remoteheads)
804 812 affected |= set(discardedheads) & remote
805 813 affected |= remote - set(newheads)
806 814 if affected:
807 815 data = iter(sorted(affected))
808 816 bundler.newpart('check:updated-heads', data=data)
809 817
810 818 def _pushing(pushop):
811 819 """return True if we are pushing anything"""
812 820 return bool(pushop.outgoing.missing
813 821 or pushop.outdatedphases
814 822 or pushop.outobsmarkers
815 823 or pushop.outbookmarks)
816 824
817 825 @b2partsgenerator('check-bookmarks')
818 826 def _pushb2checkbookmarks(pushop, bundler):
819 827 """insert bookmark move checking"""
820 828 if not _pushing(pushop) or pushop.force:
821 829 return
822 830 b2caps = bundle2.bundle2caps(pushop.remote)
823 831 hasbookmarkcheck = 'bookmarks' in b2caps
824 832 if not (pushop.outbookmarks and hasbookmarkcheck):
825 833 return
826 834 data = []
827 835 for book, old, new in pushop.outbookmarks:
828 836 old = bin(old)
829 837 data.append((book, old))
830 838 checkdata = bookmod.binaryencode(data)
831 839 bundler.newpart('check:bookmarks', data=checkdata)
832 840
833 841 @b2partsgenerator('check-phases')
834 842 def _pushb2checkphases(pushop, bundler):
835 843 """insert phase move checking"""
836 844 if not _pushing(pushop) or pushop.force:
837 845 return
838 846 b2caps = bundle2.bundle2caps(pushop.remote)
839 847 hasphaseheads = 'heads' in b2caps.get('phases', ())
840 848 if pushop.remotephases is not None and hasphaseheads:
841 849 # check that the remote phase has not changed
842 850 checks = [[] for p in phases.allphases]
843 851 checks[phases.public].extend(pushop.remotephases.publicheads)
844 852 checks[phases.draft].extend(pushop.remotephases.draftroots)
845 853 if any(checks):
846 854 for nodes in checks:
847 855 nodes.sort()
848 856 checkdata = phases.binaryencode(checks)
849 857 bundler.newpart('check:phases', data=checkdata)
850 858
851 859 @b2partsgenerator('changeset')
852 860 def _pushb2ctx(pushop, bundler):
853 861 """handle changegroup push through bundle2
854 862
855 863 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
856 864 """
857 865 if 'changesets' in pushop.stepsdone:
858 866 return
859 867 pushop.stepsdone.add('changesets')
860 868 # Send known heads to the server for race detection.
861 869 if not _pushcheckoutgoing(pushop):
862 870 return
863 871 pushop.repo.prepushoutgoinghooks(pushop)
864 872
865 873 _pushb2ctxcheckheads(pushop, bundler)
866 874
867 875 b2caps = bundle2.bundle2caps(pushop.remote)
868 876 version = '01'
869 877 cgversions = b2caps.get('changegroup')
870 878 if cgversions: # 3.1 and 3.2 ship with an empty value
871 879 cgversions = [v for v in cgversions
872 880 if v in changegroup.supportedoutgoingversions(
873 881 pushop.repo)]
874 882 if not cgversions:
875 883 raise ValueError(_('no common changegroup version'))
876 884 version = max(cgversions)
877 885 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
878 886 'push')
879 887 cgpart = bundler.newpart('changegroup', data=cgstream)
880 888 if cgversions:
881 889 cgpart.addparam('version', version)
882 890 if 'treemanifest' in pushop.repo.requirements:
883 891 cgpart.addparam('treemanifest', '1')
884 892 def handlereply(op):
885 893 """extract addchangegroup returns from server reply"""
886 894 cgreplies = op.records.getreplies(cgpart.id)
887 895 assert len(cgreplies['changegroup']) == 1
888 896 pushop.cgresult = cgreplies['changegroup'][0]['return']
889 897 return handlereply
890 898
891 899 @b2partsgenerator('phase')
892 900 def _pushb2phases(pushop, bundler):
893 901 """handle phase push through bundle2"""
894 902 if 'phases' in pushop.stepsdone:
895 903 return
896 904 b2caps = bundle2.bundle2caps(pushop.remote)
897 905 ui = pushop.repo.ui
898 906
899 907 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
900 908 haspushkey = 'pushkey' in b2caps
901 909 hasphaseheads = 'heads' in b2caps.get('phases', ())
902 910
903 911 if hasphaseheads and not legacyphase:
904 912 return _pushb2phaseheads(pushop, bundler)
905 913 elif haspushkey:
906 914 return _pushb2phasespushkey(pushop, bundler)
907 915
908 916 def _pushb2phaseheads(pushop, bundler):
909 917 """push phase information through a bundle2 - binary part"""
910 918 pushop.stepsdone.add('phases')
911 919 if pushop.outdatedphases:
912 920 updates = [[] for p in phases.allphases]
913 921 updates[0].extend(h.node() for h in pushop.outdatedphases)
914 922 phasedata = phases.binaryencode(updates)
915 923 bundler.newpart('phase-heads', data=phasedata)
916 924
917 925 def _pushb2phasespushkey(pushop, bundler):
918 926 """push phase information through a bundle2 - pushkey part"""
919 927 pushop.stepsdone.add('phases')
920 928 part2node = []
921 929
922 930 def handlefailure(pushop, exc):
923 931 targetid = int(exc.partid)
924 932 for partid, node in part2node:
925 933 if partid == targetid:
926 934 raise error.Abort(_('updating %s to public failed') % node)
927 935
928 936 enc = pushkey.encode
929 937 for newremotehead in pushop.outdatedphases:
930 938 part = bundler.newpart('pushkey')
931 939 part.addparam('namespace', enc('phases'))
932 940 part.addparam('key', enc(newremotehead.hex()))
933 941 part.addparam('old', enc('%d' % phases.draft))
934 942 part.addparam('new', enc('%d' % phases.public))
935 943 part2node.append((part.id, newremotehead))
936 944 pushop.pkfailcb[part.id] = handlefailure
937 945
938 946 def handlereply(op):
939 947 for partid, node in part2node:
940 948 partrep = op.records.getreplies(partid)
941 949 results = partrep['pushkey']
942 950 assert len(results) <= 1
943 951 msg = None
944 952 if not results:
945 953 msg = _('server ignored update of %s to public!\n') % node
946 954 elif not int(results[0]['return']):
947 955 msg = _('updating %s to public failed!\n') % node
948 956 if msg is not None:
949 957 pushop.ui.warn(msg)
950 958 return handlereply
951 959
952 960 @b2partsgenerator('obsmarkers')
953 961 def _pushb2obsmarkers(pushop, bundler):
954 962 if 'obsmarkers' in pushop.stepsdone:
955 963 return
956 964 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
957 965 if obsolete.commonversion(remoteversions) is None:
958 966 return
959 967 pushop.stepsdone.add('obsmarkers')
960 968 if pushop.outobsmarkers:
961 969 markers = sorted(pushop.outobsmarkers)
962 970 bundle2.buildobsmarkerspart(bundler, markers)
963 971
964 972 @b2partsgenerator('bookmarks')
965 973 def _pushb2bookmarks(pushop, bundler):
966 974 """handle bookmark push through bundle2"""
967 975 if 'bookmarks' in pushop.stepsdone:
968 976 return
969 977 b2caps = bundle2.bundle2caps(pushop.remote)
970 978
971 979 legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
972 980 legacybooks = 'bookmarks' in legacy
973 981
974 982 if not legacybooks and 'bookmarks' in b2caps:
975 983 return _pushb2bookmarkspart(pushop, bundler)
976 984 elif 'pushkey' in b2caps:
977 985 return _pushb2bookmarkspushkey(pushop, bundler)
978 986
979 987 def _bmaction(old, new):
980 988 """small utility for bookmark pushing"""
981 989 if not old:
982 990 return 'export'
983 991 elif not new:
984 992 return 'delete'
985 993 return 'update'
986 994
987 995 def _pushb2bookmarkspart(pushop, bundler):
988 996 pushop.stepsdone.add('bookmarks')
989 997 if not pushop.outbookmarks:
990 998 return
991 999
992 1000 allactions = []
993 1001 data = []
994 1002 for book, old, new in pushop.outbookmarks:
995 1003 new = bin(new)
996 1004 data.append((book, new))
997 1005 allactions.append((book, _bmaction(old, new)))
998 1006 checkdata = bookmod.binaryencode(data)
999 1007 bundler.newpart('bookmarks', data=checkdata)
1000 1008
1001 1009 def handlereply(op):
1002 1010 ui = pushop.ui
1003 1011 # if success
1004 1012 for book, action in allactions:
1005 1013 ui.status(bookmsgmap[action][0] % book)
1006 1014
1007 1015 return handlereply
1008 1016
1009 1017 def _pushb2bookmarkspushkey(pushop, bundler):
1010 1018 pushop.stepsdone.add('bookmarks')
1011 1019 part2book = []
1012 1020 enc = pushkey.encode
1013 1021
1014 1022 def handlefailure(pushop, exc):
1015 1023 targetid = int(exc.partid)
1016 1024 for partid, book, action in part2book:
1017 1025 if partid == targetid:
1018 1026 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1019 1027 # we should not be called for parts we did not generate
1020 1028 assert False
1021 1029
1022 1030 for book, old, new in pushop.outbookmarks:
1023 1031 part = bundler.newpart('pushkey')
1024 1032 part.addparam('namespace', enc('bookmarks'))
1025 1033 part.addparam('key', enc(book))
1026 1034 part.addparam('old', enc(old))
1027 1035 part.addparam('new', enc(new))
1028 1036 action = 'update'
1029 1037 if not old:
1030 1038 action = 'export'
1031 1039 elif not new:
1032 1040 action = 'delete'
1033 1041 part2book.append((part.id, book, action))
1034 1042 pushop.pkfailcb[part.id] = handlefailure
1035 1043
1036 1044 def handlereply(op):
1037 1045 ui = pushop.ui
1038 1046 for partid, book, action in part2book:
1039 1047 partrep = op.records.getreplies(partid)
1040 1048 results = partrep['pushkey']
1041 1049 assert len(results) <= 1
1042 1050 if not results:
1043 1051 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
1044 1052 else:
1045 1053 ret = int(results[0]['return'])
1046 1054 if ret:
1047 1055 ui.status(bookmsgmap[action][0] % book)
1048 1056 else:
1049 1057 ui.warn(bookmsgmap[action][1] % book)
1050 1058 if pushop.bkresult is not None:
1051 1059 pushop.bkresult = 1
1052 1060 return handlereply
1053 1061
1054 1062 @b2partsgenerator('pushvars', idx=0)
1055 1063 def _getbundlesendvars(pushop, bundler):
1056 1064 '''send shellvars via bundle2'''
1057 1065 pushvars = pushop.pushvars
1058 1066 if pushvars:
1059 1067 shellvars = {}
1060 1068 for raw in pushvars:
1061 1069 if '=' not in raw:
1062 1070 msg = ("unable to parse variable '%s', should follow "
1063 1071 "'KEY=VALUE' or 'KEY=' format")
1064 1072 raise error.Abort(msg % raw)
1065 1073 k, v = raw.split('=', 1)
1066 1074 shellvars[k] = v
1067 1075
1068 1076 part = bundler.newpart('pushvars')
1069 1077
1070 1078 for key, value in shellvars.iteritems():
1071 1079 part.addparam(key, value, mandatory=False)
1072 1080
1073 1081 def _pushbundle2(pushop):
1074 1082 """push data to the remote using bundle2
1075 1083
1076 1084 The only currently supported type of data is changegroup but this will
1077 1085 evolve in the future."""
1078 1086 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1079 1087 pushback = (pushop.trmanager
1080 1088 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
1081 1089
1082 1090 # create reply capability
1083 1091 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
1084 1092 allowpushback=pushback,
1085 1093 role='client'))
1086 1094 bundler.newpart('replycaps', data=capsblob)
1087 1095 replyhandlers = []
1088 1096 for partgenname in b2partsgenorder:
1089 1097 partgen = b2partsgenmapping[partgenname]
1090 1098 ret = partgen(pushop, bundler)
1091 1099 if callable(ret):
1092 1100 replyhandlers.append(ret)
1093 1101 # do not push if nothing to push
1094 1102 if bundler.nbparts <= 1:
1095 1103 return
1096 1104 stream = util.chunkbuffer(bundler.getchunks())
1097 1105 try:
1098 1106 try:
1099 1107 with pushop.remote.commandexecutor() as e:
1100 1108 reply = e.callcommand('unbundle', {
1101 1109 'bundle': stream,
1102 1110 'heads': ['force'],
1103 1111 'url': pushop.remote.url(),
1104 1112 }).result()
1105 1113 except error.BundleValueError as exc:
1106 1114 raise error.Abort(_('missing support for %s') % exc)
1107 1115 try:
1108 1116 trgetter = None
1109 1117 if pushback:
1110 1118 trgetter = pushop.trmanager.transaction
1111 1119 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1112 1120 except error.BundleValueError as exc:
1113 1121 raise error.Abort(_('missing support for %s') % exc)
1114 1122 except bundle2.AbortFromPart as exc:
1115 1123 pushop.ui.status(_('remote: %s\n') % exc)
1116 1124 if exc.hint is not None:
1117 1125 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
1118 1126 raise error.Abort(_('push failed on remote'))
1119 1127 except error.PushkeyFailed as exc:
1120 1128 partid = int(exc.partid)
1121 1129 if partid not in pushop.pkfailcb:
1122 1130 raise
1123 1131 pushop.pkfailcb[partid](pushop, exc)
1124 1132 for rephand in replyhandlers:
1125 1133 rephand(op)
1126 1134
1127 1135 def _pushchangeset(pushop):
1128 1136 """Make the actual push of changeset bundle to remote repo"""
1129 1137 if 'changesets' in pushop.stepsdone:
1130 1138 return
1131 1139 pushop.stepsdone.add('changesets')
1132 1140 if not _pushcheckoutgoing(pushop):
1133 1141 return
1134 1142
1135 1143 # Should have verified this in push().
1136 1144 assert pushop.remote.capable('unbundle')
1137 1145
1138 1146 pushop.repo.prepushoutgoinghooks(pushop)
1139 1147 outgoing = pushop.outgoing
1140 1148 # TODO: get bundlecaps from remote
1141 1149 bundlecaps = None
1142 1150 # create a changegroup from local
1143 1151 if pushop.revs is None and not (outgoing.excluded
1144 1152 or pushop.repo.changelog.filteredrevs):
1145 1153 # push everything,
1146 1154 # use the fast path, no race possible on push
1147 1155 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1148 1156 fastpath=True, bundlecaps=bundlecaps)
1149 1157 else:
1150 1158 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1151 1159 'push', bundlecaps=bundlecaps)
1152 1160
1153 1161 # apply changegroup to remote
1154 1162 # local repo finds heads on server, finds out what
1155 1163 # revs it must push. once revs transferred, if server
1156 1164 # finds it has different heads (someone else won
1157 1165 # commit/push race), server aborts.
1158 1166 if pushop.force:
1159 1167 remoteheads = ['force']
1160 1168 else:
1161 1169 remoteheads = pushop.remoteheads
1162 1170 # ssh: return remote's addchangegroup()
1163 1171 # http: return remote's addchangegroup() or 0 for error
1164 1172 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1165 1173 pushop.repo.url())
1166 1174
1167 1175 def _pushsyncphase(pushop):
1168 1176 """synchronise phase information locally and remotely"""
1169 1177 cheads = pushop.commonheads
1170 1178 # even when we don't push, exchanging phase data is useful
1171 remotephases = pushop.remote.listkeys('phases')
1179 remotephases = listkeys(pushop.remote, 'phases')
1172 1180 if (pushop.ui.configbool('ui', '_usedassubrepo')
1173 1181 and remotephases # server supports phases
1174 1182 and pushop.cgresult is None # nothing was pushed
1175 1183 and remotephases.get('publishing', False)):
1176 1184 # When:
1177 1185 # - this is a subrepo push
1178 1186 # - and remote supports phases
1179 1187 # - and no changeset was pushed
1180 1188 # - and remote is publishing
1181 1189 # We may be in issue 3871 case!
1182 1190 # We drop the phase synchronisation that is usually done as a
1183 1191 # courtesy; it would publish changesets that are possibly still
1184 1192 # draft locally though present on the remote.
1185 1193 remotephases = {'publishing': 'True'}
1186 1194 if not remotephases: # old server or public-only reply from non-publishing
1187 1195 _localphasemove(pushop, cheads)
1188 1196 # don't push any phase data as there is nothing to push
1189 1197 else:
1190 1198 ana = phases.analyzeremotephases(pushop.repo, cheads,
1191 1199 remotephases)
1192 1200 pheads, droots = ana
1193 1201 ### Apply remote phase on local
1194 1202 if remotephases.get('publishing', False):
1195 1203 _localphasemove(pushop, cheads)
1196 1204 else: # publish = False
1197 1205 _localphasemove(pushop, pheads)
1198 1206 _localphasemove(pushop, cheads, phases.draft)
1199 1207 ### Apply local phase on remote
1200 1208
1201 1209 if pushop.cgresult:
1202 1210 if 'phases' in pushop.stepsdone:
1203 1211 # phases already pushed through bundle2
1204 1212 return
1205 1213 outdated = pushop.outdatedphases
1206 1214 else:
1207 1215 outdated = pushop.fallbackoutdatedphases
1208 1216
1209 1217 pushop.stepsdone.add('phases')
1210 1218
1211 1219 # filter heads already turned public by the push
1212 1220 outdated = [c for c in outdated if c.node() not in pheads]
1213 1221 # fallback to independent pushkey command
1214 1222 for newremotehead in outdated:
1215 1223 with pushop.remote.commandexecutor() as e:
1216 1224 r = e.callcommand('pushkey', {
1217 1225 'namespace': 'phases',
1218 1226 'key': newremotehead.hex(),
1219 1227 'old': '%d' % phases.draft,
1220 1228 'new': '%d' % phases.public
1221 1229 }).result()
1222 1230
1223 1231 if not r:
1224 1232 pushop.ui.warn(_('updating %s to public failed!\n')
1225 1233 % newremotehead)
1226 1234
1227 1235 def _localphasemove(pushop, nodes, phase=phases.public):
1228 1236 """move <nodes> to <phase> in the local source repo"""
1229 1237 if pushop.trmanager:
1230 1238 phases.advanceboundary(pushop.repo,
1231 1239 pushop.trmanager.transaction(),
1232 1240 phase,
1233 1241 nodes)
1234 1242 else:
1235 1243 # repo is not locked, do not change any phases!
1236 1244 # Informs the user that phases should have been moved when
1237 1245 # applicable.
1238 1246 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1239 1247 phasestr = phases.phasenames[phase]
1240 1248 if actualmoves:
1241 1249 pushop.ui.status(_('cannot lock source repo, skipping '
1242 1250 'local %s phase update\n') % phasestr)
1243 1251
1244 1252 def _pushobsolete(pushop):
1245 1253 """utility function to push obsolete markers to a remote"""
1246 1254 if 'obsmarkers' in pushop.stepsdone:
1247 1255 return
1248 1256 repo = pushop.repo
1249 1257 remote = pushop.remote
1250 1258 pushop.stepsdone.add('obsmarkers')
1251 1259 if pushop.outobsmarkers:
1252 1260 pushop.ui.debug('try to push obsolete markers to remote\n')
1253 1261 rslts = []
1254 1262 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1255 1263 for key in sorted(remotedata, reverse=True):
1256 1264 # reverse sort to ensure we end with dump0
1257 1265 data = remotedata[key]
1258 1266 rslts.append(remote.pushkey('obsolete', key, '', data))
1259 1267 if [r for r in rslts if not r]:
1260 1268 msg = _('failed to push some obsolete markers!\n')
1261 1269 repo.ui.warn(msg)
1262 1270
1263 1271 def _pushbookmark(pushop):
1264 1272 """Update bookmark position on remote"""
1265 1273 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1266 1274 return
1267 1275 pushop.stepsdone.add('bookmarks')
1268 1276 ui = pushop.ui
1269 1277 remote = pushop.remote
1270 1278
1271 1279 for b, old, new in pushop.outbookmarks:
1272 1280 action = 'update'
1273 1281 if not old:
1274 1282 action = 'export'
1275 1283 elif not new:
1276 1284 action = 'delete'
1277 1285
1278 1286 with remote.commandexecutor() as e:
1279 1287 r = e.callcommand('pushkey', {
1280 1288 'namespace': 'bookmarks',
1281 1289 'key': b,
1282 1290 'old': old,
1283 1291 'new': new,
1284 1292 }).result()
1285 1293
1286 1294 if r:
1287 1295 ui.status(bookmsgmap[action][0] % b)
1288 1296 else:
1289 1297 ui.warn(bookmsgmap[action][1] % b)
1290 1298 # discovery can have set the value from an invalid entry
1291 1299 if pushop.bkresult is not None:
1292 1300 pushop.bkresult = 1
1293 1301
1294 1302 class pulloperation(object):
1295 1303 """A object that represent a single pull operation
1296 1304
1297 1305 It purpose is to carry pull related state and very common operation.
1298 1306
1299 1307 A new should be created at the beginning of each pull and discarded
1300 1308 afterward.
1301 1309 """
1302 1310
1303 1311 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1304 1312 remotebookmarks=None, streamclonerequested=None):
1305 1313 # repo we pull into
1306 1314 self.repo = repo
1307 1315 # repo we pull from
1308 1316 self.remote = remote
1309 1317 # revisions we try to pull (None is "all")
1310 1318 self.heads = heads
1311 1319 # bookmarks pulled explicitly
1312 1320 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1313 1321 for bookmark in bookmarks]
1314 1322 # do we force pull?
1315 1323 self.force = force
1316 1324 # whether a streaming clone was requested
1317 1325 self.streamclonerequested = streamclonerequested
1318 1326 # transaction manager
1319 1327 self.trmanager = None
1320 1328 # set of common changesets between local and remote before pull
1321 1329 self.common = None
1322 1330 # set of pulled heads
1323 1331 self.rheads = None
1324 1332 # list of missing changesets to fetch remotely
1325 1333 self.fetch = None
1326 1334 # remote bookmarks data
1327 1335 self.remotebookmarks = remotebookmarks
1328 1336 # result of changegroup pulling (used as return code by pull)
1329 1337 self.cgresult = None
1331 1339 # list of steps already done
1331 1339 self.stepsdone = set()
1332 1340 # Whether we attempted a clone from pre-generated bundles.
1333 1341 self.clonebundleattempted = False
1334 1342
1335 1343 @util.propertycache
1336 1344 def pulledsubset(self):
1337 1345 """heads of the set of changeset target by the pull"""
1338 1346 # compute target subset
1339 1347 if self.heads is None:
1340 1348 # We pulled everything possible
1341 1349 # sync on everything common
1342 1350 c = set(self.common)
1343 1351 ret = list(self.common)
1344 1352 for n in self.rheads:
1345 1353 if n not in c:
1346 1354 ret.append(n)
1347 1355 return ret
1348 1356 else:
1349 1357 # We pulled a specific subset
1350 1358 # sync on this subset
1351 1359 return self.heads
1352 1360
1353 1361 @util.propertycache
1354 1362 def canusebundle2(self):
1355 1363 return not _forcebundle1(self)
1356 1364
1357 1365 @util.propertycache
1358 1366 def remotebundle2caps(self):
1359 1367 return bundle2.bundle2caps(self.remote)
1360 1368
1361 1369 def gettransaction(self):
1362 1370 # deprecated; talk to trmanager directly
1363 1371 return self.trmanager.transaction()
1364 1372
1365 1373 class transactionmanager(util.transactional):
1366 1374 """An object to manage the life cycle of a transaction
1367 1375
1368 1376 It creates the transaction on demand and calls the appropriate hooks when
1369 1377 closing the transaction."""
1370 1378 def __init__(self, repo, source, url):
1371 1379 self.repo = repo
1372 1380 self.source = source
1373 1381 self.url = url
1374 1382 self._tr = None
1375 1383
1376 1384 def transaction(self):
1377 1385 """Return an open transaction object, constructing if necessary"""
1378 1386 if not self._tr:
1379 1387 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1380 1388 self._tr = self.repo.transaction(trname)
1381 1389 self._tr.hookargs['source'] = self.source
1382 1390 self._tr.hookargs['url'] = self.url
1383 1391 return self._tr
1384 1392
1385 1393 def close(self):
1386 1394 """close transaction if created"""
1387 1395 if self._tr is not None:
1388 1396 self._tr.close()
1389 1397
1390 1398 def release(self):
1391 1399 """release transaction if created"""
1392 1400 if self._tr is not None:
1393 1401 self._tr.release()
1394 1402
1403 def listkeys(remote, namespace):
1404 with remote.commandexecutor() as e:
1405 return e.callcommand('listkeys', {'namespace': namespace}).result()
1406
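This new helper is the core of the changeset: every former remote.listkeys(namespace) call in this file now goes through the peer's command executor, which returns a future. That indirection lets executor implementations batch several commands into a single network round trip before results are resolved. A simplified sketch of the contract the helper relies on (illustrative only; the real interface is defined by Mercurial's peer classes, and fakefuture/fakeexecutor are hypothetical names):

    class fakefuture(object):
        def __init__(self, value):
            self._value = value
        def result(self):
            return self._value

    class fakeexecutor(object):
        """Stand-in honoring the commandexecutor protocol."""
        def __init__(self, peer):
            self._peer = peer
        def __enter__(self):
            return self
        def __exit__(self, *exc):
            # a real executor would flush any batched requests here
            return False
        def callcommand(self, name, args):
            # a real executor may queue the call and send several
            # commands in one round trip; this one resolves eagerly
            return fakefuture(getattr(self._peer, name)(**args))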
1395 1407 def _fullpullbundle2(repo, pullop):
1396 1408 # The server may send a partial reply, i.e. when inlining
1397 1409 # pre-computed bundles. In that case, update the common
1398 1410 # set based on the results and pull another bundle.
1399 1411 #
1400 1412 # There are two indicators that the process is finished:
1401 1413 # - no changeset has been added, or
1402 1414 # - all remote heads are known locally.
1403 1415 # The head check must use the unfiltered view as obsoletion
1404 1416 # markers can hide heads.
1405 1417 unfi = repo.unfiltered()
1406 1418 unficl = unfi.changelog
1407 1419 def headsofdiff(h1, h2):
1408 1420 """Returns heads(h1 % h2)"""
1409 1421 res = unfi.set('heads(%ln %% %ln)', h1, h2)
1410 1422 return set(ctx.node() for ctx in res)
1411 1423 def headsofunion(h1, h2):
1412 1424 """Returns heads((h1 + h2) - null)"""
1413 1425 res = unfi.set('heads((%ln + %ln - null))', h1, h2)
1414 1426 return set(ctx.node() for ctx in res)
1415 1427 while True:
1416 1428 old_heads = unficl.heads()
1417 1429 clstart = len(unficl)
1418 1430 _pullbundle2(pullop)
1419 1431 if changegroup.NARROW_REQUIREMENT in repo.requirements:
1420 1432 # XXX narrow clones filter the heads on the server side during
1421 1433 # XXX getbundle and result in partial replies as well.
1422 1434 # XXX Disable pull bundles in this case as band aid to avoid
1423 1435 # XXX extra round trips.
1424 1436 break
1425 1437 if clstart == len(unficl):
1426 1438 break
1427 1439 if all(unficl.hasnode(n) for n in pullop.rheads):
1428 1440 break
1429 1441 new_heads = headsofdiff(unficl.heads(), old_heads)
1430 1442 pullop.common = headsofunion(new_heads, pullop.common)
1431 1443 pullop.rheads = set(pullop.rheads) - pullop.common
1432 1444
1433 1445 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1434 1446 streamclonerequested=None):
1435 1447 """Fetch repository data from a remote.
1436 1448
1437 1449 This is the main function used to retrieve data from a remote repository.
1438 1450
1439 1451 ``repo`` is the local repository to clone into.
1440 1452 ``remote`` is a peer instance.
1441 1453 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1442 1454 default) means to pull everything from the remote.
1443 1455 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1444 1456 default, all remote bookmarks are pulled.
1445 1457 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1446 1458 initialization.
1447 1459 ``streamclonerequested`` is a boolean indicating whether a "streaming
1448 1460 clone" is requested. A "streaming clone" is essentially a raw file copy
1449 1461 of revlogs from the server. This only works when the local repository is
1450 1462 empty. The default value of ``None`` means to respect the server
1451 1463 configuration for preferring stream clones.
1452 1464
1453 1465 Returns the ``pulloperation`` created for this pull.
1454 1466 """
1455 1467 if opargs is None:
1456 1468 opargs = {}
1457 1469 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1458 1470 streamclonerequested=streamclonerequested,
1459 1471 **pycompat.strkwargs(opargs))
1460 1472
1461 1473 peerlocal = pullop.remote.local()
1462 1474 if peerlocal:
1463 1475 missing = set(peerlocal.requirements) - pullop.repo.supported
1464 1476 if missing:
1465 1477 msg = _("required features are not"
1466 1478 " supported in the destination:"
1467 1479 " %s") % (', '.join(sorted(missing)))
1468 1480 raise error.Abort(msg)
1469 1481
1470 1482 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1471 1483 with repo.wlock(), repo.lock(), pullop.trmanager:
1472 1484 # This should ideally be in _pullbundle2(). However, it needs to run
1473 1485 # before discovery to avoid extra work.
1474 1486 _maybeapplyclonebundle(pullop)
1475 1487 streamclone.maybeperformlegacystreamclone(pullop)
1476 1488 _pulldiscovery(pullop)
1477 1489 if pullop.canusebundle2:
1478 1490 _fullpullbundle2(repo, pullop)
1479 1491 _pullchangeset(pullop)
1480 1492 _pullphase(pullop)
1481 1493 _pullbookmarks(pullop)
1482 1494 _pullobsolete(pullop)
1483 1495
1484 1496 # storing remotenames
1485 1497 if repo.ui.configbool('experimental', 'remotenames'):
1486 1498 logexchange.pullremotenames(repo, remote)
1487 1499
1488 1500 return pullop
1489 1501
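# A hedged usage sketch for pull(); the local path and remote URL are
# illustrative:
#
#   >>> from mercurial import exchange, hg, ui as uimod
#   >>> repo = hg.repository(uimod.ui.load(), '/path/to/repo')
#   >>> other = hg.peer(repo.ui, {}, 'https://example.com/repo')
#   >>> pullop = exchange.pull(repo, other, heads=None)
#   >>> pullop.cgresult   # 0 when no changes were found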
1490 1502 # list of steps to perform discovery before pull
1491 1503 pulldiscoveryorder = []
1492 1504
1493 1505 # Mapping between step name and function
1494 1506 #
1495 1507 # This exists to help extensions wrap steps if necessary
1496 1508 pulldiscoverymapping = {}
1497 1509
1498 1510 def pulldiscovery(stepname):
1499 1511 """decorator for function performing discovery before pull
1500 1512
1501 1513 The function is added to the step -> function mapping and appended to the
1502 1514 list of steps. Beware that decorated functions will be added in order (this
1503 1515 may matter).
1504 1516
1505 1517 You can only use this decorator for a new step; if you want to wrap a step
1506 1518 from an extension, change the pulldiscoverymapping dictionary directly."""
1507 1519 def dec(func):
1508 1520 assert stepname not in pulldiscoverymapping
1509 1521 pulldiscoverymapping[stepname] = func
1510 1522 pulldiscoveryorder.append(stepname)
1511 1523 return func
1512 1524 return dec
1513 1525
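# Sketch: how a new discovery step would be registered through the
# decorator above (the step name and function body are hypothetical):
#
#   @pulldiscovery('example:extra-discovery')
#   def _pulldiscoveryexample(pullop):
#       pullop.repo.ui.debug('running extra discovery\n')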
1514 1526 def _pulldiscovery(pullop):
1515 1527 """Run all discovery steps"""
1516 1528 for stepname in pulldiscoveryorder:
1517 1529 step = pulldiscoverymapping[stepname]
1518 1530 step(pullop)
1519 1531
1520 1532 @pulldiscovery('b1:bookmarks')
1521 1533 def _pullbookmarkbundle1(pullop):
1522 1534 """fetch bookmark data in bundle1 case
1523 1535
1524 1536 If not using bundle2, we have to fetch bookmarks before changeset
1525 1537 discovery to reduce the chance and impact of race conditions."""
1526 1538 if pullop.remotebookmarks is not None:
1527 1539 return
1528 1540 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1529 1541 # all known bundle2 servers now support listkeys, but let's be nice
1530 1542 # with new implementations.
1531 1543 return
1532 books = pullop.remote.listkeys('bookmarks')
1544 books = listkeys(pullop.remote, 'bookmarks')
1533 1545 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1534 1546
1535 1547
1536 1548 @pulldiscovery('changegroup')
1537 1549 def _pulldiscoverychangegroup(pullop):
1538 1550 """discovery phase for the pull
1539 1551
1540 1552 Currently handles changeset discovery only; will change to handle all
1541 1553 discovery at some point."""
1542 1554 tmp = discovery.findcommonincoming(pullop.repo,
1543 1555 pullop.remote,
1544 1556 heads=pullop.heads,
1545 1557 force=pullop.force)
1546 1558 common, fetch, rheads = tmp
1547 1559 nm = pullop.repo.unfiltered().changelog.nodemap
1548 1560 if fetch and rheads:
1549 1561 # If a remote head is filtered locally, put it back in common.
1550 1562 #
1551 1563 # This is a hackish solution to catch most "common but locally
1552 1564 # hidden" situations. We do not perform discovery on the unfiltered
1553 1565 # repository because it ends up doing a pathological number of round
1554 1566 # trips for a huge number of changesets we do not care about.
1555 1567 #
1556 1568 # If a set of such "common but filtered" changesets exists on the
1557 1569 # server but does not include a remote head, we will not detect it.
1558 1570 scommon = set(common)
1559 1571 for n in rheads:
1560 1572 if n in nm:
1561 1573 if n not in scommon:
1562 1574 common.append(n)
1563 1575 if set(rheads).issubset(set(common)):
1564 1576 fetch = []
1565 1577 pullop.common = common
1566 1578 pullop.fetch = fetch
1567 1579 pullop.rheads = rheads
1568 1580
1569 1581 def _pullbundle2(pullop):
1570 1582 """pull data using bundle2
1571 1583
1572 1584 For now, the only supported data is the changegroup."""
1573 1585 kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
1574 1586
1575 1587 # make ui easier to access
1576 1588 ui = pullop.repo.ui
1577 1589
1578 1590 # At the moment we don't do stream clones over bundle2. If that is
1579 1591 # implemented then here's where the check for that will go.
1580 1592 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1581 1593
1582 1594 # declare pull perimeters
1583 1595 kwargs['common'] = pullop.common
1584 1596 kwargs['heads'] = pullop.heads or pullop.rheads
1585 1597
1586 1598 if streaming:
1587 1599 kwargs['cg'] = False
1588 1600 kwargs['stream'] = True
1589 1601 pullop.stepsdone.add('changegroup')
1590 1602 pullop.stepsdone.add('phases')
1591 1603
1592 1604 else:
1593 1605 # pulling changegroup
1594 1606 pullop.stepsdone.add('changegroup')
1595 1607
1596 1608 kwargs['cg'] = pullop.fetch
1597 1609
1598 1610 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1599 1611 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1600 1612 if (not legacyphase and hasbinaryphase):
1601 1613 kwargs['phases'] = True
1602 1614 pullop.stepsdone.add('phases')
1603 1615
1604 1616 if 'listkeys' in pullop.remotebundle2caps:
1605 1617 if 'phases' not in pullop.stepsdone:
1606 1618 kwargs['listkeys'] = ['phases']
1607 1619
1608 1620 bookmarksrequested = False
1609 1621 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1610 1622 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1611 1623
1612 1624 if pullop.remotebookmarks is not None:
1613 1625 pullop.stepsdone.add('request-bookmarks')
1614 1626
1615 1627 if ('request-bookmarks' not in pullop.stepsdone
1616 1628 and pullop.remotebookmarks is None
1617 1629 and not legacybookmark and hasbinarybook):
1618 1630 kwargs['bookmarks'] = True
1619 1631 bookmarksrequested = True
1620 1632
1621 1633 if 'listkeys' in pullop.remotebundle2caps:
1622 1634 if 'request-bookmarks' not in pullop.stepsdone:
1623 1635 # make sure to always include bookmark data when migrating
1624 1636 # `hg incoming --bundle` to using this function.
1625 1637 pullop.stepsdone.add('request-bookmarks')
1626 1638 kwargs.setdefault('listkeys', []).append('bookmarks')
1627 1639
1628 1640 # If this is a full pull / clone and the server supports the clone bundles
1629 1641 # feature, tell the server whether we attempted a clone bundle. The
1630 1642 # presence of this flag indicates the client supports clone bundles. This
1631 1643 # will enable the server to treat clients that support clone bundles
1632 1644 # differently from those that don't.
1633 1645 if (pullop.remote.capable('clonebundles')
1634 1646 and pullop.heads is None and list(pullop.common) == [nullid]):
1635 1647 kwargs['cbattempted'] = pullop.clonebundleattempted
1636 1648
1637 1649 if streaming:
1638 1650 pullop.repo.ui.status(_('streaming all changes\n'))
1639 1651 elif not pullop.fetch:
1640 1652 pullop.repo.ui.status(_("no changes found\n"))
1641 1653 pullop.cgresult = 0
1642 1654 else:
1643 1655 if pullop.heads is None and list(pullop.common) == [nullid]:
1644 1656 pullop.repo.ui.status(_("requesting all changes\n"))
1645 1657 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1646 1658 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1647 1659 if obsolete.commonversion(remoteversions) is not None:
1648 1660 kwargs['obsmarkers'] = True
1649 1661 pullop.stepsdone.add('obsmarkers')
1650 1662 _pullbundle2extraprepare(pullop, kwargs)
1651 1663
1652 1664 with pullop.remote.commandexecutor() as e:
1653 1665 args = dict(kwargs)
1654 1666 args['source'] = 'pull'
1655 1667 bundle = e.callcommand('getbundle', args).result()
1656 1668
1657 1669 try:
1658 1670 op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
1659 1671 source='pull')
1660 1672 op.modes['bookmarks'] = 'records'
1661 1673 bundle2.processbundle(pullop.repo, bundle, op=op)
1662 1674 except bundle2.AbortFromPart as exc:
1663 1675 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1664 1676 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1665 1677 except error.BundleValueError as exc:
1666 1678 raise error.Abort(_('missing support for %s') % exc)
1667 1679
1668 1680 if pullop.fetch:
1669 1681 pullop.cgresult = bundle2.combinechangegroupresults(op)
1670 1682
1671 1683 # processing phases change
1672 1684 for namespace, value in op.records['listkeys']:
1673 1685 if namespace == 'phases':
1674 1686 _pullapplyphases(pullop, value)
1675 1687
1676 1688 # processing bookmark update
1677 1689 if bookmarksrequested:
1678 1690 books = {}
1679 1691 for record in op.records['bookmarks']:
1680 1692 books[record['bookmark']] = record["node"]
1681 1693 pullop.remotebookmarks = books
1682 1694 else:
1683 1695 for namespace, value in op.records['listkeys']:
1684 1696 if namespace == 'bookmarks':
1685 1697 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1686 1698
1687 1699 # bookmark data were either already there or pulled in the bundle
1688 1700 if pullop.remotebookmarks is not None:
1689 1701 _pullbookmarks(pullop)
1690 1702
1691 1703 def _pullbundle2extraprepare(pullop, kwargs):
1692 1704 """hook function so that extensions can extend the getbundle call"""
1693 1705
1694 1706 def _pullchangeset(pullop):
1695 1707 """pull changeset from unbundle into the local repo"""
1696 1708 # We delay opening the transaction as late as possible so we
1697 1709 # don't open a transaction for nothing and don't break future
1698 1710 # useful rollback calls
1699 1711 if 'changegroup' in pullop.stepsdone:
1700 1712 return
1701 1713 pullop.stepsdone.add('changegroup')
1702 1714 if not pullop.fetch:
1703 1715 pullop.repo.ui.status(_("no changes found\n"))
1704 1716 pullop.cgresult = 0
1705 1717 return
1706 1718 tr = pullop.gettransaction()
1707 1719 if pullop.heads is None and list(pullop.common) == [nullid]:
1708 1720 pullop.repo.ui.status(_("requesting all changes\n"))
1709 1721 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1710 1722 # issue1320, avoid a race if remote changed after discovery
1711 1723 pullop.heads = pullop.rheads
1712 1724
1713 1725 if pullop.remote.capable('getbundle'):
1714 1726 # TODO: get bundlecaps from remote
1715 1727 cg = pullop.remote.getbundle('pull', common=pullop.common,
1716 1728 heads=pullop.heads or pullop.rheads)
1717 1729 elif pullop.heads is None:
1718 1730 with pullop.remote.commandexecutor() as e:
1719 1731 cg = e.callcommand('changegroup', {
1720 1732 'nodes': pullop.fetch,
1721 1733 'source': 'pull',
1722 1734 }).result()
1723 1735
1724 1736 elif not pullop.remote.capable('changegroupsubset'):
1725 1737 raise error.Abort(_("partial pull cannot be done because "
1726 1738 "other repository doesn't support "
1727 1739 "changegroupsubset."))
1728 1740 else:
1729 1741 with pullop.remote.commandexecutor() as e:
1730 1742 cg = e.callcommand('changegroupsubset', {
1731 1743 'bases': pullop.fetch,
1732 1744 'heads': pullop.heads,
1733 1745 'source': 'pull',
1734 1746 }).result()
1735 1747
1736 1748 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1737 1749 pullop.remote.url())
1738 1750 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1739 1751
1740 1752 def _pullphase(pullop):
1741 1753 # Get remote phases data from remote
1742 1754 if 'phases' in pullop.stepsdone:
1743 1755 return
1744 remotephases = pullop.remote.listkeys('phases')
1756 remotephases = listkeys(pullop.remote, 'phases')
1745 1757 _pullapplyphases(pullop, remotephases)
1746 1758
1747 1759 def _pullapplyphases(pullop, remotephases):
1748 1760 """apply phase movement from observed remote state"""
1749 1761 if 'phases' in pullop.stepsdone:
1750 1762 return
1751 1763 pullop.stepsdone.add('phases')
1752 1764 publishing = bool(remotephases.get('publishing', False))
1753 1765 if remotephases and not publishing:
1754 1766 # remote is new and non-publishing
1755 1767 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1756 1768 pullop.pulledsubset,
1757 1769 remotephases)
1758 1770 dheads = pullop.pulledsubset
1759 1771 else:
1760 1772 # Remote is old or publishing; all common changesets
1761 1773 # should be seen as public
1762 1774 pheads = pullop.pulledsubset
1763 1775 dheads = []
1764 1776 unfi = pullop.repo.unfiltered()
1765 1777 phase = unfi._phasecache.phase
1766 1778 rev = unfi.changelog.nodemap.get
1767 1779 public = phases.public
1768 1780 draft = phases.draft
1769 1781
1770 1782 # exclude changesets already public locally and update the others
1771 1783 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1772 1784 if pheads:
1773 1785 tr = pullop.gettransaction()
1774 1786 phases.advanceboundary(pullop.repo, tr, public, pheads)
1775 1787
1776 1788 # exclude changesets already draft locally and update the others
1777 1789 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1778 1790 if dheads:
1779 1791 tr = pullop.gettransaction()
1780 1792 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1781 1793
1782 1794 def _pullbookmarks(pullop):
1783 1795 """process the remote bookmark information to update the local one"""
1784 1796 if 'bookmarks' in pullop.stepsdone:
1785 1797 return
1786 1798 pullop.stepsdone.add('bookmarks')
1787 1799 repo = pullop.repo
1788 1800 remotebookmarks = pullop.remotebookmarks
1789 1801 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1790 1802 pullop.remote.url(),
1791 1803 pullop.gettransaction,
1792 1804 explicit=pullop.explicitbookmarks)
1793 1805
1794 1806 def _pullobsolete(pullop):
1795 1807 """utility function to pull obsolete markers from a remote
1796 1808
1797 1809 `gettransaction` is a function that returns the pull transaction, creating
1798 1810 one if necessary. We return the transaction to inform the calling code that
1799 1811 a new transaction has been created (when applicable).
1800 1812
1801 1813 Exists mostly to allow overriding for experimentation purposes"""
1802 1814 if 'obsmarkers' in pullop.stepsdone:
1803 1815 return
1804 1816 pullop.stepsdone.add('obsmarkers')
1805 1817 tr = None
1806 1818 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1807 1819 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1808 remoteobs = pullop.remote.listkeys('obsolete')
1820 remoteobs = listkeys(pullop.remote, 'obsolete')
1809 1821 if 'dump0' in remoteobs:
1810 1822 tr = pullop.gettransaction()
1811 1823 markers = []
1812 1824 for key in sorted(remoteobs, reverse=True):
1813 1825 if key.startswith('dump'):
1814 1826 data = util.b85decode(remoteobs[key])
1815 1827 version, newmarks = obsolete._readmarkers(data)
1816 1828 markers += newmarks
1817 1829 if markers:
1818 1830 pullop.repo.obsstore.add(tr, markers)
1819 1831 pullop.repo.invalidatevolatilesets()
1820 1832 return tr
1821 1833
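# Sketch of the wire format consumed above: the 'obsolete' pushkey
# namespace maps keys 'dump0', 'dump1', ... to base85-encoded marker
# blobs, along the lines of (raw marker bytes elided):
#
#   remoteobs = {'dump0': util.b85encode(rawmarkerdata)}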
1822 1834 def caps20to10(repo, role):
1823 1835 """return a set with appropriate options to use bundle20 during getbundle"""
1824 1836 caps = {'HG20'}
1825 1837 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
1826 1838 caps.add('bundle2=' + urlreq.quote(capsblob))
1827 1839 return caps
1828 1840
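# Sketch of the resulting set (the URL-quoted capability blob is elided):
#
#   caps20to10(repo, role='client')
#   # -> {'HG20', 'bundle2=<urlquoted capability blob>'}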
1829 1841 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1830 1842 getbundle2partsorder = []
1831 1843
1832 1844 # Mapping between step name and function
1833 1845 #
1834 1846 # This exists to help extensions wrap steps if necessary
1835 1847 getbundle2partsmapping = {}
1836 1848
1837 1849 def getbundle2partsgenerator(stepname, idx=None):
1838 1850 """decorator for function generating bundle2 part for getbundle
1839 1851
1840 1852 The function is added to the step -> function mapping and appended to the
1841 1853 list of steps. Beware that decorated functions will be added in order
1842 1854 (this may matter).
1843 1855
1844 1856 You can only use this decorator for new steps; if you want to wrap a step
1845 1857 from an extension, modify the getbundle2partsmapping dictionary directly."""
1846 1858 def dec(func):
1847 1859 assert stepname not in getbundle2partsmapping
1848 1860 getbundle2partsmapping[stepname] = func
1849 1861 if idx is None:
1850 1862 getbundle2partsorder.append(stepname)
1851 1863 else:
1852 1864 getbundle2partsorder.insert(idx, stepname)
1853 1865 return func
1854 1866 return dec
1855 1867
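# Sketch: registering an additional part generator via the decorator
# above (the part name and payload are hypothetical):
#
#   @getbundle2partsgenerator('example:extra-part')
#   def _getbundleexamplepart(bundler, repo, source, bundlecaps=None,
#                             b2caps=None, **kwargs):
#       bundler.newpart('example:extra-part', data='payload')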
1856 1868 def bundle2requested(bundlecaps):
1857 1869 if bundlecaps is not None:
1858 1870 return any(cap.startswith('HG2') for cap in bundlecaps)
1859 1871 return False
1860 1872
1861 1873 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1862 1874 **kwargs):
1863 1875 """Return chunks constituting a bundle's raw data.
1864 1876
1865 1877 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1866 1878 passed.
1867 1879
1868 1880 Returns a 2-tuple of a dict with metadata about the generated bundle
1869 1881 and an iterator over raw chunks (of varying sizes).
1870 1882 """
1871 1883 kwargs = pycompat.byteskwargs(kwargs)
1872 1884 info = {}
1873 1885 usebundle2 = bundle2requested(bundlecaps)
1874 1886 # bundle10 case
1875 1887 if not usebundle2:
1876 1888 if bundlecaps and not kwargs.get('cg', True):
1877 1889 raise ValueError(_('request for bundle10 must include changegroup'))
1878 1890
1879 1891 if kwargs:
1880 1892 raise ValueError(_('unsupported getbundle arguments: %s')
1881 1893 % ', '.join(sorted(kwargs.keys())))
1882 1894 outgoing = _computeoutgoing(repo, heads, common)
1883 1895 info['bundleversion'] = 1
1884 1896 return info, changegroup.makestream(repo, outgoing, '01', source,
1885 1897 bundlecaps=bundlecaps)
1886 1898
1887 1899 # bundle20 case
1888 1900 info['bundleversion'] = 2
1889 1901 b2caps = {}
1890 1902 for bcaps in bundlecaps:
1891 1903 if bcaps.startswith('bundle2='):
1892 1904 blob = urlreq.unquote(bcaps[len('bundle2='):])
1893 1905 b2caps.update(bundle2.decodecaps(blob))
1894 1906 bundler = bundle2.bundle20(repo.ui, b2caps)
1895 1907
1896 1908 kwargs['heads'] = heads
1897 1909 kwargs['common'] = common
1898 1910
1899 1911 for name in getbundle2partsorder:
1900 1912 func = getbundle2partsmapping[name]
1901 1913 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1902 1914 **pycompat.strkwargs(kwargs))
1903 1915
1904 1916 info['prefercompressed'] = bundler.prefercompressed
1905 1917
1906 1918 return info, bundler.getchunks()
1907 1919
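# Sketch: consuming the return value above (arguments are illustrative):
#
#   info, gen = getbundlechunks(repo, 'serve', heads=None, common=None)
#   # info['bundleversion'] is 1 for bundle10 and 2 for bundle20
#   data = ''.join(gen)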
1908 1920 @getbundle2partsgenerator('stream2')
1909 1921 def _getbundlestream2(bundler, repo, *args, **kwargs):
1910 1922 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
1911 1923
1912 1924 @getbundle2partsgenerator('changegroup')
1913 1925 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1914 1926 b2caps=None, heads=None, common=None, **kwargs):
1915 1927 """add a changegroup part to the requested bundle"""
1916 1928 cgstream = None
1917 1929 if kwargs.get(r'cg', True):
1918 1930 # build changegroup bundle here.
1919 1931 version = '01'
1920 1932 cgversions = b2caps.get('changegroup')
1921 1933 if cgversions: # 3.1 and 3.2 ship with an empty value
1922 1934 cgversions = [v for v in cgversions
1923 1935 if v in changegroup.supportedoutgoingversions(repo)]
1924 1936 if not cgversions:
1925 1937 raise ValueError(_('no common changegroup version'))
1926 1938 version = max(cgversions)
1927 1939 outgoing = _computeoutgoing(repo, heads, common)
1928 1940 if outgoing.missing:
1929 1941 cgstream = changegroup.makestream(repo, outgoing, version, source,
1930 1942 bundlecaps=bundlecaps)
1931 1943
1932 1944 if cgstream:
1933 1945 part = bundler.newpart('changegroup', data=cgstream)
1934 1946 if cgversions:
1935 1947 part.addparam('version', version)
1936 1948 part.addparam('nbchanges', '%d' % len(outgoing.missing),
1937 1949 mandatory=False)
1938 1950 if 'treemanifest' in repo.requirements:
1939 1951 part.addparam('treemanifest', '1')
1940 1952
1941 1953 @getbundle2partsgenerator('bookmarks')
1942 1954 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
1943 1955 b2caps=None, **kwargs):
1944 1956 """add a bookmark part to the requested bundle"""
1945 1957 if not kwargs.get(r'bookmarks', False):
1946 1958 return
1947 1959 if 'bookmarks' not in b2caps:
1948 1960 raise ValueError(_('no common bookmarks exchange method'))
1949 1961 books = bookmod.listbinbookmarks(repo)
1950 1962 data = bookmod.binaryencode(books)
1951 1963 if data:
1952 1964 bundler.newpart('bookmarks', data=data)
1953 1965
1954 1966 @getbundle2partsgenerator('listkeys')
1955 1967 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1956 1968 b2caps=None, **kwargs):
1957 1969 """add parts containing listkeys namespaces to the requested bundle"""
1958 1970 listkeys = kwargs.get(r'listkeys', ())
1959 1971 for namespace in listkeys:
1960 1972 part = bundler.newpart('listkeys')
1961 1973 part.addparam('namespace', namespace)
1962 1974 keys = repo.listkeys(namespace).items()
1963 1975 part.data = pushkey.encodekeys(keys)
1964 1976
1965 1977 @getbundle2partsgenerator('obsmarkers')
1966 1978 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1967 1979 b2caps=None, heads=None, **kwargs):
1968 1980 """add an obsolescence markers part to the requested bundle"""
1969 1981 if kwargs.get(r'obsmarkers', False):
1970 1982 if heads is None:
1971 1983 heads = repo.heads()
1972 1984 subset = [c.node() for c in repo.set('::%ln', heads)]
1973 1985 markers = repo.obsstore.relevantmarkers(subset)
1974 1986 markers = sorted(markers)
1975 1987 bundle2.buildobsmarkerspart(bundler, markers)
1976 1988
1977 1989 @getbundle2partsgenerator('phases')
1978 1990 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
1979 1991 b2caps=None, heads=None, **kwargs):
1980 1992 """add phase heads part to the requested bundle"""
1981 1993 if kwargs.get(r'phases', False):
1982 1994 if 'heads' not in b2caps.get('phases'):
1983 1995 raise ValueError(_('no common phases exchange method'))
1984 1996 if heads is None:
1985 1997 heads = repo.heads()
1986 1998
1987 1999 headsbyphase = collections.defaultdict(set)
1988 2000 if repo.publishing():
1989 2001 headsbyphase[phases.public] = heads
1990 2002 else:
1991 2003 # find the appropriate heads to move
1992 2004
1993 2005 phase = repo._phasecache.phase
1994 2006 node = repo.changelog.node
1995 2007 rev = repo.changelog.rev
1996 2008 for h in heads:
1997 2009 headsbyphase[phase(repo, rev(h))].add(h)
1998 2010 seenphases = list(headsbyphase.keys())
1999 2011
2000 2012 # We do not handle anything but public and draft phases for now
2001 2013 if seenphases:
2002 2014 assert max(seenphases) <= phases.draft
2003 2015
2004 2016 # if client is pulling non-public changesets, we need to find
2005 2017 # intermediate public heads.
2006 2018 draftheads = headsbyphase.get(phases.draft, set())
2007 2019 if draftheads:
2008 2020 publicheads = headsbyphase.get(phases.public, set())
2009 2021
2010 2022 revset = 'heads(only(%ln, %ln) and public())'
2011 2023 extraheads = repo.revs(revset, draftheads, publicheads)
2012 2024 for r in extraheads:
2013 2025 headsbyphase[phases.public].add(node(r))
2014 2026
2015 2027 # transform data in a format used by the encoding function
2016 2028 phasemapping = []
2017 2029 for phase in phases.allphases:
2018 2030 phasemapping.append(sorted(headsbyphase[phase]))
2019 2031
2020 2032 # generate the actual part
2021 2033 phasedata = phases.binaryencode(phasemapping)
2022 2034 bundler.newpart('phase-heads', data=phasedata)
2023 2035
2024 2036 @getbundle2partsgenerator('hgtagsfnodes')
2025 2037 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
2026 2038 b2caps=None, heads=None, common=None,
2027 2039 **kwargs):
2028 2040 """Transfer the .hgtags filenodes mapping.
2029 2041
2030 2042 Only values for heads in this bundle will be transferred.
2031 2043
2032 2044 The part data consists of pairs of 20 byte changeset node and .hgtags
2033 2045 filenodes raw values.
2034 2046 """
2035 2047 # Don't send unless:
2036 2048 # - changesets are being exchanged,
2037 2049 # - the client supports it.
2038 2050 if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
2039 2051 return
2040 2052
2041 2053 outgoing = _computeoutgoing(repo, heads, common)
2042 2054 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2043 2055
2044 2056 @getbundle2partsgenerator('cache:rev-branch-cache')
2045 2057 def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
2046 2058 b2caps=None, heads=None, common=None,
2047 2059 **kwargs):
2048 2060 """Transfer the rev-branch-cache mapping
2049 2061
2050 2062 The payload is a series of data related to each branch
2051 2063
2052 2064 1) branch name length
2053 2065 2) number of open heads
2054 2066 3) number of closed heads
2055 2067 4) open heads nodes
2056 2068 5) closed heads nodes
2057 2069 """
2058 2070 # Don't send unless:
2059 2071 # - changesets are being exchanged,
2060 2072 # - the client supports it.
2061 2073 if not (kwargs.get(r'cg', True)) or 'rev-branch-cache' not in b2caps:
2062 2074 return
2063 2075 outgoing = _computeoutgoing(repo, heads, common)
2064 2076 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2065 2077
2066 2078 def check_heads(repo, their_heads, context):
2067 2079 """check if the heads of a repo have been modified
2068 2080
2069 2081 Used by peer for unbundling.
2070 2082 """
2071 2083 heads = repo.heads()
2072 2084 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
2073 2085 if not (their_heads == ['force'] or their_heads == heads or
2074 2086 their_heads == ['hashed', heads_hash]):
2075 2087 # someone else committed/pushed/unbundled while we
2076 2088 # were transferring data
2077 2089 raise error.PushRaced('repository changed while %s - '
2078 2090 'please try again' % context)
2079 2091
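# Sketch: the 'hashed' form accepted above is built by the client the
# same way the digest is computed in check_heads():
#
#   import hashlib
#   their_heads = ['hashed',
#                  hashlib.sha1(''.join(sorted(repo.heads()))).digest()]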
2080 2092 def unbundle(repo, cg, heads, source, url):
2081 2093 """Apply a bundle to a repo.
2082 2094
2083 2095 This function makes sure the repo is locked during the application and has
2084 2096 a mechanism to check that no push race occurred between the creation of the
2085 2097 bundle and its application.
2086 2098
2087 2099 If the push was raced, a PushRaced exception is raised."""
2088 2100 r = 0
2089 2101 # need a transaction when processing a bundle2 stream
2090 2102 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2091 2103 lockandtr = [None, None, None]
2092 2104 recordout = None
2093 2105 # quick fix for output mismatch with bundle2 in 3.4
2094 2106 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
2095 2107 if url.startswith('remote:http:') or url.startswith('remote:https:'):
2096 2108 captureoutput = True
2097 2109 try:
2098 2110 # note: outside bundle1, 'heads' is expected to be empty and this
2099 2111 # 'check_heads' call will be a no-op
2100 2112 check_heads(repo, heads, 'uploading changes')
2101 2113 # push can proceed
2102 2114 if not isinstance(cg, bundle2.unbundle20):
2103 2115 # legacy case: bundle1 (changegroup 01)
2104 2116 txnname = "\n".join([source, util.hidepassword(url)])
2105 2117 with repo.lock(), repo.transaction(txnname) as tr:
2106 2118 op = bundle2.applybundle(repo, cg, tr, source, url)
2107 2119 r = bundle2.combinechangegroupresults(op)
2108 2120 else:
2109 2121 r = None
2110 2122 try:
2111 2123 def gettransaction():
2112 2124 if not lockandtr[2]:
2113 2125 lockandtr[0] = repo.wlock()
2114 2126 lockandtr[1] = repo.lock()
2115 2127 lockandtr[2] = repo.transaction(source)
2116 2128 lockandtr[2].hookargs['source'] = source
2117 2129 lockandtr[2].hookargs['url'] = url
2118 2130 lockandtr[2].hookargs['bundle2'] = '1'
2119 2131 return lockandtr[2]
2120 2132
2121 2133 # Do greedy locking by default until we're satisfied with lazy
2122 2134 # locking.
2123 2135 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
2124 2136 gettransaction()
2125 2137
2126 2138 op = bundle2.bundleoperation(repo, gettransaction,
2127 2139 captureoutput=captureoutput,
2128 2140 source='push')
2129 2141 try:
2130 2142 op = bundle2.processbundle(repo, cg, op=op)
2131 2143 finally:
2132 2144 r = op.reply
2133 2145 if captureoutput and r is not None:
2134 2146 repo.ui.pushbuffer(error=True, subproc=True)
2135 2147 def recordout(output):
2136 2148 r.newpart('output', data=output, mandatory=False)
2137 2149 if lockandtr[2] is not None:
2138 2150 lockandtr[2].close()
2139 2151 except BaseException as exc:
2140 2152 exc.duringunbundle2 = True
2141 2153 if captureoutput and r is not None:
2142 2154 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2143 2155 def recordout(output):
2144 2156 part = bundle2.bundlepart('output', data=output,
2145 2157 mandatory=False)
2146 2158 parts.append(part)
2147 2159 raise
2148 2160 finally:
2149 2161 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2150 2162 if recordout is not None:
2151 2163 recordout(repo.ui.popbuffer())
2152 2164 return r
2153 2165
2154 2166 def _maybeapplyclonebundle(pullop):
2155 2167 """Apply a clone bundle from a remote, if possible."""
2156 2168
2157 2169 repo = pullop.repo
2158 2170 remote = pullop.remote
2159 2171
2160 2172 if not repo.ui.configbool('ui', 'clonebundles'):
2161 2173 return
2162 2174
2163 2175 # Only run if local repo is empty.
2164 2176 if len(repo):
2165 2177 return
2166 2178
2167 2179 if pullop.heads:
2168 2180 return
2169 2181
2170 2182 if not remote.capable('clonebundles'):
2171 2183 return
2172 2184
2173 2185 with remote.commandexecutor() as e:
2174 2186 res = e.callcommand('clonebundles', {}).result()
2175 2187
2176 2188 # If we call the wire protocol command, that's good enough to record the
2177 2189 # attempt.
2178 2190 pullop.clonebundleattempted = True
2179 2191
2180 2192 entries = parseclonebundlesmanifest(repo, res)
2181 2193 if not entries:
2182 2194 repo.ui.note(_('no clone bundles available on remote; '
2183 2195 'falling back to regular clone\n'))
2184 2196 return
2185 2197
2186 2198 entries = filterclonebundleentries(
2187 2199 repo, entries, streamclonerequested=pullop.streamclonerequested)
2188 2200
2189 2201 if not entries:
2190 2202 # There is a thundering herd concern here. However, if a server
2191 2203 # operator doesn't advertise bundles appropriate for its clients,
2192 2204 # they deserve what's coming. Furthermore, from a client's
2193 2205 # perspective, no automatic fallback would mean not being able to
2194 2206 # clone!
2195 2207 repo.ui.warn(_('no compatible clone bundles available on server; '
2196 2208 'falling back to regular clone\n'))
2197 2209 repo.ui.warn(_('(you may want to report this to the server '
2198 2210 'operator)\n'))
2199 2211 return
2200 2212
2201 2213 entries = sortclonebundleentries(repo.ui, entries)
2202 2214
2203 2215 url = entries[0]['URL']
2204 2216 repo.ui.status(_('applying clone bundle from %s\n') % url)
2205 2217 if trypullbundlefromurl(repo.ui, repo, url):
2206 2218 repo.ui.status(_('finished applying clone bundle\n'))
2207 2219 # Bundle failed.
2208 2220 #
2209 2221 # We abort by default to avoid the thundering herd of
2210 2222 # clients flooding a server that was expecting expensive
2211 2223 # clone load to be offloaded.
2212 2224 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2213 2225 repo.ui.warn(_('falling back to normal clone\n'))
2214 2226 else:
2215 2227 raise error.Abort(_('error applying bundle'),
2216 2228 hint=_('if this error persists, consider contacting '
2217 2229 'the server operator or disable clone '
2218 2230 'bundles via '
2219 2231 '"--config ui.clonebundles=false"'))
2220 2232
2221 2233 def parseclonebundlesmanifest(repo, s):
2222 2234 """Parses the raw text of a clone bundles manifest.
2223 2235
2224 2236 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2225 2237 to the URL and other keys are the attributes for the entry.
2226 2238 """
2227 2239 m = []
2228 2240 for line in s.splitlines():
2229 2241 fields = line.split()
2230 2242 if not fields:
2231 2243 continue
2232 2244 attrs = {'URL': fields[0]}
2233 2245 for rawattr in fields[1:]:
2234 2246 key, value = rawattr.split('=', 1)
2235 2247 key = urlreq.unquote(key)
2236 2248 value = urlreq.unquote(value)
2237 2249 attrs[key] = value
2238 2250
2239 2251 # Parse BUNDLESPEC into components. This makes client-side
2240 2252 # preferences easier to specify since you can prefer a single
2241 2253 # component of the BUNDLESPEC.
2242 2254 if key == 'BUNDLESPEC':
2243 2255 try:
2244 2256 bundlespec = parsebundlespec(repo, value,
2245 2257 externalnames=True)
2246 2258 attrs['COMPRESSION'] = bundlespec.compression
2247 2259 attrs['VERSION'] = bundlespec.version
2248 2260 except error.InvalidBundleSpecification:
2249 2261 pass
2250 2262 except error.UnsupportedBundleSpecification:
2251 2263 pass
2252 2264
2253 2265 m.append(attrs)
2254 2266
2255 2267 return m
2256 2268
2257 2269 def isstreamclonespec(bundlespec):
2258 2270 # Stream clone v1
2259 2271 if (bundlespec.compression == 'UN' and bundlespec.version == 's1'):
2260 2272 return True
2261 2273
2262 2274 # Stream clone v2
2263 2275 if (bundlespec.compression == 'UN' and bundlespec.version == '02' and
2264 2276 bundlespec.contentopts.get('streamv2')):
2265 2277 return True
2266 2278
2267 2279 return False
2268 2280
2269 2281 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2270 2282 """Remove incompatible clone bundle manifest entries.
2271 2283
2272 2284 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2273 2285 and returns a new list consisting of only the entries that this client
2274 2286 should be able to apply.
2275 2287
2276 2288 There is no guarantee we'll be able to apply all returned entries because
2277 2289 the metadata we use to filter on may be missing or wrong.
2278 2290 """
2279 2291 newentries = []
2280 2292 for entry in entries:
2281 2293 spec = entry.get('BUNDLESPEC')
2282 2294 if spec:
2283 2295 try:
2284 2296 bundlespec = parsebundlespec(repo, spec, strict=True)
2285 2297
2286 2298 # If a stream clone was requested, filter out non-streamclone
2287 2299 # entries.
2288 2300 if streamclonerequested and not isstreamclonespec(bundlespec):
2289 2301 repo.ui.debug('filtering %s because not a stream clone\n' %
2290 2302 entry['URL'])
2291 2303 continue
2292 2304
2293 2305 except error.InvalidBundleSpecification as e:
2294 2306 repo.ui.debug(stringutil.forcebytestr(e) + '\n')
2295 2307 continue
2296 2308 except error.UnsupportedBundleSpecification as e:
2297 2309 repo.ui.debug('filtering %s because unsupported bundle '
2298 2310 'spec: %s\n' % (
2299 2311 entry['URL'], stringutil.forcebytestr(e)))
2300 2312 continue
2301 2313 # If we don't have a spec and requested a stream clone, we don't know
2302 2314 # what the entry is so don't attempt to apply it.
2303 2315 elif streamclonerequested:
2304 2316 repo.ui.debug('filtering %s because cannot determine if a stream '
2305 2317 'clone bundle\n' % entry['URL'])
2306 2318 continue
2307 2319
2308 2320 if 'REQUIRESNI' in entry and not sslutil.hassni:
2309 2321 repo.ui.debug('filtering %s because SNI not supported\n' %
2310 2322 entry['URL'])
2311 2323 continue
2312 2324
2313 2325 newentries.append(entry)
2314 2326
2315 2327 return newentries
2316 2328
2317 2329 class clonebundleentry(object):
2318 2330 """Represents an item in a clone bundles manifest.
2319 2331
2320 2332 This rich class is needed to support sorting since sorted() in Python 3
2321 2333 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2322 2334 won't work.
2323 2335 """
2324 2336
2325 2337 def __init__(self, value, prefers):
2326 2338 self.value = value
2327 2339 self.prefers = prefers
2328 2340
2329 2341 def _cmp(self, other):
2330 2342 for prefkey, prefvalue in self.prefers:
2331 2343 avalue = self.value.get(prefkey)
2332 2344 bvalue = other.value.get(prefkey)
2333 2345
2334 2346 # Special case for b missing attribute and a matches exactly.
2335 2347 if avalue is not None and bvalue is None and avalue == prefvalue:
2336 2348 return -1
2337 2349
2338 2350 # Special case for a missing attribute and b matches exactly.
2339 2351 if bvalue is not None and avalue is None and bvalue == prefvalue:
2340 2352 return 1
2341 2353
2342 2354 # We can't compare unless attribute present on both.
2343 2355 if avalue is None or bvalue is None:
2344 2356 continue
2345 2357
2346 2358 # Same values should fall back to next attribute.
2347 2359 if avalue == bvalue:
2348 2360 continue
2349 2361
2350 2362 # Exact matches come first.
2351 2363 if avalue == prefvalue:
2352 2364 return -1
2353 2365 if bvalue == prefvalue:
2354 2366 return 1
2355 2367
2356 2368 # Fall back to next attribute.
2357 2369 continue
2358 2370
2359 2371 # If we got here we couldn't sort by attributes and prefers. Fall
2360 2372 # back to index order.
2361 2373 return 0
2362 2374
2363 2375 def __lt__(self, other):
2364 2376 return self._cmp(other) < 0
2365 2377
2366 2378 def __gt__(self, other):
2367 2379 return self._cmp(other) > 0
2368 2380
2369 2381 def __eq__(self, other):
2370 2382 return self._cmp(other) == 0
2371 2383
2372 2384 def __le__(self, other):
2373 2385 return self._cmp(other) <= 0
2374 2386
2375 2387 def __ge__(self, other):
2376 2388 return self._cmp(other) >= 0
2377 2389
2378 2390 def __ne__(self, other):
2379 2391 return self._cmp(other) != 0
2380 2392
2381 2393 def sortclonebundleentries(ui, entries):
2382 2394 prefers = ui.configlist('ui', 'clonebundleprefers')
2383 2395 if not prefers:
2384 2396 return list(entries)
2385 2397
2386 2398 prefers = [p.split('=', 1) for p in prefers]
2387 2399
2388 2400 items = sorted(clonebundleentry(v, prefers) for v in entries)
2389 2401 return [i.value for i in items]
2390 2402
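# Sketch: with ``ui.clonebundleprefers=COMPRESSION=gzip`` configured,
# entries advertising gzip sort first (values are illustrative):
#
#   entries = [{'URL': 'a', 'COMPRESSION': 'bzip2'},
#              {'URL': 'b', 'COMPRESSION': 'gzip'}]
#   sortclonebundleentries(ui, entries)
#   # -> the 'b' entry first, then 'a'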
2391 2403 def trypullbundlefromurl(ui, repo, url):
2392 2404 """Attempt to apply a bundle from a URL."""
2393 2405 with repo.lock(), repo.transaction('bundleurl') as tr:
2394 2406 try:
2395 2407 fh = urlmod.open(ui, url)
2396 2408 cg = readbundle(ui, fh, 'stream')
2397 2409
2398 2410 if isinstance(cg, streamclone.streamcloneapplier):
2399 2411 cg.apply(repo)
2400 2412 else:
2401 2413 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2402 2414 return True
2403 2415 except urlerr.httperror as e:
2404 2416 ui.warn(_('HTTP error fetching bundle: %s\n') %
2405 2417 stringutil.forcebytestr(e))
2406 2418 except urlerr.urlerror as e:
2407 2419 ui.warn(_('error fetching bundle: %s\n') %
2408 2420 stringutil.forcebytestr(e.reason))
2409 2421
2410 2422 return False