exchange: don't use dagutil...
Gregory Szorc
r39194:b0c73866 default
@@ -1,2623 +1,2623 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 bin,
16 16 hex,
17 17 nullid,
18 18 nullrev,
19 19 )
20 20 from .thirdparty import (
21 21 attr,
22 22 )
23 23 from . import (
24 24 bookmarks as bookmod,
25 25 bundle2,
26 26 changegroup,
27 dagutil,
28 27 discovery,
29 28 error,
30 29 lock as lockmod,
31 30 logexchange,
32 31 narrowspec,
33 32 obsolete,
34 33 phases,
35 34 pushkey,
36 35 pycompat,
37 36 repository,
38 37 scmutil,
39 38 sslutil,
40 39 streamclone,
41 40 url as urlmod,
42 41 util,
43 42 )
44 43 from .utils import (
45 44 stringutil,
46 45 )
47 46
48 47 urlerr = util.urlerr
49 48 urlreq = util.urlreq
50 49
51 50 _NARROWACL_SECTION = 'narrowhgacl'
52 51
53 52 # Maps bundle version human names to changegroup versions.
54 53 _bundlespeccgversions = {'v1': '01',
55 54 'v2': '02',
56 55 'packed1': 's1',
57 56 'bundle2': '02', #legacy
58 57 }
59 58
60 59 # Maps bundle version with content opts to choose which part to bundle
61 60 _bundlespeccontentopts = {
62 61 'v1': {
63 62 'changegroup': True,
64 63 'cg.version': '01',
65 64 'obsolescence': False,
66 65 'phases': False,
67 66 'tagsfnodescache': False,
68 67 'revbranchcache': False
69 68 },
70 69 'v2': {
71 70 'changegroup': True,
72 71 'cg.version': '02',
73 72 'obsolescence': False,
74 73 'phases': False,
75 74 'tagsfnodescache': True,
76 75 'revbranchcache': True
77 76 },
78 77 'packed1' : {
79 78 'cg.version': 's1'
80 79 }
81 80 }
82 81 _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
83 82
84 83 _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
85 84 "tagsfnodescache": False,
86 85 "revbranchcache": False}}
87 86
88 87 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
89 88 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
90 89
91 90 @attr.s
92 91 class bundlespec(object):
93 92 compression = attr.ib()
94 93 wirecompression = attr.ib()
95 94 version = attr.ib()
96 95 wireversion = attr.ib()
97 96 params = attr.ib()
98 97 contentopts = attr.ib()
99 98
100 99 def parsebundlespec(repo, spec, strict=True):
101 100 """Parse a bundle string specification into parts.
102 101
103 102 Bundle specifications denote a well-defined bundle/exchange format.
104 103 The content of a given specification should not change over time in
105 104 order to ensure that bundles produced by a newer version of Mercurial are
106 105 readable from an older version.
107 106
108 107 The string currently has the form:
109 108
110 109 <compression>-<type>[;<parameter0>[;<parameter1>]]
111 110
112 111 Where <compression> is one of the supported compression formats
113 112 and <type> is (currently) a version string. A ";" can follow the type and
114 113 all text afterwards is interpreted as URI encoded, ";" delimited key=value
115 114 pairs.
116 115
117 116 If ``strict`` is True (the default) <compression> is required. Otherwise,
118 117 it is optional.
119 118
120 119 Returns a bundlespec object of (compression, version, parameters).
121 120 Compression will be ``None`` if not in strict mode and a compression isn't
122 121 defined.
123 122
124 123 An ``InvalidBundleSpecification`` is raised when the specification is
125 124 not syntactically well formed.
126 125
127 126 An ``UnsupportedBundleSpecification`` is raised when the compression or
128 127 bundle type/version is not recognized.
129 128
130 129 Note: this function will likely eventually return a more complex data
131 130 structure, including bundle2 part information.
132 131 """
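# Illustrative examples (a sketch, not part of this change) of specs this
# function accepts:
#   'gzip-v1'                      -> compression 'gzip', version 'v1'
#   'bzip2-v2;obsolescence=true'   -> version 'v2', params
#                                     {'obsolescence': 'true'}
#   'none-packed1;requirements=revlogv1' -> packed1 stream clone bundle
#                                           with a requirements parameter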
133 132 def parseparams(s):
134 133 if ';' not in s:
135 134 return s, {}
136 135
137 136 params = {}
138 137 version, paramstr = s.split(';', 1)
139 138
140 139 for p in paramstr.split(';'):
141 140 if '=' not in p:
142 141 raise error.InvalidBundleSpecification(
143 142 _('invalid bundle specification: '
144 143 'missing "=" in parameter: %s') % p)
145 144
146 145 key, value = p.split('=', 1)
147 146 key = urlreq.unquote(key)
148 147 value = urlreq.unquote(value)
149 148 params[key] = value
150 149
151 150 return version, params
152 151
153 152
154 153 if strict and '-' not in spec:
155 154 raise error.InvalidBundleSpecification(
156 155 _('invalid bundle specification; '
157 156 'must be prefixed with compression: %s') % spec)
158 157
159 158 if '-' in spec:
160 159 compression, version = spec.split('-', 1)
161 160
162 161 if compression not in util.compengines.supportedbundlenames:
163 162 raise error.UnsupportedBundleSpecification(
164 163 _('%s compression is not supported') % compression)
165 164
166 165 version, params = parseparams(version)
167 166
168 167 if version not in _bundlespeccgversions:
169 168 raise error.UnsupportedBundleSpecification(
170 169 _('%s is not a recognized bundle version') % version)
171 170 else:
172 171 # Value could be just the compression or just the version, in which
173 172 # case some defaults are assumed (but only when not in strict mode).
174 173 assert not strict
175 174
176 175 spec, params = parseparams(spec)
177 176
178 177 if spec in util.compengines.supportedbundlenames:
179 178 compression = spec
180 179 version = 'v1'
181 180 # Generaldelta repos require v2.
182 181 if 'generaldelta' in repo.requirements:
183 182 version = 'v2'
184 183 # Modern compression engines require v2.
185 184 if compression not in _bundlespecv1compengines:
186 185 version = 'v2'
187 186 elif spec in _bundlespeccgversions:
188 187 if spec == 'packed1':
189 188 compression = 'none'
190 189 else:
191 190 compression = 'bzip2'
192 191 version = spec
193 192 else:
194 193 raise error.UnsupportedBundleSpecification(
195 194 _('%s is not a recognized bundle specification') % spec)
196 195
197 196 # Bundle version 1 only supports a known set of compression engines.
198 197 if version == 'v1' and compression not in _bundlespecv1compengines:
199 198 raise error.UnsupportedBundleSpecification(
200 199 _('compression engine %s is not supported on v1 bundles') %
201 200 compression)
202 201
203 202 # The specification for packed1 can optionally declare the data formats
204 203 # required to apply it. If we see this metadata, compare against what the
205 204 # repo supports and error if the bundle isn't compatible.
206 205 if version == 'packed1' and 'requirements' in params:
207 206 requirements = set(params['requirements'].split(','))
208 207 missingreqs = requirements - repo.supportedformats
209 208 if missingreqs:
210 209 raise error.UnsupportedBundleSpecification(
211 210 _('missing support for repository features: %s') %
212 211 ', '.join(sorted(missingreqs)))
213 212
214 213 # Compute contentopts based on the version
215 214 contentopts = _bundlespeccontentopts.get(version, {}).copy()
216 215
217 216 # Process the variants
218 217 if "stream" in params and params["stream"] == "v2":
219 218 variant = _bundlespecvariants["streamv2"]
220 219 contentopts.update(variant)
221 220
222 221 engine = util.compengines.forbundlename(compression)
223 222 compression, wirecompression = engine.bundletype()
224 223 wireversion = _bundlespeccgversions[version]
225 224
226 225 return bundlespec(compression, wirecompression, version, wireversion,
227 226 params, contentopts)
228 227
229 228 def readbundle(ui, fh, fname, vfs=None):
230 229 header = changegroup.readexactly(fh, 4)
231 230
232 231 alg = None
233 232 if not fname:
234 233 fname = "stream"
235 234 if not header.startswith('HG') and header.startswith('\0'):
236 235 fh = changegroup.headerlessfixup(fh, header)
237 236 header = "HG10"
238 237 alg = 'UN'
239 238 elif vfs:
240 239 fname = vfs.join(fname)
241 240
242 241 magic, version = header[0:2], header[2:4]
243 242
244 243 if magic != 'HG':
245 244 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
246 245 if version == '10':
247 246 if alg is None:
248 247 alg = changegroup.readexactly(fh, 2)
249 248 return changegroup.cg1unpacker(fh, alg)
250 249 elif version.startswith('2'):
251 250 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
252 251 elif version == 'S1':
253 252 return streamclone.streamcloneapplier(fh)
254 253 else:
255 254 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
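# Header examples (illustrative, not part of this change): 'HG10' followed by
# a two-byte compression id such as 'UN', 'GZ' or 'BZ' yields a cg1unpacker,
# 'HG20' yields a bundle2 unbundler, and 'HGS1' a stream clone applier.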
256 255
257 256 def getbundlespec(ui, fh):
258 257 """Infer the bundlespec from a bundle file handle.
259 258
260 259 The input file handle is seeked and the original seek position is not
261 260 restored.
262 261 """
263 262 def speccompression(alg):
264 263 try:
265 264 return util.compengines.forbundletype(alg).bundletype()[0]
266 265 except KeyError:
267 266 return None
268 267
269 268 b = readbundle(ui, fh, None)
270 269 if isinstance(b, changegroup.cg1unpacker):
271 270 alg = b._type
272 271 if alg == '_truncatedBZ':
273 272 alg = 'BZ'
274 273 comp = speccompression(alg)
275 274 if not comp:
276 275 raise error.Abort(_('unknown compression algorithm: %s') % alg)
277 276 return '%s-v1' % comp
278 277 elif isinstance(b, bundle2.unbundle20):
279 278 if 'Compression' in b.params:
280 279 comp = speccompression(b.params['Compression'])
281 280 if not comp:
282 281 raise error.Abort(_('unknown compression algorithm: %s') % comp)
283 282 else:
284 283 comp = 'none'
285 284
286 285 version = None
287 286 for part in b.iterparts():
288 287 if part.type == 'changegroup':
289 288 version = part.params['version']
290 289 if version in ('01', '02'):
291 290 version = 'v2'
292 291 else:
293 292 raise error.Abort(_('changegroup version %s does not have '
294 293 'a known bundlespec') % version,
295 294 hint=_('try upgrading your Mercurial '
296 295 'client'))
297 296 elif part.type == 'stream2' and version is None:
298 297 # A stream2 part must be part of a v2 bundle
299 298 version = "v2"
300 299 requirements = urlreq.unquote(part.params['requirements'])
301 300 splitted = requirements.split()
302 301 params = bundle2._formatrequirementsparams(splitted)
303 302 return 'none-v2;stream=v2;%s' % params
304 303
305 304 if not version:
306 305 raise error.Abort(_('could not identify changegroup version in '
307 306 'bundle'))
308 307
309 308 return '%s-%s' % (comp, version)
310 309 elif isinstance(b, streamclone.streamcloneapplier):
311 310 requirements = streamclone.readbundle1header(fh)[2]
312 311 formatted = bundle2._formatrequirementsparams(requirements)
313 312 return 'none-packed1;%s' % formatted
314 313 else:
315 314 raise error.Abort(_('unknown bundle type: %s') % b)
316 315
317 316 def _computeoutgoing(repo, heads, common):
318 317 """Computes which revs are outgoing given a set of common
319 318 and a set of heads.
320 319
321 320 This is a separate function so extensions can have access to
322 321 the logic.
323 322
324 323 Returns a discovery.outgoing object.
325 324 """
326 325 cl = repo.changelog
327 326 if common:
328 327 hasnode = cl.hasnode
329 328 common = [n for n in common if hasnode(n)]
330 329 else:
331 330 common = [nullid]
332 331 if not heads:
333 332 heads = cl.heads()
334 333 return discovery.outgoing(repo, common, heads)
335 334
336 335 def _forcebundle1(op):
337 336 """return true if a pull/push must use bundle1
338 337
339 338 This function is used to allow testing of the older bundle version"""
340 339 ui = op.repo.ui
341 340 # The goal of this config is to allow developers to choose the bundle
342 341 # version used during exchange. This is especially handy during tests.
343 342 # Value is a list of bundle versions to pick from; the highest version
344 343 # should be used.
345 344 #
346 345 # developer config: devel.legacy.exchange
347 346 exchange = ui.configlist('devel', 'legacy.exchange')
348 347 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
349 348 return forcebundle1 or not op.remote.capable('bundle2')
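# Illustrative hgrc snippet (a sketch): forcing bundle1 during tests via the
# developer config read above. Note that 'bundle2' appearing in the list
# takes precedence and re-enables bundle2.
#
#   [devel]
#   legacy.exchange = bundle1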
350 349
351 350 class pushoperation(object):
352 351 """An object that represents a single push operation
353 352
354 353 Its purpose is to carry push related state and very common operations.
355 354
356 355 A new pushoperation should be created at the beginning of each push and
357 356 discarded afterward.
358 357 """
359 358
360 359 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
361 360 bookmarks=(), pushvars=None):
362 361 # repo we push from
363 362 self.repo = repo
364 363 self.ui = repo.ui
365 364 # repo we push to
366 365 self.remote = remote
367 366 # force option provided
368 367 self.force = force
369 368 # revs to be pushed (None is "all")
370 369 self.revs = revs
371 370 # bookmark explicitly pushed
372 371 self.bookmarks = bookmarks
373 372 # allow push of new branch
374 373 self.newbranch = newbranch
375 374 # step already performed
376 375 # (used to check what steps have already been performed through bundle2)
377 376 self.stepsdone = set()
378 377 # Integer version of the changegroup push result
379 378 # - None means nothing to push
380 379 # - 0 means HTTP error
381 380 # - 1 means we pushed and remote head count is unchanged *or*
382 381 # we have outgoing changesets but refused to push
383 382 # - other values as described by addchangegroup()
384 383 self.cgresult = None
385 384 # Boolean value for the bookmark push
386 385 self.bkresult = None
387 386 # discover.outgoing object (contains common and outgoing data)
388 387 self.outgoing = None
389 388 # all remote topological heads before the push
390 389 self.remoteheads = None
391 390 # Details of the remote branch pre and post push
392 391 #
393 392 # mapping: {'branch': ([remoteheads],
394 393 # [newheads],
395 394 # [unsyncedheads],
396 395 # [discardedheads])}
397 396 # - branch: the branch name
398 397 # - remoteheads: the list of remote heads known locally
399 398 # None if the branch is new
400 399 # - newheads: the new remote heads (known locally) with outgoing pushed
401 400 # - unsyncedheads: the list of remote heads unknown locally.
402 401 # - discardedheads: the list of remote heads made obsolete by the push
403 402 self.pushbranchmap = None
404 403 # testable as a boolean indicating if any nodes are missing locally.
405 404 self.incoming = None
406 405 # summary of the remote phase situation
407 406 self.remotephases = None
408 407 # phases changes that must be pushed alongside the changesets
409 408 self.outdatedphases = None
410 409 # phases changes that must be pushed if changeset push fails
411 410 self.fallbackoutdatedphases = None
412 411 # outgoing obsmarkers
413 412 self.outobsmarkers = set()
414 413 # outgoing bookmarks
415 414 self.outbookmarks = []
416 415 # transaction manager
417 416 self.trmanager = None
418 417 # map { pushkey partid -> callback handling failure}
419 418 # used to handle exception from mandatory pushkey part failure
420 419 self.pkfailcb = {}
421 420 # an iterable of pushvars or None
422 421 self.pushvars = pushvars
423 422
424 423 @util.propertycache
425 424 def futureheads(self):
426 425 """future remote heads if the changeset push succeeds"""
427 426 return self.outgoing.missingheads
428 427
429 428 @util.propertycache
430 429 def fallbackheads(self):
431 430 """future remote heads if the changeset push fails"""
432 431 if self.revs is None:
433 432 # no target to push, all common heads are relevant
434 433 return self.outgoing.commonheads
435 434 unfi = self.repo.unfiltered()
436 435 # I want cheads = heads(::missingheads and ::commonheads)
437 436 # (missingheads is revs with secret changeset filtered out)
438 437 #
439 438 # This can be expressed as:
440 439 # cheads = ( (missingheads and ::commonheads)
441 440 # + (commonheads and ::missingheads)
442 441 # )
443 442 #
444 443 # while trying to push we already computed the following:
445 444 # common = (::commonheads)
446 445 # missing = ((commonheads::missingheads) - commonheads)
447 446 #
448 447 # We can pick:
449 448 # * missingheads part of common (::commonheads)
450 449 common = self.outgoing.common
451 450 nm = self.repo.changelog.nodemap
452 451 cheads = [node for node in self.revs if nm[node] in common]
453 452 # and
454 453 # * commonheads parents on missing
455 454 revset = unfi.set('%ln and parents(roots(%ln))',
456 455 self.outgoing.commonheads,
457 456 self.outgoing.missing)
458 457 cheads.extend(c.node() for c in revset)
459 458 return cheads
460 459
461 460 @property
462 461 def commonheads(self):
463 462 """set of all common heads after changeset bundle push"""
464 463 if self.cgresult:
465 464 return self.futureheads
466 465 else:
467 466 return self.fallbackheads
468 467
469 468 # mapping of message used when pushing bookmark
470 469 bookmsgmap = {'update': (_("updating bookmark %s\n"),
471 470 _('updating bookmark %s failed!\n')),
472 471 'export': (_("exporting bookmark %s\n"),
473 472 _('exporting bookmark %s failed!\n')),
474 473 'delete': (_("deleting remote bookmark %s\n"),
475 474 _('deleting remote bookmark %s failed!\n')),
476 475 }
477 476
478 477
479 478 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
480 479 opargs=None):
481 480 '''Push outgoing changesets (limited by revs) from a local
482 481 repository to remote. Return an integer:
483 482 - None means nothing to push
484 483 - 0 means HTTP error
485 484 - 1 means we pushed and remote head count is unchanged *or*
486 485 we have outgoing changesets but refused to push
487 486 - other values as described by addchangegroup()
488 487 '''
489 488 if opargs is None:
490 489 opargs = {}
491 490 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
492 491 **pycompat.strkwargs(opargs))
493 492 if pushop.remote.local():
494 493 missing = (set(pushop.repo.requirements)
495 494 - pushop.remote.local().supported)
496 495 if missing:
497 496 msg = _("required features are not"
498 497 " supported in the destination:"
499 498 " %s") % (', '.join(sorted(missing)))
500 499 raise error.Abort(msg)
501 500
502 501 if not pushop.remote.canpush():
503 502 raise error.Abort(_("destination does not support push"))
504 503
505 504 if not pushop.remote.capable('unbundle'):
506 505 raise error.Abort(_('cannot push: destination does not support the '
507 506 'unbundle wire protocol command'))
508 507
509 508 # get lock as we might write phase data
510 509 wlock = lock = None
511 510 try:
512 511 # bundle2 push may receive a reply bundle touching bookmarks or other
513 512 # things requiring the wlock. Take it now to ensure proper ordering.
514 513 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
515 514 if (not _forcebundle1(pushop)) and maypushback:
516 515 wlock = pushop.repo.wlock()
517 516 lock = pushop.repo.lock()
518 517 pushop.trmanager = transactionmanager(pushop.repo,
519 518 'push-response',
520 519 pushop.remote.url())
521 520 except error.LockUnavailable as err:
522 521 # source repo cannot be locked.
523 522 # We do not abort the push, but just disable the local phase
524 523 # synchronisation.
525 524 msg = 'cannot lock source repository: %s\n' % err
526 525 pushop.ui.debug(msg)
527 526
528 527 with wlock or util.nullcontextmanager(), \
529 528 lock or util.nullcontextmanager(), \
530 529 pushop.trmanager or util.nullcontextmanager():
531 530 pushop.repo.checkpush(pushop)
532 531 _pushdiscovery(pushop)
533 532 if not _forcebundle1(pushop):
534 533 _pushbundle2(pushop)
535 534 _pushchangeset(pushop)
536 535 _pushsyncphase(pushop)
537 536 _pushobsolete(pushop)
538 537 _pushbookmark(pushop)
539 538
540 539 if repo.ui.configbool('experimental', 'remotenames'):
541 540 logexchange.pullremotenames(repo, remote)
542 541
543 542 return pushop
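# Illustrative usage (a sketch; repo, remote, node and ui are assumed names):
# pushing selected revisions and a bookmark to a peer, then checking the
# changegroup result described in the docstring above.
#
#   pushop = exchange.push(repo, remote, revs=[node], bookmarks=('stable',))
#   if pushop.cgresult == 0:
#       ui.warn('push failed with an HTTP error\n')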
544 543
545 544 # list of steps to perform discovery before push
546 545 pushdiscoveryorder = []
547 546
548 547 # Mapping between step name and function
549 548 #
550 549 # This exists to help extensions wrap steps if necessary
551 550 pushdiscoverymapping = {}
552 551
553 552 def pushdiscovery(stepname):
554 553 """decorator for function performing discovery before push
555 554
556 555 The function is added to the step -> function mapping and appended to the
557 556 list of steps. Beware that decorated functions will be added in order (this
558 557 may matter).
559 558
560 559 You can only use this decorator for a new step; if you want to wrap a step
561 560 from an extension, change the pushdiscovery dictionary directly."""
562 561 def dec(func):
563 562 assert stepname not in pushdiscoverymapping
564 563 pushdiscoverymapping[stepname] = func
565 564 pushdiscoveryorder.append(stepname)
566 565 return func
567 566 return dec
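# Example (a sketch): an extension registering an additional discovery step
# with the decorator above; 'mystep' is a hypothetical name.
#
#   @exchange.pushdiscovery('mystep')
#   def _pushdiscoverymystep(pushop):
#       pushop.ui.debug('running my discovery step\n')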
568 567
569 568 def _pushdiscovery(pushop):
570 569 """Run all discovery steps"""
571 570 for stepname in pushdiscoveryorder:
572 571 step = pushdiscoverymapping[stepname]
573 572 step(pushop)
574 573
575 574 @pushdiscovery('changeset')
576 575 def _pushdiscoverychangeset(pushop):
577 576 """discover the changesets that need to be pushed"""
578 577 fci = discovery.findcommonincoming
579 578 if pushop.revs:
580 579 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
581 580 ancestorsof=pushop.revs)
582 581 else:
583 582 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
584 583 common, inc, remoteheads = commoninc
585 584 fco = discovery.findcommonoutgoing
586 585 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
587 586 commoninc=commoninc, force=pushop.force)
588 587 pushop.outgoing = outgoing
589 588 pushop.remoteheads = remoteheads
590 589 pushop.incoming = inc
591 590
592 591 @pushdiscovery('phase')
593 592 def _pushdiscoveryphase(pushop):
594 593 """discover the phase that needs to be pushed
595 594
596 595 (computed for both the success and failure cases of the changeset push)"""
597 596 outgoing = pushop.outgoing
598 597 unfi = pushop.repo.unfiltered()
599 598 remotephases = listkeys(pushop.remote, 'phases')
600 599
601 600 if (pushop.ui.configbool('ui', '_usedassubrepo')
602 601 and remotephases # server supports phases
603 602 and not pushop.outgoing.missing # no changesets to be pushed
604 603 and remotephases.get('publishing', False)):
605 604 # When:
606 605 # - this is a subrepo push
607 606 # - and the remote supports phases
608 607 # - and no changesets are to be pushed
609 608 # - and remote is publishing
610 609 # We may be in issue 3781 case!
611 610 # We drop the possible phase synchronisation done by
612 611 # courtesy to publish changesets possibly locally draft
613 612 # on the remote.
614 613 pushop.outdatedphases = []
615 614 pushop.fallbackoutdatedphases = []
616 615 return
617 616
618 617 pushop.remotephases = phases.remotephasessummary(pushop.repo,
619 618 pushop.fallbackheads,
620 619 remotephases)
621 620 droots = pushop.remotephases.draftroots
622 621
623 622 extracond = ''
624 623 if not pushop.remotephases.publishing:
625 624 extracond = ' and public()'
626 625 revset = 'heads((%%ln::%%ln) %s)' % extracond
627 626 # Get the list of all revs that are draft on the remote but public here.
628 627 # XXX Beware that this revset breaks if droots is not strictly
629 628 # XXX roots; we may want to ensure it is, but that is costly
630 629 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
631 630 if not outgoing.missing:
632 631 future = fallback
633 632 else:
634 633 # add the changesets we are going to push as draft
635 634 #
636 635 # should not be necessary for a publishing server, but because of an
637 636 # issue fixed in xxxxx we have to do it anyway.
638 637 fdroots = list(unfi.set('roots(%ln + %ln::)',
639 638 outgoing.missing, droots))
640 639 fdroots = [f.node() for f in fdroots]
641 640 future = list(unfi.set(revset, fdroots, pushop.futureheads))
642 641 pushop.outdatedphases = future
643 642 pushop.fallbackoutdatedphases = fallback
644 643
645 644 @pushdiscovery('obsmarker')
646 645 def _pushdiscoveryobsmarkers(pushop):
647 646 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
648 647 return
649 648
650 649 if not pushop.repo.obsstore:
651 650 return
652 651
653 652 if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
654 653 return
655 654
656 655 repo = pushop.repo
657 656 # very naive computation that can be quite expensive on big repos.
658 657 # However, evolution is currently slow on them anyway.
659 658 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
660 659 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
661 660
662 661 @pushdiscovery('bookmarks')
663 662 def _pushdiscoverybookmarks(pushop):
664 663 ui = pushop.ui
665 664 repo = pushop.repo.unfiltered()
666 665 remote = pushop.remote
667 666 ui.debug("checking for updated bookmarks\n")
668 667 ancestors = ()
669 668 if pushop.revs:
670 669 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
671 670 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
672 671
673 672 remotebookmark = listkeys(remote, 'bookmarks')
674 673
675 674 explicit = set([repo._bookmarks.expandname(bookmark)
676 675 for bookmark in pushop.bookmarks])
677 676
678 677 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
679 678 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
680 679
681 680 def safehex(x):
682 681 if x is None:
683 682 return x
684 683 return hex(x)
685 684
686 685 def hexifycompbookmarks(bookmarks):
687 686 return [(b, safehex(scid), safehex(dcid))
688 687 for (b, scid, dcid) in bookmarks]
689 688
690 689 comp = [hexifycompbookmarks(marks) for marks in comp]
691 690 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
692 691
693 692 def _processcompared(pushop, pushed, explicit, remotebms, comp):
694 693 """take decisions on bookmarks based on the comparison with remote ones
695 694
696 695 Exists to help extensions that want to alter this behavior.
697 696 """
698 697 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
699 698
700 699 repo = pushop.repo
701 700
702 701 for b, scid, dcid in advsrc:
703 702 if b in explicit:
704 703 explicit.remove(b)
705 704 if not pushed or repo[scid].rev() in pushed:
706 705 pushop.outbookmarks.append((b, dcid, scid))
707 706 # search for added bookmarks
708 707 for b, scid, dcid in addsrc:
709 708 if b in explicit:
710 709 explicit.remove(b)
711 710 pushop.outbookmarks.append((b, '', scid))
712 711 # search for overwritten bookmark
713 712 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
714 713 if b in explicit:
715 714 explicit.remove(b)
716 715 pushop.outbookmarks.append((b, dcid, scid))
717 716 # search for bookmark to delete
718 717 for b, scid, dcid in adddst:
719 718 if b in explicit:
720 719 explicit.remove(b)
721 720 # treat as "deleted locally"
722 721 pushop.outbookmarks.append((b, dcid, ''))
723 722 # identical bookmarks shouldn't get reported
724 723 for b, scid, dcid in same:
725 724 if b in explicit:
726 725 explicit.remove(b)
727 726
728 727 if explicit:
729 728 explicit = sorted(explicit)
730 729 # we should probably list all of them
731 730 pushop.ui.warn(_('bookmark %s does not exist on the local '
732 731 'or remote repository!\n') % explicit[0])
733 732 pushop.bkresult = 2
734 733
735 734 pushop.outbookmarks.sort()
736 735
737 736 def _pushcheckoutgoing(pushop):
738 737 outgoing = pushop.outgoing
739 738 unfi = pushop.repo.unfiltered()
740 739 if not outgoing.missing:
741 740 # nothing to push
742 741 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
743 742 return False
744 743 # something to push
745 744 if not pushop.force:
746 745 # if repo.obsstore evaluates to False --> no obsolete markers,
747 746 # so we can skip the iteration
748 747 if unfi.obsstore:
749 748 # these messages are here for 80-char-limit reasons
750 749 mso = _("push includes obsolete changeset: %s!")
751 750 mspd = _("push includes phase-divergent changeset: %s!")
752 751 mscd = _("push includes content-divergent changeset: %s!")
753 752 mst = {"orphan": _("push includes orphan changeset: %s!"),
754 753 "phase-divergent": mspd,
755 754 "content-divergent": mscd}
756 755 # If we are to push and there is at least one
757 756 # obsolete or unstable changeset in missing, at
758 757 # least one of the missing heads will be obsolete or
759 758 # unstable. So checking heads only is ok
760 759 for node in outgoing.missingheads:
761 760 ctx = unfi[node]
762 761 if ctx.obsolete():
763 762 raise error.Abort(mso % ctx)
764 763 elif ctx.isunstable():
765 764 # TODO print more than one instability in the abort
766 765 # message
767 766 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
768 767
769 768 discovery.checkheads(pushop)
770 769 return True
771 770
772 771 # List of names of steps to perform for an outgoing bundle2, order matters.
773 772 b2partsgenorder = []
774 773
775 774 # Mapping between step name and function
776 775 #
777 776 # This exists to help extensions wrap steps if necessary
778 777 b2partsgenmapping = {}
779 778
780 779 def b2partsgenerator(stepname, idx=None):
781 780 """decorator for function generating bundle2 part
782 781
783 782 The function is added to the step -> function mapping and appended to the
784 783 list of steps. Beware that decorated functions will be added in order
785 784 (this may matter).
786 785
787 786 You can only use this decorator for new steps; if you want to wrap a step
788 787 from an extension, change the b2partsgenmapping dictionary directly.
789 788 def dec(func):
790 789 assert stepname not in b2partsgenmapping
791 790 b2partsgenmapping[stepname] = func
792 791 if idx is None:
793 792 b2partsgenorder.append(stepname)
794 793 else:
795 794 b2partsgenorder.insert(idx, stepname)
796 795 return func
797 796 return dec
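# Example (a sketch): an extension adding a bundle2 part generator; the part
# name and behavior are hypothetical. Returning a callable registers it as a
# reply handler, mirroring the generators below.
#
#   @exchange.b2partsgenerator('mypart')
#   def _pushb2mypart(pushop, bundler):
#       if 'mypart' in pushop.stepsdone:
#           return
#       pushop.stepsdone.add('mypart')
#       bundler.newpart('mypart', data='')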
798 797
799 798 def _pushb2ctxcheckheads(pushop, bundler):
800 799 """Generate race condition checking parts
801 800
802 801 Exists as an independent function to aid extensions
803 802 """
804 803 # * 'force' does not check for push races,
805 804 # * if we don't push anything, there is nothing to check.
806 805 if not pushop.force and pushop.outgoing.missingheads:
807 806 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
808 807 emptyremote = pushop.pushbranchmap is None
809 808 if not allowunrelated or emptyremote:
810 809 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
811 810 else:
812 811 affected = set()
813 812 for branch, heads in pushop.pushbranchmap.iteritems():
814 813 remoteheads, newheads, unsyncedheads, discardedheads = heads
815 814 if remoteheads is not None:
816 815 remote = set(remoteheads)
817 816 affected |= set(discardedheads) & remote
818 817 affected |= remote - set(newheads)
819 818 if affected:
820 819 data = iter(sorted(affected))
821 820 bundler.newpart('check:updated-heads', data=data)
822 821
823 822 def _pushing(pushop):
824 823 """return True if we are pushing anything"""
825 824 return bool(pushop.outgoing.missing
826 825 or pushop.outdatedphases
827 826 or pushop.outobsmarkers
828 827 or pushop.outbookmarks)
829 828
830 829 @b2partsgenerator('check-bookmarks')
831 830 def _pushb2checkbookmarks(pushop, bundler):
832 831 """insert bookmark move checking"""
833 832 if not _pushing(pushop) or pushop.force:
834 833 return
835 834 b2caps = bundle2.bundle2caps(pushop.remote)
836 835 hasbookmarkcheck = 'bookmarks' in b2caps
837 836 if not (pushop.outbookmarks and hasbookmarkcheck):
838 837 return
839 838 data = []
840 839 for book, old, new in pushop.outbookmarks:
841 840 old = bin(old)
842 841 data.append((book, old))
843 842 checkdata = bookmod.binaryencode(data)
844 843 bundler.newpart('check:bookmarks', data=checkdata)
845 844
846 845 @b2partsgenerator('check-phases')
847 846 def _pushb2checkphases(pushop, bundler):
848 847 """insert phase move checking"""
849 848 if not _pushing(pushop) or pushop.force:
850 849 return
851 850 b2caps = bundle2.bundle2caps(pushop.remote)
852 851 hasphaseheads = 'heads' in b2caps.get('phases', ())
853 852 if pushop.remotephases is not None and hasphaseheads:
854 853 # check that the remote phase has not changed
855 854 checks = [[] for p in phases.allphases]
856 855 checks[phases.public].extend(pushop.remotephases.publicheads)
857 856 checks[phases.draft].extend(pushop.remotephases.draftroots)
858 857 if any(checks):
859 858 for nodes in checks:
860 859 nodes.sort()
861 860 checkdata = phases.binaryencode(checks)
862 861 bundler.newpart('check:phases', data=checkdata)
863 862
864 863 @b2partsgenerator('changeset')
865 864 def _pushb2ctx(pushop, bundler):
866 865 """handle changegroup push through bundle2
867 866
868 867 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
869 868 """
870 869 if 'changesets' in pushop.stepsdone:
871 870 return
872 871 pushop.stepsdone.add('changesets')
873 872 # Send known heads to the server for race detection.
874 873 if not _pushcheckoutgoing(pushop):
875 874 return
876 875 pushop.repo.prepushoutgoinghooks(pushop)
877 876
878 877 _pushb2ctxcheckheads(pushop, bundler)
879 878
880 879 b2caps = bundle2.bundle2caps(pushop.remote)
881 880 version = '01'
882 881 cgversions = b2caps.get('changegroup')
883 882 if cgversions: # 3.1 and 3.2 ship with an empty value
884 883 cgversions = [v for v in cgversions
885 884 if v in changegroup.supportedoutgoingversions(
886 885 pushop.repo)]
887 886 if not cgversions:
888 887 raise ValueError(_('no common changegroup version'))
889 888 version = max(cgversions)
890 889 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
891 890 'push')
892 891 cgpart = bundler.newpart('changegroup', data=cgstream)
893 892 if cgversions:
894 893 cgpart.addparam('version', version)
895 894 if 'treemanifest' in pushop.repo.requirements:
896 895 cgpart.addparam('treemanifest', '1')
897 896 def handlereply(op):
898 897 """extract addchangegroup returns from server reply"""
899 898 cgreplies = op.records.getreplies(cgpart.id)
900 899 assert len(cgreplies['changegroup']) == 1
901 900 pushop.cgresult = cgreplies['changegroup'][0]['return']
902 901 return handlereply
903 902
904 903 @b2partsgenerator('phase')
905 904 def _pushb2phases(pushop, bundler):
906 905 """handle phase push through bundle2"""
907 906 if 'phases' in pushop.stepsdone:
908 907 return
909 908 b2caps = bundle2.bundle2caps(pushop.remote)
910 909 ui = pushop.repo.ui
911 910
912 911 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
913 912 haspushkey = 'pushkey' in b2caps
914 913 hasphaseheads = 'heads' in b2caps.get('phases', ())
915 914
916 915 if hasphaseheads and not legacyphase:
917 916 return _pushb2phaseheads(pushop, bundler)
918 917 elif haspushkey:
919 918 return _pushb2phasespushkey(pushop, bundler)
920 919
921 920 def _pushb2phaseheads(pushop, bundler):
922 921 """push phase information through a bundle2 - binary part"""
923 922 pushop.stepsdone.add('phases')
924 923 if pushop.outdatedphases:
925 924 updates = [[] for p in phases.allphases]
926 925 updates[0].extend(h.node() for h in pushop.outdatedphases)
927 926 phasedata = phases.binaryencode(updates)
928 927 bundler.newpart('phase-heads', data=phasedata)
929 928
930 929 def _pushb2phasespushkey(pushop, bundler):
931 930 """push phase information through a bundle2 - pushkey part"""
932 931 pushop.stepsdone.add('phases')
933 932 part2node = []
934 933
935 934 def handlefailure(pushop, exc):
936 935 targetid = int(exc.partid)
937 936 for partid, node in part2node:
938 937 if partid == targetid:
939 938 raise error.Abort(_('updating %s to public failed') % node)
940 939
941 940 enc = pushkey.encode
942 941 for newremotehead in pushop.outdatedphases:
943 942 part = bundler.newpart('pushkey')
944 943 part.addparam('namespace', enc('phases'))
945 944 part.addparam('key', enc(newremotehead.hex()))
946 945 part.addparam('old', enc('%d' % phases.draft))
947 946 part.addparam('new', enc('%d' % phases.public))
948 947 part2node.append((part.id, newremotehead))
949 948 pushop.pkfailcb[part.id] = handlefailure
950 949
951 950 def handlereply(op):
952 951 for partid, node in part2node:
953 952 partrep = op.records.getreplies(partid)
954 953 results = partrep['pushkey']
955 954 assert len(results) <= 1
956 955 msg = None
957 956 if not results:
958 957 msg = _('server ignored update of %s to public!\n') % node
959 958 elif not int(results[0]['return']):
960 959 msg = _('updating %s to public failed!\n') % node
961 960 if msg is not None:
962 961 pushop.ui.warn(msg)
963 962 return handlereply
964 963
965 964 @b2partsgenerator('obsmarkers')
966 965 def _pushb2obsmarkers(pushop, bundler):
967 966 if 'obsmarkers' in pushop.stepsdone:
968 967 return
969 968 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
970 969 if obsolete.commonversion(remoteversions) is None:
971 970 return
972 971 pushop.stepsdone.add('obsmarkers')
973 972 if pushop.outobsmarkers:
974 973 markers = sorted(pushop.outobsmarkers)
975 974 bundle2.buildobsmarkerspart(bundler, markers)
976 975
977 976 @b2partsgenerator('bookmarks')
978 977 def _pushb2bookmarks(pushop, bundler):
979 978 """handle bookmark push through bundle2"""
980 979 if 'bookmarks' in pushop.stepsdone:
981 980 return
982 981 b2caps = bundle2.bundle2caps(pushop.remote)
983 982
984 983 legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
985 984 legacybooks = 'bookmarks' in legacy
986 985
987 986 if not legacybooks and 'bookmarks' in b2caps:
988 987 return _pushb2bookmarkspart(pushop, bundler)
989 988 elif 'pushkey' in b2caps:
990 989 return _pushb2bookmarkspushkey(pushop, bundler)
991 990
992 991 def _bmaction(old, new):
993 992 """small utility for bookmark pushing"""
994 993 if not old:
995 994 return 'export'
996 995 elif not new:
997 996 return 'delete'
998 997 return 'update'
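# Illustrative behavior of _bmaction (a sketch): a missing old value means the
# bookmark is new ('export'), a missing new value means it is being removed
# ('delete'), anything else is an 'update'.
#
#   _bmaction('', node)  -> 'export'
#   _bmaction(node, '')  -> 'delete'
#   _bmaction(old, new)  -> 'update'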
999 998
1000 999 def _pushb2bookmarkspart(pushop, bundler):
1001 1000 pushop.stepsdone.add('bookmarks')
1002 1001 if not pushop.outbookmarks:
1003 1002 return
1004 1003
1005 1004 allactions = []
1006 1005 data = []
1007 1006 for book, old, new in pushop.outbookmarks:
1008 1007 new = bin(new)
1009 1008 data.append((book, new))
1010 1009 allactions.append((book, _bmaction(old, new)))
1011 1010 checkdata = bookmod.binaryencode(data)
1012 1011 bundler.newpart('bookmarks', data=checkdata)
1013 1012
1014 1013 def handlereply(op):
1015 1014 ui = pushop.ui
1016 1015 # if success
1017 1016 for book, action in allactions:
1018 1017 ui.status(bookmsgmap[action][0] % book)
1019 1018
1020 1019 return handlereply
1021 1020
1022 1021 def _pushb2bookmarkspushkey(pushop, bundler):
1023 1022 pushop.stepsdone.add('bookmarks')
1024 1023 part2book = []
1025 1024 enc = pushkey.encode
1026 1025
1027 1026 def handlefailure(pushop, exc):
1028 1027 targetid = int(exc.partid)
1029 1028 for partid, book, action in part2book:
1030 1029 if partid == targetid:
1031 1030 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1032 1031 # we should not be called for parts we did not generate
1033 1032 assert False
1034 1033
1035 1034 for book, old, new in pushop.outbookmarks:
1036 1035 part = bundler.newpart('pushkey')
1037 1036 part.addparam('namespace', enc('bookmarks'))
1038 1037 part.addparam('key', enc(book))
1039 1038 part.addparam('old', enc(old))
1040 1039 part.addparam('new', enc(new))
1041 1040 action = 'update'
1042 1041 if not old:
1043 1042 action = 'export'
1044 1043 elif not new:
1045 1044 action = 'delete'
1046 1045 part2book.append((part.id, book, action))
1047 1046 pushop.pkfailcb[part.id] = handlefailure
1048 1047
1049 1048 def handlereply(op):
1050 1049 ui = pushop.ui
1051 1050 for partid, book, action in part2book:
1052 1051 partrep = op.records.getreplies(partid)
1053 1052 results = partrep['pushkey']
1054 1053 assert len(results) <= 1
1055 1054 if not results:
1056 1055 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
1057 1056 else:
1058 1057 ret = int(results[0]['return'])
1059 1058 if ret:
1060 1059 ui.status(bookmsgmap[action][0] % book)
1061 1060 else:
1062 1061 ui.warn(bookmsgmap[action][1] % book)
1063 1062 if pushop.bkresult is not None:
1064 1063 pushop.bkresult = 1
1065 1064 return handlereply
1066 1065
1067 1066 @b2partsgenerator('pushvars', idx=0)
1068 1067 def _getbundlesendvars(pushop, bundler):
1069 1068 '''send shellvars via bundle2'''
1070 1069 pushvars = pushop.pushvars
1071 1070 if pushvars:
1072 1071 shellvars = {}
1073 1072 for raw in pushvars:
1074 1073 if '=' not in raw:
1075 1074 msg = ("unable to parse variable '%s', should follow "
1076 1075 "'KEY=VALUE' or 'KEY=' format")
1077 1076 raise error.Abort(msg % raw)
1078 1077 k, v = raw.split('=', 1)
1079 1078 shellvars[k] = v
1080 1079
1081 1080 part = bundler.newpart('pushvars')
1082 1081
1083 1082 for key, value in shellvars.iteritems():
1084 1083 part.addparam(key, value, mandatory=False)
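# Illustrative invocation (a sketch): each raw pushvar must match the
# KEY=VALUE or KEY= format enforced above, e.g. from the command line:
#
#   hg push --pushvars "DEBUG=1" --pushvars "REVIEWER="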
1085 1084
1086 1085 def _pushbundle2(pushop):
1087 1086 """push data to the remote using bundle2
1088 1087
1089 1088 The only currently supported type of data is changegroup but this will
1090 1089 evolve in the future."""
1091 1090 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1092 1091 pushback = (pushop.trmanager
1093 1092 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
1094 1093
1095 1094 # create reply capability
1096 1095 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
1097 1096 allowpushback=pushback,
1098 1097 role='client'))
1099 1098 bundler.newpart('replycaps', data=capsblob)
1100 1099 replyhandlers = []
1101 1100 for partgenname in b2partsgenorder:
1102 1101 partgen = b2partsgenmapping[partgenname]
1103 1102 ret = partgen(pushop, bundler)
1104 1103 if callable(ret):
1105 1104 replyhandlers.append(ret)
1106 1105 # do not push if nothing to push
1107 1106 if bundler.nbparts <= 1:
1108 1107 return
1109 1108 stream = util.chunkbuffer(bundler.getchunks())
1110 1109 try:
1111 1110 try:
1112 1111 with pushop.remote.commandexecutor() as e:
1113 1112 reply = e.callcommand('unbundle', {
1114 1113 'bundle': stream,
1115 1114 'heads': ['force'],
1116 1115 'url': pushop.remote.url(),
1117 1116 }).result()
1118 1117 except error.BundleValueError as exc:
1119 1118 raise error.Abort(_('missing support for %s') % exc)
1120 1119 try:
1121 1120 trgetter = None
1122 1121 if pushback:
1123 1122 trgetter = pushop.trmanager.transaction
1124 1123 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1125 1124 except error.BundleValueError as exc:
1126 1125 raise error.Abort(_('missing support for %s') % exc)
1127 1126 except bundle2.AbortFromPart as exc:
1128 1127 pushop.ui.status(_('remote: %s\n') % exc)
1129 1128 if exc.hint is not None:
1130 1129 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
1131 1130 raise error.Abort(_('push failed on remote'))
1132 1131 except error.PushkeyFailed as exc:
1133 1132 partid = int(exc.partid)
1134 1133 if partid not in pushop.pkfailcb:
1135 1134 raise
1136 1135 pushop.pkfailcb[partid](pushop, exc)
1137 1136 for rephand in replyhandlers:
1138 1137 rephand(op)
1139 1138
1140 1139 def _pushchangeset(pushop):
1141 1140 """Make the actual push of the changeset bundle to the remote repo"""
1142 1141 if 'changesets' in pushop.stepsdone:
1143 1142 return
1144 1143 pushop.stepsdone.add('changesets')
1145 1144 if not _pushcheckoutgoing(pushop):
1146 1145 return
1147 1146
1148 1147 # Should have verified this in push().
1149 1148 assert pushop.remote.capable('unbundle')
1150 1149
1151 1150 pushop.repo.prepushoutgoinghooks(pushop)
1152 1151 outgoing = pushop.outgoing
1153 1152 # TODO: get bundlecaps from remote
1154 1153 bundlecaps = None
1155 1154 # create a changegroup from local
1156 1155 if pushop.revs is None and not (outgoing.excluded
1157 1156 or pushop.repo.changelog.filteredrevs):
1158 1157 # push everything,
1159 1158 # use the fast path, no race possible on push
1160 1159 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1161 1160 fastpath=True, bundlecaps=bundlecaps)
1162 1161 else:
1163 1162 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1164 1163 'push', bundlecaps=bundlecaps)
1165 1164
1166 1165 # apply changegroup to remote
1167 1166 # local repo finds heads on server, finds out what
1168 1167 # revs it must push. once revs transferred, if server
1169 1168 # finds it has different heads (someone else won
1170 1169 # commit/push race), server aborts.
1171 1170 if pushop.force:
1172 1171 remoteheads = ['force']
1173 1172 else:
1174 1173 remoteheads = pushop.remoteheads
1175 1174 # ssh: return remote's addchangegroup()
1176 1175 # http: return remote's addchangegroup() or 0 for error
1177 1176 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1178 1177 pushop.repo.url())
1179 1178
1180 1179 def _pushsyncphase(pushop):
1181 1180 """synchronise phase information locally and remotely"""
1182 1181 cheads = pushop.commonheads
1183 1182 # even when we don't push, exchanging phase data is useful
1184 1183 remotephases = listkeys(pushop.remote, 'phases')
1185 1184 if (pushop.ui.configbool('ui', '_usedassubrepo')
1186 1185 and remotephases # server supports phases
1187 1186 and pushop.cgresult is None # nothing was pushed
1188 1187 and remotephases.get('publishing', False)):
1189 1188 # When:
1190 1189 # - this is a subrepo push
1191 1190 # - and the remote supports phases
1192 1191 # - and no changeset was pushed
1193 1192 # - and remote is publishing
1194 1193 # We may be in issue 3871 case!
1195 1194 # We drop the possible phase synchronisation done by
1196 1195 # courtesy to publish changesets possibly locally draft
1197 1196 # on the remote.
1198 1197 remotephases = {'publishing': 'True'}
1199 1198 if not remotephases: # old server or public-only reply from non-publishing
1200 1199 _localphasemove(pushop, cheads)
1201 1200 # don't push any phase data as there is nothing to push
1202 1201 else:
1203 1202 ana = phases.analyzeremotephases(pushop.repo, cheads,
1204 1203 remotephases)
1205 1204 pheads, droots = ana
1206 1205 ### Apply remote phase on local
1207 1206 if remotephases.get('publishing', False):
1208 1207 _localphasemove(pushop, cheads)
1209 1208 else: # publish = False
1210 1209 _localphasemove(pushop, pheads)
1211 1210 _localphasemove(pushop, cheads, phases.draft)
1212 1211 ### Apply local phase on remote
1213 1212
1214 1213 if pushop.cgresult:
1215 1214 if 'phases' in pushop.stepsdone:
1216 1215 # phases already pushed through bundle2
1217 1216 return
1218 1217 outdated = pushop.outdatedphases
1219 1218 else:
1220 1219 outdated = pushop.fallbackoutdatedphases
1221 1220
1222 1221 pushop.stepsdone.add('phases')
1223 1222
1224 1223 # filter heads already turned public by the push
1225 1224 outdated = [c for c in outdated if c.node() not in pheads]
1226 1225 # fallback to independent pushkey command
1227 1226 for newremotehead in outdated:
1228 1227 with pushop.remote.commandexecutor() as e:
1229 1228 r = e.callcommand('pushkey', {
1230 1229 'namespace': 'phases',
1231 1230 'key': newremotehead.hex(),
1232 1231 'old': '%d' % phases.draft,
1233 1232 'new': '%d' % phases.public
1234 1233 }).result()
1235 1234
1236 1235 if not r:
1237 1236 pushop.ui.warn(_('updating %s to public failed!\n')
1238 1237 % newremotehead)
1239 1238
1240 1239 def _localphasemove(pushop, nodes, phase=phases.public):
1241 1240 """move <nodes> to <phase> in the local source repo"""
1242 1241 if pushop.trmanager:
1243 1242 phases.advanceboundary(pushop.repo,
1244 1243 pushop.trmanager.transaction(),
1245 1244 phase,
1246 1245 nodes)
1247 1246 else:
1248 1247 # repo is not locked, do not change any phases!
1249 1248 # Informs the user that phases should have been moved when
1250 1249 # applicable.
1251 1250 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1252 1251 phasestr = phases.phasenames[phase]
1253 1252 if actualmoves:
1254 1253 pushop.ui.status(_('cannot lock source repo, skipping '
1255 1254 'local %s phase update\n') % phasestr)
1256 1255
1257 1256 def _pushobsolete(pushop):
1258 1257 """utility function to push obsolete markers to a remote"""
1259 1258 if 'obsmarkers' in pushop.stepsdone:
1260 1259 return
1261 1260 repo = pushop.repo
1262 1261 remote = pushop.remote
1263 1262 pushop.stepsdone.add('obsmarkers')
1264 1263 if pushop.outobsmarkers:
1265 1264 pushop.ui.debug('try to push obsolete markers to remote\n')
1266 1265 rslts = []
1267 1266 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1268 1267 for key in sorted(remotedata, reverse=True):
1269 1268 # reverse sort to ensure we end with dump0
1270 1269 data = remotedata[key]
1271 1270 rslts.append(remote.pushkey('obsolete', key, '', data))
1272 1271 if [r for r in rslts if not r]:
1273 1272 msg = _('failed to push some obsolete markers!\n')
1274 1273 repo.ui.warn(msg)
1275 1274
1276 1275 def _pushbookmark(pushop):
1277 1276 """Update bookmark position on remote"""
1278 1277 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1279 1278 return
1280 1279 pushop.stepsdone.add('bookmarks')
1281 1280 ui = pushop.ui
1282 1281 remote = pushop.remote
1283 1282
1284 1283 for b, old, new in pushop.outbookmarks:
1285 1284 action = 'update'
1286 1285 if not old:
1287 1286 action = 'export'
1288 1287 elif not new:
1289 1288 action = 'delete'
1290 1289
1291 1290 with remote.commandexecutor() as e:
1292 1291 r = e.callcommand('pushkey', {
1293 1292 'namespace': 'bookmarks',
1294 1293 'key': b,
1295 1294 'old': old,
1296 1295 'new': new,
1297 1296 }).result()
1298 1297
1299 1298 if r:
1300 1299 ui.status(bookmsgmap[action][0] % b)
1301 1300 else:
1302 1301 ui.warn(bookmsgmap[action][1] % b)
1303 1302 # discovery can have set the value from an invalid entry
1304 1303 if pushop.bkresult is not None:
1305 1304 pushop.bkresult = 1
1306 1305
1307 1306 class pulloperation(object):
1308 1307 """An object that represents a single pull operation
1309 1308
1310 1309 Its purpose is to carry pull-related state and very common operations.
1311 1310
1312 1311 A new pulloperation should be created at the beginning of each pull and
1313 1312 discarded afterward.
1314 1313 """
1315 1314
1316 1315 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1317 1316 remotebookmarks=None, streamclonerequested=None):
1318 1317 # repo we pull into
1319 1318 self.repo = repo
1320 1319 # repo we pull from
1321 1320 self.remote = remote
1322 1321 # revision we try to pull (None is "all")
1323 1322 self.heads = heads
1324 1323 # bookmark pulled explicitly
1325 1324 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1326 1325 for bookmark in bookmarks]
1327 1326 # do we force pull?
1328 1327 self.force = force
1329 1328 # whether a streaming clone was requested
1330 1329 self.streamclonerequested = streamclonerequested
1331 1330 # transaction manager
1332 1331 self.trmanager = None
1333 1332 # set of common changesets between local and remote before pull
1334 1333 self.common = None
1335 1334 # set of pulled heads
1336 1335 self.rheads = None
1337 1336 # list of missing changesets to fetch remotely
1338 1337 self.fetch = None
1339 1338 # remote bookmarks data
1340 1339 self.remotebookmarks = remotebookmarks
1341 1340 # result of changegroup pulling (used as return code by pull)
1342 1341 self.cgresult = None
1343 1342 # list of steps already done
1344 1343 self.stepsdone = set()
1345 1344 # Whether we attempted a clone from pre-generated bundles.
1346 1345 self.clonebundleattempted = False
1347 1346
1348 1347 @util.propertycache
1349 1348 def pulledsubset(self):
1350 1349 """heads of the set of changesets targeted by the pull"""
1351 1350 # compute target subset
1352 1351 if self.heads is None:
1353 1352 # We pulled everything possible
1354 1353 # sync on everything common
1355 1354 c = set(self.common)
1356 1355 ret = list(self.common)
1357 1356 for n in self.rheads:
1358 1357 if n not in c:
1359 1358 ret.append(n)
1360 1359 return ret
1361 1360 else:
1362 1361 # We pulled a specific subset
1363 1362 # sync on this subset
1364 1363 return self.heads
1365 1364
1366 1365 @util.propertycache
1367 1366 def canusebundle2(self):
1368 1367 return not _forcebundle1(self)
1369 1368
1370 1369 @util.propertycache
1371 1370 def remotebundle2caps(self):
1372 1371 return bundle2.bundle2caps(self.remote)
1373 1372
1374 1373 def gettransaction(self):
1375 1374 # deprecated; talk to trmanager directly
1376 1375 return self.trmanager.transaction()
1377 1376
1378 1377 class transactionmanager(util.transactional):
1379 1378 """An object to manage the life cycle of a transaction
1380 1379
1381 1380 It creates the transaction on demand and calls the appropriate hooks when
1382 1381 closing the transaction."""
1383 1382 def __init__(self, repo, source, url):
1384 1383 self.repo = repo
1385 1384 self.source = source
1386 1385 self.url = url
1387 1386 self._tr = None
1388 1387
1389 1388 def transaction(self):
1390 1389 """Return an open transaction object, constructing if necessary"""
1391 1390 if not self._tr:
1392 1391 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1393 1392 self._tr = self.repo.transaction(trname)
1394 1393 self._tr.hookargs['source'] = self.source
1395 1394 self._tr.hookargs['url'] = self.url
1396 1395 return self._tr
1397 1396
1398 1397 def close(self):
1399 1398 """close transaction if created"""
1400 1399 if self._tr is not None:
1401 1400 self._tr.close()
1402 1401
1403 1402 def release(self):
1404 1403 """release transaction if created"""
1405 1404 if self._tr is not None:
1406 1405 self._tr.release()
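# Illustrative usage (a sketch), mirroring how pull() below drives it: the
# transaction is created lazily on first use, and the context manager closes
# it on success or releases it on failure.
#
#   trmanager = transactionmanager(repo, 'pull', remote.url())
#   with trmanager:
#       tr = trmanager.transaction()  # constructed on demand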
1407 1406
1408 1407 def listkeys(remote, namespace):
1409 1408 with remote.commandexecutor() as e:
1410 1409 return e.callcommand('listkeys', {'namespace': namespace}).result()
1411 1410
1412 1411 def _fullpullbundle2(repo, pullop):
1413 1412 # The server may send a partial reply, i.e. when inlining
1414 1413 # pre-computed bundles. In that case, update the common
1415 1414 # set based on the results and pull another bundle.
1416 1415 #
1417 1416 # There are two indicators that the process is finished:
1418 1417 # - no changeset has been added, or
1419 1418 # - all remote heads are known locally.
1420 1419 # The head check must use the unfiltered view as obsoletion
1421 1420 # markers can hide heads.
1422 1421 unfi = repo.unfiltered()
1423 1422 unficl = unfi.changelog
1424 1423 def headsofdiff(h1, h2):
1425 1424 """Returns heads(h1 % h2)"""
1426 1425 res = unfi.set('heads(%ln %% %ln)', h1, h2)
1427 1426 return set(ctx.node() for ctx in res)
1428 1427 def headsofunion(h1, h2):
1429 1428 """Returns heads((h1 + h2) - null)"""
1430 1429 res = unfi.set('heads((%ln + %ln - null))', h1, h2)
1431 1430 return set(ctx.node() for ctx in res)
1432 1431 while True:
1433 1432 old_heads = unficl.heads()
1434 1433 clstart = len(unficl)
1435 1434 _pullbundle2(pullop)
1436 1435 if repository.NARROW_REQUIREMENT in repo.requirements:
1437 1436 # XXX narrow clones filter the heads on the server side during
1438 1437 # XXX getbundle and result in partial replies as well.
1439 1438 # XXX Disable pull bundles in this case as band aid to avoid
1440 1439 # XXX extra round trips.
1441 1440 break
1442 1441 if clstart == len(unficl):
1443 1442 break
1444 1443 if all(unficl.hasnode(n) for n in pullop.rheads):
1445 1444 break
1446 1445 new_heads = headsofdiff(unficl.heads(), old_heads)
1447 1446 pullop.common = headsofunion(new_heads, pullop.common)
1448 1447 pullop.rheads = set(pullop.rheads) - pullop.common
1449 1448
1450 1449 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1451 1450 streamclonerequested=None):
1452 1451 """Fetch repository data from a remote.
1453 1452
1454 1453 This is the main function used to retrieve data from a remote repository.
1455 1454
1456 1455 ``repo`` is the local repository to clone into.
1457 1456 ``remote`` is a peer instance.
1458 1457 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1459 1458 default) means to pull everything from the remote.
1460 1459 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1461 1460 default, all remote bookmarks are pulled.
1462 1461 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1463 1462 initialization.
1464 1463 ``streamclonerequested`` is a boolean indicating whether a "streaming
1465 1464 clone" is requested. A "streaming clone" is essentially a raw file copy
1466 1465 of revlogs from the server. This only works when the local repository is
1467 1466 empty. The default value of ``None`` means to respect the server
1468 1467 configuration for preferring stream clones.
1469 1468
1470 1469 Returns the ``pulloperation`` created for this pull.
1471 1470 """
1472 1471 if opargs is None:
1473 1472 opargs = {}
1474 1473 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1475 1474 streamclonerequested=streamclonerequested,
1476 1475 **pycompat.strkwargs(opargs))
1477 1476
1478 1477 peerlocal = pullop.remote.local()
1479 1478 if peerlocal:
1480 1479 missing = set(peerlocal.requirements) - pullop.repo.supported
1481 1480 if missing:
1482 1481 msg = _("required features are not"
1483 1482 " supported in the destination:"
1484 1483 " %s") % (', '.join(sorted(missing)))
1485 1484 raise error.Abort(msg)
1486 1485
1487 1486 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1488 1487 with repo.wlock(), repo.lock(), pullop.trmanager:
1489 1488 # This should ideally be in _pullbundle2(). However, it needs to run
1490 1489 # before discovery to avoid extra work.
1491 1490 _maybeapplyclonebundle(pullop)
1492 1491 streamclone.maybeperformlegacystreamclone(pullop)
1493 1492 _pulldiscovery(pullop)
1494 1493 if pullop.canusebundle2:
1495 1494 _fullpullbundle2(repo, pullop)
1496 1495 _pullchangeset(pullop)
1497 1496 _pullphase(pullop)
1498 1497 _pullbookmarks(pullop)
1499 1498 _pullobsolete(pullop)
1500 1499
1501 1500 # storing remotenames
1502 1501 if repo.ui.configbool('experimental', 'remotenames'):
1503 1502 logexchange.pullremotenames(repo, remote)
1504 1503
1505 1504 return pullop
1506 1505
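# Illustrative usage sketch (not part of this module): an extension or
# script holding a ``repo`` object could drive a pull roughly like this;
# the URL is hypothetical and ``hg.peer`` is the usual way to obtain a
# peer instance.
#
#     from mercurial import hg
#     remote = hg.peer(repo, {}, 'https://hg.example.com/project')
#     pullop = pull(repo, remote)
#     if pullop.cgresult == 0:
#         repo.ui.status('no changesets pulled\n')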
1507 1506 # list of steps to perform discovery before pull
1508 1507 pulldiscoveryorder = []
1509 1508
1510 1509 # Mapping between step name and function
1511 1510 #
1512 1511 # This exists to help extensions wrap steps if necessary
1513 1512 pulldiscoverymapping = {}
1514 1513
1515 1514 def pulldiscovery(stepname):
1516 1515 """decorator for function performing discovery before pull
1517 1516
1518 1517 The function is added to the step -> function mapping and appended to the
1519 1518 list of steps. Beware that decorated function will be added in order (this
1520 1519 may matter).
1521 1520
1522 1521 You can only use this decorator for a new step, if you want to wrap a step
1523 1522 from an extension, change the pulldiscovery dictionary directly."""
1524 1523 def dec(func):
1525 1524 assert stepname not in pulldiscoverymapping
1526 1525 pulldiscoverymapping[stepname] = func
1527 1526 pulldiscoveryorder.append(stepname)
1528 1527 return func
1529 1528 return dec
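# A minimal sketch of registering an extra discovery step (the step name
# and function are hypothetical):
#
#     @pulldiscovery('example:extradata')
#     def _pulldiscoveryextra(pullop):
#         pullop.repo.ui.debug('running hypothetical extra discovery\n')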
1530 1529
1531 1530 def _pulldiscovery(pullop):
1532 1531 """Run all discovery steps"""
1533 1532 for stepname in pulldiscoveryorder:
1534 1533 step = pulldiscoverymapping[stepname]
1535 1534 step(pullop)
1536 1535
1537 1536 @pulldiscovery('b1:bookmarks')
1538 1537 def _pullbookmarkbundle1(pullop):
1539 1538 """fetch bookmark data in bundle1 case
1540 1539
1541 1540 If not using bundle2, we have to fetch bookmarks before changeset
1542 1541 discovery to reduce the chance and impact of race conditions."""
1543 1542 if pullop.remotebookmarks is not None:
1544 1543 return
1545 1544 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1546 1545 # all known bundle2 servers now support listkeys, but let's be nice with
1547 1546 # new implementations.
1548 1547 return
1549 1548 books = listkeys(pullop.remote, 'bookmarks')
1550 1549 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1551 1550
1552 1551
1553 1552 @pulldiscovery('changegroup')
1554 1553 def _pulldiscoverychangegroup(pullop):
1555 1554 """discovery phase for the pull
1556 1555
1557 1556 Currently handles changeset discovery only; will change to handle all
1558 1557 discovery at some point."""
1559 1558 tmp = discovery.findcommonincoming(pullop.repo,
1560 1559 pullop.remote,
1561 1560 heads=pullop.heads,
1562 1561 force=pullop.force)
1563 1562 common, fetch, rheads = tmp
1564 1563 nm = pullop.repo.unfiltered().changelog.nodemap
1565 1564 if fetch and rheads:
1566 1565 # If a remote head is filtered locally, put it back in common.
1567 1566 #
1568 1567 # This is a hackish solution to catch most "common but locally
1569 1568 # hidden" situations. We do not perform discovery on the unfiltered
1570 1569 # repository because it ends up doing a pathological number of round
1571 1570 # trips for a huge number of changesets we do not care about.
1572 1571 #
1573 1572 # If a set of such "common but filtered" changesets exists on the server
1574 1573 # but does not include a remote head, we will not be able to detect it.
1575 1574 scommon = set(common)
1576 1575 for n in rheads:
1577 1576 if n in nm:
1578 1577 if n not in scommon:
1579 1578 common.append(n)
1580 1579 if set(rheads).issubset(set(common)):
1581 1580 fetch = []
1582 1581 pullop.common = common
1583 1582 pullop.fetch = fetch
1584 1583 pullop.rheads = rheads
1585 1584
1586 1585 def _pullbundle2(pullop):
1587 1586 """pull data using bundle2
1588 1587
1589 1588 For now, the only supported data are changegroup."""
1590 1589 kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
1591 1590
1592 1591 # make ui easier to access
1593 1592 ui = pullop.repo.ui
1594 1593
1595 1594 # At the moment we don't do stream clones over bundle2. If that is
1596 1595 # implemented then here's where the check for that will go.
1597 1596 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1598 1597
1599 1598 # declare pull perimeters
1600 1599 kwargs['common'] = pullop.common
1601 1600 kwargs['heads'] = pullop.heads or pullop.rheads
1602 1601
1603 1602 if streaming:
1604 1603 kwargs['cg'] = False
1605 1604 kwargs['stream'] = True
1606 1605 pullop.stepsdone.add('changegroup')
1607 1606 pullop.stepsdone.add('phases')
1608 1607
1609 1608 else:
1610 1609 # pulling changegroup
1611 1610 pullop.stepsdone.add('changegroup')
1612 1611
1613 1612 kwargs['cg'] = pullop.fetch
1614 1613
1615 1614 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1616 1615 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1617 1616 if (not legacyphase and hasbinaryphase):
1618 1617 kwargs['phases'] = True
1619 1618 pullop.stepsdone.add('phases')
1620 1619
1621 1620 if 'listkeys' in pullop.remotebundle2caps:
1622 1621 if 'phases' not in pullop.stepsdone:
1623 1622 kwargs['listkeys'] = ['phases']
1624 1623
1625 1624 bookmarksrequested = False
1626 1625 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1627 1626 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1628 1627
1629 1628 if pullop.remotebookmarks is not None:
1630 1629 pullop.stepsdone.add('request-bookmarks')
1631 1630
1632 1631 if ('request-bookmarks' not in pullop.stepsdone
1633 1632 and pullop.remotebookmarks is None
1634 1633 and not legacybookmark and hasbinarybook):
1635 1634 kwargs['bookmarks'] = True
1636 1635 bookmarksrequested = True
1637 1636
1638 1637 if 'listkeys' in pullop.remotebundle2caps:
1639 1638 if 'request-bookmarks' not in pullop.stepsdone:
1640 1639 # make sure to always include bookmark data when migrating
1641 1640 # `hg incoming --bundle` to using this function.
1642 1641 pullop.stepsdone.add('request-bookmarks')
1643 1642 kwargs.setdefault('listkeys', []).append('bookmarks')
1644 1643
1645 1644 # If this is a full pull / clone and the server supports the clone bundles
1646 1645 # feature, tell the server whether we attempted a clone bundle. The
1647 1646 # presence of this flag indicates the client supports clone bundles. This
1648 1647 # will enable the server to treat clients that support clone bundles
1649 1648 # differently from those that don't.
1650 1649 if (pullop.remote.capable('clonebundles')
1651 1650 and pullop.heads is None and list(pullop.common) == [nullid]):
1652 1651 kwargs['cbattempted'] = pullop.clonebundleattempted
1653 1652
1654 1653 if streaming:
1655 1654 pullop.repo.ui.status(_('streaming all changes\n'))
1656 1655 elif not pullop.fetch:
1657 1656 pullop.repo.ui.status(_("no changes found\n"))
1658 1657 pullop.cgresult = 0
1659 1658 else:
1660 1659 if pullop.heads is None and list(pullop.common) == [nullid]:
1661 1660 pullop.repo.ui.status(_("requesting all changes\n"))
1662 1661 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1663 1662 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1664 1663 if obsolete.commonversion(remoteversions) is not None:
1665 1664 kwargs['obsmarkers'] = True
1666 1665 pullop.stepsdone.add('obsmarkers')
1667 1666 _pullbundle2extraprepare(pullop, kwargs)
1668 1667
1669 1668 with pullop.remote.commandexecutor() as e:
1670 1669 args = dict(kwargs)
1671 1670 args['source'] = 'pull'
1672 1671 bundle = e.callcommand('getbundle', args).result()
1673 1672
1674 1673 try:
1675 1674 op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
1676 1675 source='pull')
1677 1676 op.modes['bookmarks'] = 'records'
1678 1677 bundle2.processbundle(pullop.repo, bundle, op=op)
1679 1678 except bundle2.AbortFromPart as exc:
1680 1679 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1681 1680 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1682 1681 except error.BundleValueError as exc:
1683 1682 raise error.Abort(_('missing support for %s') % exc)
1684 1683
1685 1684 if pullop.fetch:
1686 1685 pullop.cgresult = bundle2.combinechangegroupresults(op)
1687 1686
1688 1687 # processing phases change
1689 1688 for namespace, value in op.records['listkeys']:
1690 1689 if namespace == 'phases':
1691 1690 _pullapplyphases(pullop, value)
1692 1691
1693 1692 # processing bookmark update
1694 1693 if bookmarksrequested:
1695 1694 books = {}
1696 1695 for record in op.records['bookmarks']:
1697 1696 books[record['bookmark']] = record["node"]
1698 1697 pullop.remotebookmarks = books
1699 1698 else:
1700 1699 for namespace, value in op.records['listkeys']:
1701 1700 if namespace == 'bookmarks':
1702 1701 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1703 1702
1704 1703 # bookmark data were either already there or pulled in the bundle
1705 1704 if pullop.remotebookmarks is not None:
1706 1705 _pullbookmarks(pullop)
1707 1706
1708 1707 def _pullbundle2extraprepare(pullop, kwargs):
1709 1708 """hook function so that extensions can extend the getbundle call"""
1710 1709
1711 1710 def _pullchangeset(pullop):
1712 1711 """pull changeset from unbundle into the local repo"""
1713 1712 # We delay opening the transaction as long as possible so we
1714 1713 # don't open a transaction for nothing and don't break a future
1715 1714 # useful rollback call.
1716 1715 if 'changegroup' in pullop.stepsdone:
1717 1716 return
1718 1717 pullop.stepsdone.add('changegroup')
1719 1718 if not pullop.fetch:
1720 1719 pullop.repo.ui.status(_("no changes found\n"))
1721 1720 pullop.cgresult = 0
1722 1721 return
1723 1722 tr = pullop.gettransaction()
1724 1723 if pullop.heads is None and list(pullop.common) == [nullid]:
1725 1724 pullop.repo.ui.status(_("requesting all changes\n"))
1726 1725 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1727 1726 # issue1320, avoid a race if remote changed after discovery
1728 1727 pullop.heads = pullop.rheads
1729 1728
1730 1729 if pullop.remote.capable('getbundle'):
1731 1730 # TODO: get bundlecaps from remote
1732 1731 cg = pullop.remote.getbundle('pull', common=pullop.common,
1733 1732 heads=pullop.heads or pullop.rheads)
1734 1733 elif pullop.heads is None:
1735 1734 with pullop.remote.commandexecutor() as e:
1736 1735 cg = e.callcommand('changegroup', {
1737 1736 'nodes': pullop.fetch,
1738 1737 'source': 'pull',
1739 1738 }).result()
1740 1739
1741 1740 elif not pullop.remote.capable('changegroupsubset'):
1742 1741 raise error.Abort(_("partial pull cannot be done because "
1743 1742 "other repository doesn't support "
1744 1743 "changegroupsubset."))
1745 1744 else:
1746 1745 with pullop.remote.commandexecutor() as e:
1747 1746 cg = e.callcommand('changegroupsubset', {
1748 1747 'bases': pullop.fetch,
1749 1748 'heads': pullop.heads,
1750 1749 'source': 'pull',
1751 1750 }).result()
1752 1751
1753 1752 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1754 1753 pullop.remote.url())
1755 1754 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1756 1755
1757 1756 def _pullphase(pullop):
1758 1757 # Get remote phases data from remote
1759 1758 if 'phases' in pullop.stepsdone:
1760 1759 return
1761 1760 remotephases = listkeys(pullop.remote, 'phases')
1762 1761 _pullapplyphases(pullop, remotephases)
1763 1762
1764 1763 def _pullapplyphases(pullop, remotephases):
1765 1764 """apply phase movement from observed remote state"""
1766 1765 if 'phases' in pullop.stepsdone:
1767 1766 return
1768 1767 pullop.stepsdone.add('phases')
1769 1768 publishing = bool(remotephases.get('publishing', False))
1770 1769 if remotephases and not publishing:
1771 1770 # remote is new and non-publishing
1772 1771 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1773 1772 pullop.pulledsubset,
1774 1773 remotephases)
1775 1774 dheads = pullop.pulledsubset
1776 1775 else:
1777 1776 # Remote is old or publishing; all common changesets
1778 1777 # should be seen as public
1779 1778 pheads = pullop.pulledsubset
1780 1779 dheads = []
1781 1780 unfi = pullop.repo.unfiltered()
1782 1781 phase = unfi._phasecache.phase
1783 1782 rev = unfi.changelog.nodemap.get
1784 1783 public = phases.public
1785 1784 draft = phases.draft
1786 1785
1787 1786 # exclude changesets already public locally and update the others
1788 1787 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1789 1788 if pheads:
1790 1789 tr = pullop.gettransaction()
1791 1790 phases.advanceboundary(pullop.repo, tr, public, pheads)
1792 1791
1793 1792 # exclude changesets already draft locally and update the others
1794 1793 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1795 1794 if dheads:
1796 1795 tr = pullop.gettransaction()
1797 1796 phases.advanceboundary(pullop.repo, tr, draft, dheads)
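# Shape sketch of the ``remotephases`` mapping consumed above (the hex
# node below is hypothetical): a publishing server typically answers
#
#     {'publishing': 'True'}
#
# while a non-publishing server maps its draft root nodes to their phase
# number, e.g. {'d047485b3896813b2a624e86201983520f8fc116': '1'}.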
1798 1797
1799 1798 def _pullbookmarks(pullop):
1800 1799 """process the remote bookmark information to update the local one"""
1801 1800 if 'bookmarks' in pullop.stepsdone:
1802 1801 return
1803 1802 pullop.stepsdone.add('bookmarks')
1804 1803 repo = pullop.repo
1805 1804 remotebookmarks = pullop.remotebookmarks
1806 1805 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1807 1806 pullop.remote.url(),
1808 1807 pullop.gettransaction,
1809 1808 explicit=pullop.explicitbookmarks)
1810 1809
1811 1810 def _pullobsolete(pullop):
1812 1811 """utility function to pull obsolete markers from a remote
1813 1812
1814 1813 ``gettransaction`` is a function that returns the pull transaction, creating
1815 1814 one if necessary. We return the transaction to inform the calling code that
1816 1815 a new transaction has been created (when applicable).
1817 1816 
1818 1817 Exists mostly to allow overriding for experimentation purposes."""
1819 1818 if 'obsmarkers' in pullop.stepsdone:
1820 1819 return
1821 1820 pullop.stepsdone.add('obsmarkers')
1822 1821 tr = None
1823 1822 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1824 1823 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1825 1824 remoteobs = listkeys(pullop.remote, 'obsolete')
1826 1825 if 'dump0' in remoteobs:
1827 1826 tr = pullop.gettransaction()
1828 1827 markers = []
1829 1828 for key in sorted(remoteobs, reverse=True):
1830 1829 if key.startswith('dump'):
1831 1830 data = util.b85decode(remoteobs[key])
1832 1831 version, newmarks = obsolete._readmarkers(data)
1833 1832 markers += newmarks
1834 1833 if markers:
1835 1834 pullop.repo.obsstore.add(tr, markers)
1836 1835 pullop.repo.invalidatevolatilesets()
1837 1836 return tr
1838 1837
1839 1838 def applynarrowacl(repo, kwargs):
1840 1839 """Apply narrow fetch access control.
1841 1840
1842 1841 This massages the named arguments for getbundle wire protocol commands
1843 1842 so requested data is filtered through access control rules.
1844 1843 """
1845 1844 ui = repo.ui
1846 1845 # TODO this assumes existence of HTTP and is a layering violation.
1847 1846 username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
1848 1847 user_includes = ui.configlist(
1849 1848 _NARROWACL_SECTION, username + '.includes',
1850 1849 ui.configlist(_NARROWACL_SECTION, 'default.includes'))
1851 1850 user_excludes = ui.configlist(
1852 1851 _NARROWACL_SECTION, username + '.excludes',
1853 1852 ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
1854 1853 if not user_includes:
1855 1854 raise error.Abort(_("{} configuration for user {} is empty")
1856 1855 .format(_NARROWACL_SECTION, username))
1857 1856
1858 1857 user_includes = [
1859 1858 'path:.' if p == '*' else 'path:' + p for p in user_includes]
1860 1859 user_excludes = [
1861 1860 'path:.' if p == '*' else 'path:' + p for p in user_excludes]
1862 1861
1863 1862 req_includes = set(kwargs.get(r'includepats', []))
1864 1863 req_excludes = set(kwargs.get(r'excludepats', []))
1865 1864
1866 1865 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
1867 1866 req_includes, req_excludes, user_includes, user_excludes)
1868 1867
1869 1868 if invalid_includes:
1870 1869 raise error.Abort(
1871 1870 _("The following includes are not accessible for {}: {}")
1872 1871 .format(username, invalid_includes))
1873 1872
1874 1873 new_args = {}
1875 1874 new_args.update(kwargs)
1876 1875 new_args[r'narrow'] = True
1877 1876 new_args[r'includepats'] = req_includes
1878 1877 if req_excludes:
1879 1878 new_args[r'excludepats'] = req_excludes
1880 1879
1881 1880 return new_args
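# Illustrative configuration sketch for the ACL above (the user name and
# paths are hypothetical); a value of '*' maps to 'path:.', i.e. the
# whole repository:
#
#     [narrowhgacl]
#     default.includes = *
#     alice.includes = lib/ docs/
#     alice.excludes = lib/secret/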
1882 1881
1883 1882 def _computeellipsis(repo, common, heads, known, match, depth=None):
1884 1883 """Compute the shape of a narrowed DAG.
1885 1884
1886 1885 Args:
1887 1886 repo: The repository we're transferring.
1888 1887 common: The roots of the DAG range we're transferring.
1889 1888 May be just [nullid], which means all ancestors of heads.
1890 1889 heads: The heads of the DAG range we're transferring.
1891 1890 match: The narrowmatcher that allows us to identify relevant changes.
1892 1891 depth: If not None, only consider nodes to be full nodes if they are at
1893 1892 most depth changesets away from one of heads.
1894 1893
1895 1894 Returns:
1896 1895 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
1897 1896
1898 1897 visitnodes: The list of nodes (either full or ellipsis) which
1899 1898 need to be sent to the client.
1900 1899 relevant_nodes: The set of changelog nodes which change a file inside
1901 1900 the narrowspec. The client needs these as non-ellipsis nodes.
1902 1901 ellipsisroots: A dict of {rev: parents} that is used in
1903 1902 narrowchangegroup to produce ellipsis nodes with the
1904 1903 correct parents.
1905 1904 """
1906 1905 cl = repo.changelog
1907 1906 mfl = repo.manifestlog
1908 1907
1909 cldag = dagutil.revlogdag(cl)
1910 # dagutil does not like nullid/nullrev
1911 commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev])
1912 headsrevs = cldag.internalizeall(heads)
1908 clrev = cl.rev
1909
1910 commonrevs = {clrev(n) for n in common} | {nullrev}
1911 headsrevs = {clrev(n) for n in heads}
1912
1913 1913 if depth:
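# revdepth[r] tracks the minimum distance from r to any head (heads
# start at depth 0); values beyond the requested depth are clamped to
# depth + 1 by the update below.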
1914 1914 revdepth = {h: 0 for h in headsrevs}
1915 1915
1916 1916 ellipsisheads = collections.defaultdict(set)
1917 1917 ellipsisroots = collections.defaultdict(set)
1918 1918
1919 1919 def addroot(head, curchange):
1920 1920 """Add a root to an ellipsis head, splitting heads with 3 roots."""
1921 1921 ellipsisroots[head].add(curchange)
1922 1922 # Recursively split ellipsis heads with 3 roots by finding the
1923 1923 # roots' youngest common descendant which is an elided merge commit.
1924 1924 # That descendant takes 2 of the 3 roots as its own, and becomes a
1925 1925 # root of the head.
1926 1926 while len(ellipsisroots[head]) > 2:
1927 1927 child, roots = splithead(head)
1928 1928 splitroots(head, child, roots)
1929 1929 head = child # Recurse in case we just added a 3rd root
1930 1930
1931 1931 def splitroots(head, child, roots):
1932 1932 ellipsisroots[head].difference_update(roots)
1933 1933 ellipsisroots[head].add(child)
1934 1934 ellipsisroots[child].update(roots)
1935 1935 ellipsisroots[child].discard(child)
1936 1936
1937 1937 def splithead(head):
1938 1938 r1, r2, r3 = sorted(ellipsisroots[head])
1939 1939 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
1940 1940 mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
1941 1941 nr1, head, nr2, head)
1942 1942 for j in mid:
1943 1943 if j == nr2:
1944 1944 return nr2, (nr1, nr2)
1945 1945 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
1946 1946 return j, (nr1, nr2)
1947 1947 raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
1948 1948 'roots: %d %d %d') % (head, r1, r2, r3))
1949 1949
1950 1950 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
1951 1951 visit = reversed(missing)
1952 1952 relevant_nodes = set()
1953 1953 visitnodes = [cl.node(m) for m in missing]
1954 1954 required = set(headsrevs) | known
1955 1955 for rev in visit:
1956 1956 clrev = cl.changelogrevision(rev)
1957 ps = cldag.parents(rev)
1957 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
1958 1958 if depth is not None:
1959 1959 curdepth = revdepth[rev]
1960 1960 for p in ps:
1961 1961 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
1962 1962 needed = False
1963 1963 shallow_enough = depth is None or revdepth[rev] <= depth
1964 1964 if shallow_enough:
1965 1965 curmf = mfl[clrev.manifest].read()
1966 1966 if ps:
1967 1967 # We choose to not trust the changed files list in
1968 1968 # changesets because it's not always correct. TODO: could
1969 1969 # we trust it for the non-merge case?
1970 1970 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
1971 1971 needed = bool(curmf.diff(p1mf, match))
1972 1972 if not needed and len(ps) > 1:
1973 1973 # For merge changes, the list of changed files is not
1974 1974 # helpful, since we need to emit the merge if a file
1975 1975 # in the narrow spec has changed on either side of the
1976 1976 # merge. As a result, we do a manifest diff to check.
1977 1977 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
1978 1978 needed = bool(curmf.diff(p2mf, match))
1979 1979 else:
1980 1980 # For a root node, we need to include the node if any
1981 1981 # files in the node match the narrowspec.
1982 1982 needed = any(curmf.walk(match))
1983 1983
1984 1984 if needed:
1985 1985 for head in ellipsisheads[rev]:
1986 1986 addroot(head, rev)
1987 1987 for p in ps:
1988 1988 required.add(p)
1989 1989 relevant_nodes.add(cl.node(rev))
1990 1990 else:
1991 1991 if not ps:
1992 1992 ps = [nullrev]
1993 1993 if rev in required:
1994 1994 for head in ellipsisheads[rev]:
1995 1995 addroot(head, rev)
1996 1996 for p in ps:
1997 1997 ellipsisheads[p].add(rev)
1998 1998 else:
1999 1999 for p in ps:
2000 2000 ellipsisheads[p] |= ellipsisheads[rev]
2001 2001
2002 2002 # add common changesets as roots of their reachable ellipsis heads
2003 2003 for c in commonrevs:
2004 2004 for head in ellipsisheads[c]:
2005 2005 addroot(head, c)
2006 2006 return visitnodes, relevant_nodes, ellipsisroots
2007 2007
2008 2008 def caps20to10(repo, role):
2009 2009 """return a set with appropriate options to use bundle20 during getbundle"""
2010 2010 caps = {'HG20'}
2011 2011 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2012 2012 caps.add('bundle2=' + urlreq.quote(capsblob))
2013 2013 return caps
2014 2014
2015 2015 # List of names of steps to perform for a bundle2 for getbundle; order matters.
2016 2016 getbundle2partsorder = []
2017 2017
2018 2018 # Mapping between step name and function
2019 2019 #
2020 2020 # This exists to help extensions wrap steps if necessary
2021 2021 getbundle2partsmapping = {}
2022 2022
2023 2023 def getbundle2partsgenerator(stepname, idx=None):
2024 2024 """decorator for function generating bundle2 part for getbundle
2025 2025
2026 2026 The function is added to the step -> function mapping and appended to the
2027 2027 list of steps. Beware that decorated functions will be added in order
2028 2028 (this may matter).
2029 2029
2030 2030 You can only use this decorator for new steps; if you want to wrap a step
2031 2031 from an extension, modify the getbundle2partsmapping dictionary directly."""
2032 2032 def dec(func):
2033 2033 assert stepname not in getbundle2partsmapping
2034 2034 getbundle2partsmapping[stepname] = func
2035 2035 if idx is None:
2036 2036 getbundle2partsorder.append(stepname)
2037 2037 else:
2038 2038 getbundle2partsorder.insert(idx, stepname)
2039 2039 return func
2040 2040 return dec
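# A minimal sketch of registering an extra part generator (the step name
# and part payload are hypothetical; 'output' is an existing advisory
# part type):
#
#     @getbundle2partsgenerator('example:extra')
#     def _getbundleexamplepart(bundler, repo, source, **kwargs):
#         bundler.newpart('output', data='example', mandatory=False)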
2041 2041
2042 2042 def bundle2requested(bundlecaps):
2043 2043 if bundlecaps is not None:
2044 2044 return any(cap.startswith('HG2') for cap in bundlecaps)
2045 2045 return False
2046 2046
2047 2047 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
2048 2048 **kwargs):
2049 2049 """Return chunks constituting a bundle's raw data.
2050 2050
2051 2051 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2052 2052 passed.
2053 2053
2054 2054 Returns a 2-tuple of a dict with metadata about the generated bundle
2055 2055 and an iterator over raw chunks (of varying sizes).
2056 2056 """
2057 2057 kwargs = pycompat.byteskwargs(kwargs)
2058 2058 info = {}
2059 2059 usebundle2 = bundle2requested(bundlecaps)
2060 2060 # bundle10 case
2061 2061 if not usebundle2:
2062 2062 if bundlecaps and not kwargs.get('cg', True):
2063 2063 raise ValueError(_('request for bundle10 must include changegroup'))
2064 2064
2065 2065 if kwargs:
2066 2066 raise ValueError(_('unsupported getbundle arguments: %s')
2067 2067 % ', '.join(sorted(kwargs.keys())))
2068 2068 outgoing = _computeoutgoing(repo, heads, common)
2069 2069 info['bundleversion'] = 1
2070 2070 return info, changegroup.makestream(repo, outgoing, '01', source,
2071 2071 bundlecaps=bundlecaps)
2072 2072
2073 2073 # bundle20 case
2074 2074 info['bundleversion'] = 2
2075 2075 b2caps = {}
2076 2076 for bcaps in bundlecaps:
2077 2077 if bcaps.startswith('bundle2='):
2078 2078 blob = urlreq.unquote(bcaps[len('bundle2='):])
2079 2079 b2caps.update(bundle2.decodecaps(blob))
2080 2080 bundler = bundle2.bundle20(repo.ui, b2caps)
2081 2081
2082 2082 kwargs['heads'] = heads
2083 2083 kwargs['common'] = common
2084 2084
2085 2085 for name in getbundle2partsorder:
2086 2086 func = getbundle2partsmapping[name]
2087 2087 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
2088 2088 **pycompat.strkwargs(kwargs))
2089 2089
2090 2090 info['prefercompressed'] = bundler.prefercompressed
2091 2091
2092 2092 return info, bundler.getchunks()
2093 2093
2094 2094 @getbundle2partsgenerator('stream2')
2095 2095 def _getbundlestream2(bundler, repo, *args, **kwargs):
2096 2096 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2097 2097
2098 2098 @getbundle2partsgenerator('changegroup')
2099 2099 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
2100 2100 b2caps=None, heads=None, common=None, **kwargs):
2101 2101 """add a changegroup part to the requested bundle"""
2102 2102 if not kwargs.get(r'cg', True):
2103 2103 return
2104 2104
2105 2105 version = '01'
2106 2106 cgversions = b2caps.get('changegroup')
2107 2107 if cgversions: # 3.1 and 3.2 ship with an empty value
2108 2108 cgversions = [v for v in cgversions
2109 2109 if v in changegroup.supportedoutgoingversions(repo)]
2110 2110 if not cgversions:
2111 2111 raise ValueError(_('no common changegroup version'))
2112 2112 version = max(cgversions)
2113 2113
2114 2114 outgoing = _computeoutgoing(repo, heads, common)
2115 2115 if not outgoing.missing:
2116 2116 return
2117 2117
2118 2118 if kwargs.get(r'narrow', False):
2119 2119 include = sorted(filter(bool, kwargs.get(r'includepats', [])))
2120 2120 exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
2121 2121 filematcher = narrowspec.match(repo.root, include=include,
2122 2122 exclude=exclude)
2123 2123 else:
2124 2124 filematcher = None
2125 2125
2126 2126 cgstream = changegroup.makestream(repo, outgoing, version, source,
2127 2127 bundlecaps=bundlecaps,
2128 2128 filematcher=filematcher)
2129 2129
2130 2130 part = bundler.newpart('changegroup', data=cgstream)
2131 2131 if cgversions:
2132 2132 part.addparam('version', version)
2133 2133
2134 2134 part.addparam('nbchanges', '%d' % len(outgoing.missing),
2135 2135 mandatory=False)
2136 2136
2137 2137 if 'treemanifest' in repo.requirements:
2138 2138 part.addparam('treemanifest', '1')
2139 2139
2140 2140 if kwargs.get(r'narrow', False) and (include or exclude):
2141 2141 narrowspecpart = bundler.newpart('narrow:spec')
2142 2142 if include:
2143 2143 narrowspecpart.addparam(
2144 2144 'include', '\n'.join(include), mandatory=True)
2145 2145 if exclude:
2146 2146 narrowspecpart.addparam(
2147 2147 'exclude', '\n'.join(exclude), mandatory=True)
2148 2148
2149 2149 @getbundle2partsgenerator('bookmarks')
2150 2150 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
2151 2151 b2caps=None, **kwargs):
2152 2152 """add a bookmark part to the requested bundle"""
2153 2153 if not kwargs.get(r'bookmarks', False):
2154 2154 return
2155 2155 if 'bookmarks' not in b2caps:
2156 2156 raise ValueError(_('no common bookmarks exchange method'))
2157 2157 books = bookmod.listbinbookmarks(repo)
2158 2158 data = bookmod.binaryencode(books)
2159 2159 if data:
2160 2160 bundler.newpart('bookmarks', data=data)
2161 2161
2162 2162 @getbundle2partsgenerator('listkeys')
2163 2163 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
2164 2164 b2caps=None, **kwargs):
2165 2165 """add parts containing listkeys namespaces to the requested bundle"""
2166 2166 listkeys = kwargs.get(r'listkeys', ())
2167 2167 for namespace in listkeys:
2168 2168 part = bundler.newpart('listkeys')
2169 2169 part.addparam('namespace', namespace)
2170 2170 keys = repo.listkeys(namespace).items()
2171 2171 part.data = pushkey.encodekeys(keys)
2172 2172
2173 2173 @getbundle2partsgenerator('obsmarkers')
2174 2174 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
2175 2175 b2caps=None, heads=None, **kwargs):
2176 2176 """add an obsolescence markers part to the requested bundle"""
2177 2177 if kwargs.get(r'obsmarkers', False):
2178 2178 if heads is None:
2179 2179 heads = repo.heads()
2180 2180 subset = [c.node() for c in repo.set('::%ln', heads)]
2181 2181 markers = repo.obsstore.relevantmarkers(subset)
2182 2182 markers = sorted(markers)
2183 2183 bundle2.buildobsmarkerspart(bundler, markers)
2184 2184
2185 2185 @getbundle2partsgenerator('phases')
2186 2186 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
2187 2187 b2caps=None, heads=None, **kwargs):
2188 2188 """add phase heads part to the requested bundle"""
2189 2189 if kwargs.get(r'phases', False):
2190 2190 if 'heads' not in b2caps.get('phases'):
2191 2191 raise ValueError(_('no common phases exchange method'))
2192 2192 if heads is None:
2193 2193 heads = repo.heads()
2194 2194
2195 2195 headsbyphase = collections.defaultdict(set)
2196 2196 if repo.publishing():
2197 2197 headsbyphase[phases.public] = heads
2198 2198 else:
2199 2199 # find the appropriate heads to move
2200 2200
2201 2201 phase = repo._phasecache.phase
2202 2202 node = repo.changelog.node
2203 2203 rev = repo.changelog.rev
2204 2204 for h in heads:
2205 2205 headsbyphase[phase(repo, rev(h))].add(h)
2206 2206 seenphases = list(headsbyphase.keys())
2207 2207
2208 2208 # We do not handle anything but public and draft phases for now.
2209 2209 if seenphases:
2210 2210 assert max(seenphases) <= phases.draft
2211 2211
2212 2212 # if client is pulling non-public changesets, we need to find
2213 2213 # intermediate public heads.
2214 2214 draftheads = headsbyphase.get(phases.draft, set())
2215 2215 if draftheads:
2216 2216 publicheads = headsbyphase.get(phases.public, set())
2217 2217
2218 2218 revset = 'heads(only(%ln, %ln) and public())'
2219 2219 extraheads = repo.revs(revset, draftheads, publicheads)
2220 2220 for r in extraheads:
2221 2221 headsbyphase[phases.public].add(node(r))
2222 2222
2223 2223 # transform data in a format used by the encoding function
2224 2224 phasemapping = []
2225 2225 for phase in phases.allphases:
2226 2226 phasemapping.append(sorted(headsbyphase[phase]))
2227 2227
2228 2228 # generate the actual part
2229 2229 phasedata = phases.binaryencode(phasemapping)
2230 2230 bundler.newpart('phase-heads', data=phasedata)
2231 2231
2232 2232 @getbundle2partsgenerator('hgtagsfnodes')
2233 2233 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
2234 2234 b2caps=None, heads=None, common=None,
2235 2235 **kwargs):
2236 2236 """Transfer the .hgtags filenodes mapping.
2237 2237
2238 2238 Only values for heads in this bundle will be transferred.
2239 2239
2240 2240 The part data consists of pairs of 20 byte changeset node and .hgtags
2241 2241 filenodes raw values.
2242 2242 """
2243 2243 # Don't send unless:
2244 2244 # - changesets are being exchanged,
2245 2245 # - the client supports it.
2246 2246 if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
2247 2247 return
2248 2248
2249 2249 outgoing = _computeoutgoing(repo, heads, common)
2250 2250 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2251 2251
2252 2252 @getbundle2partsgenerator('cache:rev-branch-cache')
2253 2253 def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
2254 2254 b2caps=None, heads=None, common=None,
2255 2255 **kwargs):
2256 2256 """Transfer the rev-branch-cache mapping
2257 2257
2258 2258 The payload is a series of data related to each branch
2259 2259
2260 2260 1) branch name length
2261 2261 2) number of open heads
2262 2262 3) number of closed heads
2263 2263 4) open heads nodes
2264 2264 5) closed heads nodes
2265 2265 """
2266 2266 # Don't send unless:
2267 2267 # - changesets are being exchanged,
2268 2268 # - the client supports it.
2269 2269 # - narrow bundle isn't in play (not currently compatible).
2270 2270 if (not kwargs.get(r'cg', True)
2271 2271 or 'rev-branch-cache' not in b2caps
2272 2272 or kwargs.get(r'narrow', False)
2273 2273 or repo.ui.has_section(_NARROWACL_SECTION)):
2274 2274 return
2275 2275
2276 2276 outgoing = _computeoutgoing(repo, heads, common)
2277 2277 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2278 2278
2279 2279 def check_heads(repo, their_heads, context):
2280 2280 """check if the heads of a repo have been modified
2281 2281
2282 2282 Used by peer for unbundling.
2283 2283 """
2284 2284 heads = repo.heads()
2285 2285 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
2286 2286 if not (their_heads == ['force'] or their_heads == heads or
2287 2287 their_heads == ['hashed', heads_hash]):
2288 2288 # someone else committed/pushed/unbundled while we
2289 2289 # were transferring data
2290 2290 raise error.PushRaced('repository changed while %s - '
2291 2291 'please try again' % context)
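# Client-side sketch of the matching computation (mirrors the check
# above; ``remoteheads`` and ``cg`` are hypothetical locals):
#
#     expected = hashlib.sha1(''.join(sorted(remoteheads))).digest()
#     remote.unbundle(cg, ['hashed', expected], url)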
2292 2292
2293 2293 def unbundle(repo, cg, heads, source, url):
2294 2294 """Apply a bundle to a repo.
2295 2295
2296 2296 This function makes sure the repo is locked during the application and has
2297 2297 a mechanism to check that no push race occurred between the creation of the
2298 2298 bundle and its application.
2299 2299 
2300 2300 If the push was raced, a PushRaced exception is raised."""
2301 2301 r = 0
2302 2302 # need a transaction when processing a bundle2 stream
2303 2303 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2304 2304 lockandtr = [None, None, None]
2305 2305 recordout = None
2306 2306 # quick fix for output mismatch with bundle2 in 3.4
2307 2307 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
2308 2308 if url.startswith('remote:http:') or url.startswith('remote:https:'):
2309 2309 captureoutput = True
2310 2310 try:
2311 2311 # note: outside bundle1, 'heads' is expected to be empty and this
2312 2312 # 'check_heads' call will be a no-op
2313 2313 check_heads(repo, heads, 'uploading changes')
2314 2314 # push can proceed
2315 2315 if not isinstance(cg, bundle2.unbundle20):
2316 2316 # legacy case: bundle1 (changegroup 01)
2317 2317 txnname = "\n".join([source, util.hidepassword(url)])
2318 2318 with repo.lock(), repo.transaction(txnname) as tr:
2319 2319 op = bundle2.applybundle(repo, cg, tr, source, url)
2320 2320 r = bundle2.combinechangegroupresults(op)
2321 2321 else:
2322 2322 r = None
2323 2323 try:
2324 2324 def gettransaction():
2325 2325 if not lockandtr[2]:
2326 2326 lockandtr[0] = repo.wlock()
2327 2327 lockandtr[1] = repo.lock()
2328 2328 lockandtr[2] = repo.transaction(source)
2329 2329 lockandtr[2].hookargs['source'] = source
2330 2330 lockandtr[2].hookargs['url'] = url
2331 2331 lockandtr[2].hookargs['bundle2'] = '1'
2332 2332 return lockandtr[2]
2333 2333
2334 2334 # Do greedy locking by default until we're satisfied with lazy
2335 2335 # locking.
2336 2336 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
2337 2337 gettransaction()
2338 2338
2339 2339 op = bundle2.bundleoperation(repo, gettransaction,
2340 2340 captureoutput=captureoutput,
2341 2341 source='push')
2342 2342 try:
2343 2343 op = bundle2.processbundle(repo, cg, op=op)
2344 2344 finally:
2345 2345 r = op.reply
2346 2346 if captureoutput and r is not None:
2347 2347 repo.ui.pushbuffer(error=True, subproc=True)
2348 2348 def recordout(output):
2349 2349 r.newpart('output', data=output, mandatory=False)
2350 2350 if lockandtr[2] is not None:
2351 2351 lockandtr[2].close()
2352 2352 except BaseException as exc:
2353 2353 exc.duringunbundle2 = True
2354 2354 if captureoutput and r is not None:
2355 2355 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2356 2356 def recordout(output):
2357 2357 part = bundle2.bundlepart('output', data=output,
2358 2358 mandatory=False)
2359 2359 parts.append(part)
2360 2360 raise
2361 2361 finally:
2362 2362 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2363 2363 if recordout is not None:
2364 2364 recordout(repo.ui.popbuffer())
2365 2365 return r
2366 2366
2367 2367 def _maybeapplyclonebundle(pullop):
2368 2368 """Apply a clone bundle from a remote, if possible."""
2369 2369
2370 2370 repo = pullop.repo
2371 2371 remote = pullop.remote
2372 2372
2373 2373 if not repo.ui.configbool('ui', 'clonebundles'):
2374 2374 return
2375 2375
2376 2376 # Only run if local repo is empty.
2377 2377 if len(repo):
2378 2378 return
2379 2379
2380 2380 if pullop.heads:
2381 2381 return
2382 2382
2383 2383 if not remote.capable('clonebundles'):
2384 2384 return
2385 2385
2386 2386 with remote.commandexecutor() as e:
2387 2387 res = e.callcommand('clonebundles', {}).result()
2388 2388
2389 2389 # If we call the wire protocol command, that's good enough to record the
2390 2390 # attempt.
2391 2391 pullop.clonebundleattempted = True
2392 2392
2393 2393 entries = parseclonebundlesmanifest(repo, res)
2394 2394 if not entries:
2395 2395 repo.ui.note(_('no clone bundles available on remote; '
2396 2396 'falling back to regular clone\n'))
2397 2397 return
2398 2398
2399 2399 entries = filterclonebundleentries(
2400 2400 repo, entries, streamclonerequested=pullop.streamclonerequested)
2401 2401
2402 2402 if not entries:
2403 2403 # There is a thundering herd concern here. However, if a server
2404 2404 # operator doesn't advertise bundles appropriate for its clients,
2405 2405 # they deserve what's coming. Furthermore, from a client's
2406 2406 # perspective, no automatic fallback would mean not being able to
2407 2407 # clone!
2408 2408 repo.ui.warn(_('no compatible clone bundles available on server; '
2409 2409 'falling back to regular clone\n'))
2410 2410 repo.ui.warn(_('(you may want to report this to the server '
2411 2411 'operator)\n'))
2412 2412 return
2413 2413
2414 2414 entries = sortclonebundleentries(repo.ui, entries)
2415 2415
2416 2416 url = entries[0]['URL']
2417 2417 repo.ui.status(_('applying clone bundle from %s\n') % url)
2418 2418 if trypullbundlefromurl(repo.ui, repo, url):
2419 2419 repo.ui.status(_('finished applying clone bundle\n'))
2420 2420 # Bundle failed.
2421 2421 #
2422 2422 # We abort by default to avoid the thundering herd of
2423 2423 # clients flooding a server that was expecting expensive
2424 2424 # clone load to be offloaded.
2425 2425 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2426 2426 repo.ui.warn(_('falling back to normal clone\n'))
2427 2427 else:
2428 2428 raise error.Abort(_('error applying bundle'),
2429 2429 hint=_('if this error persists, consider contacting '
2430 2430 'the server operator or disable clone '
2431 2431 'bundles via '
2432 2432 '"--config ui.clonebundles=false"'))
2433 2433
2434 2434 def parseclonebundlesmanifest(repo, s):
2435 2435 """Parses the raw text of a clone bundles manifest.
2436 2436
2437 2437 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2438 2438 to the URL and other keys are the attributes for the entry.
2439 2439 """
2440 2440 m = []
2441 2441 for line in s.splitlines():
2442 2442 fields = line.split()
2443 2443 if not fields:
2444 2444 continue
2445 2445 attrs = {'URL': fields[0]}
2446 2446 for rawattr in fields[1:]:
2447 2447 key, value = rawattr.split('=', 1)
2448 2448 key = urlreq.unquote(key)
2449 2449 value = urlreq.unquote(value)
2450 2450 attrs[key] = value
2451 2451
2452 2452 # Parse BUNDLESPEC into components. This makes client-side
2453 2453 # preferences easier to specify since you can prefer a single
2454 2454 # component of the BUNDLESPEC.
2455 2455 if key == 'BUNDLESPEC':
2456 2456 try:
2457 2457 bundlespec = parsebundlespec(repo, value)
2458 2458 attrs['COMPRESSION'] = bundlespec.compression
2459 2459 attrs['VERSION'] = bundlespec.version
2460 2460 except error.InvalidBundleSpecification:
2461 2461 pass
2462 2462 except error.UnsupportedBundleSpecification:
2463 2463 pass
2464 2464
2465 2465 m.append(attrs)
2466 2466
2467 2467 return m
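# Illustrative manifest sketch (the URL and attribute values are
# hypothetical):
#
#     https://cdn.example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# parses to a single entry with keys URL, BUNDLESPEC and REQUIRESNI, plus
# the derived COMPRESSION ('gzip') and VERSION ('v2') attributes.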
2468 2468
2469 2469 def isstreamclonespec(bundlespec):
2470 2470 # Stream clone v1
2471 2471 if (bundlespec.wirecompression == 'UN' and bundlespec.wireversion == 's1'):
2472 2472 return True
2473 2473
2474 2474 # Stream clone v2
2475 2475 if (bundlespec.wirecompression == 'UN' and
2476 2476 bundlespec.wireversion == '02' and
2477 2477 bundlespec.contentopts.get('streamv2')):
2478 2478 return True
2479 2479
2480 2480 return False
2481 2481
2482 2482 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2483 2483 """Remove incompatible clone bundle manifest entries.
2484 2484
2485 2485 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2486 2486 and returns a new list consisting of only the entries that this client
2487 2487 should be able to apply.
2488 2488
2489 2489 There is no guarantee we'll be able to apply all returned entries because
2490 2490 the metadata we use to filter on may be missing or wrong.
2491 2491 """
2492 2492 newentries = []
2493 2493 for entry in entries:
2494 2494 spec = entry.get('BUNDLESPEC')
2495 2495 if spec:
2496 2496 try:
2497 2497 bundlespec = parsebundlespec(repo, spec, strict=True)
2498 2498
2499 2499 # If a stream clone was requested, filter out non-streamclone
2500 2500 # entries.
2501 2501 if streamclonerequested and not isstreamclonespec(bundlespec):
2502 2502 repo.ui.debug('filtering %s because not a stream clone\n' %
2503 2503 entry['URL'])
2504 2504 continue
2505 2505
2506 2506 except error.InvalidBundleSpecification as e:
2507 2507 repo.ui.debug(stringutil.forcebytestr(e) + '\n')
2508 2508 continue
2509 2509 except error.UnsupportedBundleSpecification as e:
2510 2510 repo.ui.debug('filtering %s because unsupported bundle '
2511 2511 'spec: %s\n' % (
2512 2512 entry['URL'], stringutil.forcebytestr(e)))
2513 2513 continue
2514 2514 # If we don't have a spec and requested a stream clone, we don't know
2515 2515 # what the entry is, so don't attempt to apply it.
2516 2516 elif streamclonerequested:
2517 2517 repo.ui.debug('filtering %s because cannot determine if a stream '
2518 2518 'clone bundle\n' % entry['URL'])
2519 2519 continue
2520 2520
2521 2521 if 'REQUIRESNI' in entry and not sslutil.hassni:
2522 2522 repo.ui.debug('filtering %s because SNI not supported\n' %
2523 2523 entry['URL'])
2524 2524 continue
2525 2525
2526 2526 newentries.append(entry)
2527 2527
2528 2528 return newentries
2529 2529
2530 2530 class clonebundleentry(object):
2531 2531 """Represents an item in a clone bundles manifest.
2532 2532
2533 2533 This rich class is needed to support sorting since sorted() in Python 3
2534 2534 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2535 2535 won't work.
2536 2536 """
2537 2537
2538 2538 def __init__(self, value, prefers):
2539 2539 self.value = value
2540 2540 self.prefers = prefers
2541 2541
2542 2542 def _cmp(self, other):
2543 2543 for prefkey, prefvalue in self.prefers:
2544 2544 avalue = self.value.get(prefkey)
2545 2545 bvalue = other.value.get(prefkey)
2546 2546
2547 2547 # Special case for b missing attribute and a matches exactly.
2548 2548 if avalue is not None and bvalue is None and avalue == prefvalue:
2549 2549 return -1
2550 2550
2551 2551 # Special case for a missing attribute and b matches exactly.
2552 2552 if bvalue is not None and avalue is None and bvalue == prefvalue:
2553 2553 return 1
2554 2554
2555 2555 # We can't compare unless attribute present on both.
2556 2556 if avalue is None or bvalue is None:
2557 2557 continue
2558 2558
2559 2559 # Same values should fall back to next attribute.
2560 2560 if avalue == bvalue:
2561 2561 continue
2562 2562
2563 2563 # Exact matches come first.
2564 2564 if avalue == prefvalue:
2565 2565 return -1
2566 2566 if bvalue == prefvalue:
2567 2567 return 1
2568 2568
2569 2569 # Fall back to next attribute.
2570 2570 continue
2571 2571
2572 2572 # If we got here we couldn't sort by attributes and prefers. Fall
2573 2573 # back to index order.
2574 2574 return 0
2575 2575
2576 2576 def __lt__(self, other):
2577 2577 return self._cmp(other) < 0
2578 2578
2579 2579 def __gt__(self, other):
2580 2580 return self._cmp(other) > 0
2581 2581
2582 2582 def __eq__(self, other):
2583 2583 return self._cmp(other) == 0
2584 2584
2585 2585 def __le__(self, other):
2586 2586 return self._cmp(other) <= 0
2587 2587
2588 2588 def __ge__(self, other):
2589 2589 return self._cmp(other) >= 0
2590 2590
2591 2591 def __ne__(self, other):
2592 2592 return self._cmp(other) != 0
2593 2593
2594 2594 def sortclonebundleentries(ui, entries):
2595 2595 prefers = ui.configlist('ui', 'clonebundleprefers')
2596 2596 if not prefers:
2597 2597 return list(entries)
2598 2598
2599 2599 prefers = [p.split('=', 1) for p in prefers]
2600 2600
2601 2601 items = sorted(clonebundleentry(v, prefers) for v in entries)
2602 2602 return [i.value for i in items]
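# Illustrative preference sketch (the attribute values are hypothetical):
#
#     [ui]
#     clonebundleprefers = COMPRESSION=zstd, COMPRESSION=gzip
#
# Entries advertising COMPRESSION=zstd sort first, then gzip; ties fall
# through to the next preference and finally to manifest order.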
2603 2603
2604 2604 def trypullbundlefromurl(ui, repo, url):
2605 2605 """Attempt to apply a bundle from a URL."""
2606 2606 with repo.lock(), repo.transaction('bundleurl') as tr:
2607 2607 try:
2608 2608 fh = urlmod.open(ui, url)
2609 2609 cg = readbundle(ui, fh, 'stream')
2610 2610
2611 2611 if isinstance(cg, streamclone.streamcloneapplier):
2612 2612 cg.apply(repo)
2613 2613 else:
2614 2614 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2615 2615 return True
2616 2616 except urlerr.httperror as e:
2617 2617 ui.warn(_('HTTP error fetching bundle: %s\n') %
2618 2618 stringutil.forcebytestr(e))
2619 2619 except urlerr.urlerror as e:
2620 2620 ui.warn(_('error fetching bundle: %s\n') %
2621 2621 stringutil.forcebytestr(e.reason))
2622 2622
2623 2623 return False