remotenames: synchronise remotenames after push also...
Pulkit Goyal
r38634:4d5fb406 default
@@ -1,2418 +1,2421 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 bin,
16 16 hex,
17 17 nullid,
18 18 )
19 19 from .thirdparty import (
20 20 attr,
21 21 )
22 22 from . import (
23 23 bookmarks as bookmod,
24 24 bundle2,
25 25 changegroup,
26 26 discovery,
27 27 error,
28 28 lock as lockmod,
29 29 logexchange,
30 30 obsolete,
31 31 phases,
32 32 pushkey,
33 33 pycompat,
34 34 scmutil,
35 35 sslutil,
36 36 streamclone,
37 37 url as urlmod,
38 38 util,
39 39 )
40 40 from .utils import (
41 41 stringutil,
42 42 )
43 43
44 44 urlerr = util.urlerr
45 45 urlreq = util.urlreq
46 46
47 47 # Maps bundle version human names to changegroup versions.
48 48 _bundlespeccgversions = {'v1': '01',
49 49 'v2': '02',
50 50 'packed1': 's1',
51 51 'bundle2': '02', #legacy
52 52 }
53 53
54 54 # Maps bundle version with content opts to choose which part to bundle
55 55 _bundlespeccontentopts = {
56 56 'v1': {
57 57 'changegroup': True,
58 58 'cg.version': '01',
59 59 'obsolescence': False,
60 60 'phases': False,
61 61 'tagsfnodescache': False,
62 62 'revbranchcache': False
63 63 },
64 64 'v2': {
65 65 'changegroup': True,
66 66 'cg.version': '02',
67 67 'obsolescence': False,
68 68 'phases': False,
69 69 'tagsfnodescache': True,
70 70 'revbranchcache': True
71 71 },
72 72 'packed1' : {
73 73 'cg.version': 's1'
74 74 }
75 75 }
76 76 _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
77 77
78 78 _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
79 79 "tagsfnodescache": False,
80 80 "revbranchcache": False}}
81 81
82 82 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
83 83 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
84 84
85 85 @attr.s
86 86 class bundlespec(object):
87 87 compression = attr.ib()
88 88 wirecompression = attr.ib()
89 89 version = attr.ib()
90 90 wireversion = attr.ib()
91 91 params = attr.ib()
92 92 contentopts = attr.ib()
93 93
94 94 def parsebundlespec(repo, spec, strict=True):
95 95 """Parse a bundle string specification into parts.
96 96
97 97 Bundle specifications denote a well-defined bundle/exchange format.
98 98 The content of a given specification should not change over time in
99 99 order to ensure that bundles produced by a newer version of Mercurial are
100 100 readable from an older version.
101 101
102 102 The string currently has the form:
103 103
104 104 <compression>-<type>[;<parameter0>[;<parameter1>]]
105 105
106 106 Where <compression> is one of the supported compression formats
107 107 and <type> is (currently) a version string. A ";" can follow the type and
108 108 all text afterwards is interpreted as URI encoded, ";" delimited key=value
109 109 pairs.
110 110
111 111 If ``strict`` is True (the default) <compression> is required. Otherwise,
112 112 it is optional.
113 113
114 114 Returns a bundlespec object of (compression, version, parameters).
115 115 Compression will be ``None`` if not in strict mode and a compression isn't
116 116 defined.
117 117
118 118 An ``InvalidBundleSpecification`` is raised when the specification is
119 119 not syntactically well formed.
120 120
121 121 An ``UnsupportedBundleSpecification`` is raised when the compression or
122 122 bundle type/version is not recognized.
123 123
124 124 Note: this function will likely eventually return a more complex data
125 125 structure, including bundle2 part information.
126 126 """
127 127 def parseparams(s):
128 128 if ';' not in s:
129 129 return s, {}
130 130
131 131 params = {}
132 132 version, paramstr = s.split(';', 1)
133 133
134 134 for p in paramstr.split(';'):
135 135 if '=' not in p:
136 136 raise error.InvalidBundleSpecification(
137 137 _('invalid bundle specification: '
138 138 'missing "=" in parameter: %s') % p)
139 139
140 140 key, value = p.split('=', 1)
141 141 key = urlreq.unquote(key)
142 142 value = urlreq.unquote(value)
143 143 params[key] = value
144 144
145 145 return version, params
146 146
147 147
148 148 if strict and '-' not in spec:
149 149 raise error.InvalidBundleSpecification(
150 150 _('invalid bundle specification; '
151 151 'must be prefixed with compression: %s') % spec)
152 152
153 153 if '-' in spec:
154 154 compression, version = spec.split('-', 1)
155 155
156 156 if compression not in util.compengines.supportedbundlenames:
157 157 raise error.UnsupportedBundleSpecification(
158 158 _('%s compression is not supported') % compression)
159 159
160 160 version, params = parseparams(version)
161 161
162 162 if version not in _bundlespeccgversions:
163 163 raise error.UnsupportedBundleSpecification(
164 164 _('%s is not a recognized bundle version') % version)
165 165 else:
166 166 # Value could be just the compression or just the version, in which
167 167 # case some defaults are assumed (but only when not in strict mode).
168 168 assert not strict
169 169
170 170 spec, params = parseparams(spec)
171 171
172 172 if spec in util.compengines.supportedbundlenames:
173 173 compression = spec
174 174 version = 'v1'
175 175 # Generaldelta repos require v2.
176 176 if 'generaldelta' in repo.requirements:
177 177 version = 'v2'
178 178 # Modern compression engines require v2.
179 179 if compression not in _bundlespecv1compengines:
180 180 version = 'v2'
181 181 elif spec in _bundlespeccgversions:
182 182 if spec == 'packed1':
183 183 compression = 'none'
184 184 else:
185 185 compression = 'bzip2'
186 186 version = spec
187 187 else:
188 188 raise error.UnsupportedBundleSpecification(
189 189 _('%s is not a recognized bundle specification') % spec)
190 190
191 191 # Bundle version 1 only supports a known set of compression engines.
192 192 if version == 'v1' and compression not in _bundlespecv1compengines:
193 193 raise error.UnsupportedBundleSpecification(
194 194 _('compression engine %s is not supported on v1 bundles') %
195 195 compression)
196 196
197 197 # The specification for packed1 can optionally declare the data formats
198 198 # required to apply it. If we see this metadata, compare against what the
199 199 # repo supports and error if the bundle isn't compatible.
200 200 if version == 'packed1' and 'requirements' in params:
201 201 requirements = set(params['requirements'].split(','))
202 202 missingreqs = requirements - repo.supportedformats
203 203 if missingreqs:
204 204 raise error.UnsupportedBundleSpecification(
205 205 _('missing support for repository features: %s') %
206 206 ', '.join(sorted(missingreqs)))
207 207
208 208 # Compute contentopts based on the version
209 209 contentopts = _bundlespeccontentopts.get(version, {}).copy()
210 210
211 211 # Process the variants
212 212 if "stream" in params and params["stream"] == "v2":
213 213 variant = _bundlespecvariants["streamv2"]
214 214 contentopts.update(variant)
215 215
216 216 engine = util.compengines.forbundlename(compression)
217 217 compression, wirecompression = engine.bundletype()
218 218 wireversion = _bundlespeccgversions[version]
219 219
220 220 return bundlespec(compression, wirecompression, version, wireversion,
221 221 params, contentopts)
222 222
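# A hedged, standalone sketch (not part of exchange.py) of how a spec string
# such as 'zstd-v2;stream=v2' decomposes under the grammar documented in
# parsebundlespec() above; compression-engine validation and defaults are
# omitted, and Python 3's urllib stands in for urlreq.
from urllib.parse import unquote

def demo_parsebundlespec(spec):
    # <compression>-<type>[;<parameter0>[;<parameter1>]]
    compression, rest = spec.split('-', 1)
    version, _sep, paramstr = rest.partition(';')
    params = {}
    if paramstr:
        for p in paramstr.split(';'):
            key, value = p.split('=', 1)
            params[unquote(key)] = unquote(value)
    return compression, version, params

# demo_parsebundlespec('zstd-v2;stream=v2')
# -> ('zstd', 'v2', {'stream': 'v2'})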
223 223 def readbundle(ui, fh, fname, vfs=None):
224 224 header = changegroup.readexactly(fh, 4)
225 225
226 226 alg = None
227 227 if not fname:
228 228 fname = "stream"
229 229 if not header.startswith('HG') and header.startswith('\0'):
230 230 fh = changegroup.headerlessfixup(fh, header)
231 231 header = "HG10"
232 232 alg = 'UN'
233 233 elif vfs:
234 234 fname = vfs.join(fname)
235 235
236 236 magic, version = header[0:2], header[2:4]
237 237
238 238 if magic != 'HG':
239 239 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
240 240 if version == '10':
241 241 if alg is None:
242 242 alg = changegroup.readexactly(fh, 2)
243 243 return changegroup.cg1unpacker(fh, alg)
244 244 elif version.startswith('2'):
245 245 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
246 246 elif version == 'S1':
247 247 return streamclone.streamcloneapplier(fh)
248 248 else:
249 249 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
250 250
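# A minimal sketch of the header dispatch performed by readbundle() above;
# the byte strings are illustrative. The four-byte magic selects the
# unbundler class.
def classify_bundle_header(header):
    magic, version = header[0:2], header[2:4]
    if magic != b'HG':
        return 'not a Mercurial bundle'
    if version == b'10':
        return 'bundle1 (cg1unpacker; 2-byte compression follows)'
    if version.startswith(b'2'):
        return 'bundle2 (getunbundler)'
    if version == b'S1':
        return 'stream clone bundle (streamcloneapplier)'
    return 'unknown bundle version'

# classify_bundle_header(b'HG20') -> 'bundle2 (getunbundler)'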
251 251 def getbundlespec(ui, fh):
252 252 """Infer the bundlespec from a bundle file handle.
253 253
254 254 The input file handle is seeked and the original seek position is not
255 255 restored.
256 256 """
257 257 def speccompression(alg):
258 258 try:
259 259 return util.compengines.forbundletype(alg).bundletype()[0]
260 260 except KeyError:
261 261 return None
262 262
263 263 b = readbundle(ui, fh, None)
264 264 if isinstance(b, changegroup.cg1unpacker):
265 265 alg = b._type
266 266 if alg == '_truncatedBZ':
267 267 alg = 'BZ'
268 268 comp = speccompression(alg)
269 269 if not comp:
270 270 raise error.Abort(_('unknown compression algorithm: %s') % alg)
271 271 return '%s-v1' % comp
272 272 elif isinstance(b, bundle2.unbundle20):
273 273 if 'Compression' in b.params:
274 274 comp = speccompression(b.params['Compression'])
275 275 if not comp:
276 276 raise error.Abort(_('unknown compression algorithm: %s') % comp)
277 277 else:
278 278 comp = 'none'
279 279
280 280 version = None
281 281 for part in b.iterparts():
282 282 if part.type == 'changegroup':
283 283 version = part.params['version']
284 284 if version in ('01', '02'):
285 285 version = 'v2'
286 286 else:
287 287 raise error.Abort(_('changegroup version %s does not have '
288 288 'a known bundlespec') % version,
289 289 hint=_('try upgrading your Mercurial '
290 290 'client'))
291 291 elif part.type == 'stream2' and version is None:
292 292 # A stream2 part is required to be part of a v2 bundle
293 293 version = "v2"
294 294 requirements = urlreq.unquote(part.params['requirements'])
295 295 splitted = requirements.split()
296 296 params = bundle2._formatrequirementsparams(splitted)
297 297 return 'none-v2;stream=v2;%s' % params
298 298
299 299 if not version:
300 300 raise error.Abort(_('could not identify changegroup version in '
301 301 'bundle'))
302 302
303 303 return '%s-%s' % (comp, version)
304 304 elif isinstance(b, streamclone.streamcloneapplier):
305 305 requirements = streamclone.readbundle1header(fh)[2]
306 306 formatted = bundle2._formatrequirementsparams(requirements)
307 307 return 'none-packed1;%s' % formatted
308 308 else:
309 309 raise error.Abort(_('unknown bundle type: %s') % b)
310 310
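# Hedged examples of specs the function above can produce, one per return
# statement (the packed1 requirements value is hypothetical):
#
#   cg1 bundle compressed with bzip2     -> 'bzip2-v1'
#   bundle2 with a '02' changegroup part -> '<comp>-v2', e.g. 'zstd-v2'
#   stream clone bundle                  -> 'none-packed1;requirements=revlogv1'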
311 311 def _computeoutgoing(repo, heads, common):
312 312 """Computes which revs are outgoing given a set of common
313 313 and a set of heads.
314 314
315 315 This is a separate function so extensions can have access to
316 316 the logic.
317 317
318 318 Returns a discovery.outgoing object.
319 319 """
320 320 cl = repo.changelog
321 321 if common:
322 322 hasnode = cl.hasnode
323 323 common = [n for n in common if hasnode(n)]
324 324 else:
325 325 common = [nullid]
326 326 if not heads:
327 327 heads = cl.heads()
328 328 return discovery.outgoing(repo, common, heads)
329 329
330 330 def _forcebundle1(op):
331 331 """return true if a pull/push must use bundle1
332 332
333 333 This function is used to allow testing of the older bundle version"""
334 334 ui = op.repo.ui
335 335 # The goal of this config is to allow developers to choose the bundle
336 336 # version used during exchange. This is especially handy during tests.
337 337 # Value is a list of bundle versions to be picked from, the highest version
338 338 # should be used.
339 339 #
340 340 # developer config: devel.legacy.exchange
341 341 exchange = ui.configlist('devel', 'legacy.exchange')
342 342 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
343 343 return forcebundle1 or not op.remote.capable('bundle2')
344 344
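# A sketch of the developer config consulted above; with this hgrc fragment
# in place, bundle1 is forced even against a bundle2-capable peer
# (test-only, as noted above):
#
#   [devel]
#   legacy.exchange = bundle1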
345 345 class pushoperation(object):
346 346 """An object that represents a single push operation
347 347
348 348 Its purpose is to carry push related state and very common operations.
349 349
350 350 A new pushoperation should be created at the beginning of each push and
351 351 discarded afterward.
352 352 """
353 353
354 354 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
355 355 bookmarks=(), pushvars=None):
356 356 # repo we push from
357 357 self.repo = repo
358 358 self.ui = repo.ui
359 359 # repo we push to
360 360 self.remote = remote
361 361 # force option provided
362 362 self.force = force
363 363 # revs to be pushed (None is "all")
364 364 self.revs = revs
365 365 # bookmark explicitly pushed
366 366 self.bookmarks = bookmarks
367 367 # allow push of new branch
368 368 self.newbranch = newbranch
369 369 # step already performed
370 370 # (used to check what steps have been already performed through bundle2)
371 371 self.stepsdone = set()
372 372 # Integer version of the changegroup push result
373 373 # - None means nothing to push
374 374 # - 0 means HTTP error
375 375 # - 1 means we pushed and remote head count is unchanged *or*
376 376 # we have outgoing changesets but refused to push
377 377 # - other values as described by addchangegroup()
378 378 self.cgresult = None
379 379 # Boolean value for the bookmark push
380 380 self.bkresult = None
381 381 # discover.outgoing object (contains common and outgoing data)
382 382 self.outgoing = None
383 383 # all remote topological heads before the push
384 384 self.remoteheads = None
385 385 # Details of the remote branch pre and post push
386 386 #
387 387 # mapping: {'branch': ([remoteheads],
388 388 # [newheads],
389 389 # [unsyncedheads],
390 390 # [discardedheads])}
391 391 # - branch: the branch name
392 392 # - remoteheads: the list of remote heads known locally
393 393 # None if the branch is new
394 394 # - newheads: the new remote heads (known locally) with outgoing pushed
395 395 # - unsyncedheads: the list of remote heads unknown locally.
396 396 # - discardedheads: the list of remote heads made obsolete by the push
397 397 self.pushbranchmap = None
398 398 # testable as a boolean indicating if any nodes are missing locally.
399 399 self.incoming = None
400 400 # summary of the remote phase situation
401 401 self.remotephases = None
402 402 # phase changes that must be pushed alongside the changesets
403 403 self.outdatedphases = None
404 404 # phase changes that must be pushed if the changeset push fails
405 405 self.fallbackoutdatedphases = None
406 406 # outgoing obsmarkers
407 407 self.outobsmarkers = set()
408 408 # outgoing bookmarks
409 409 self.outbookmarks = []
410 410 # transaction manager
411 411 self.trmanager = None
412 412 # map { pushkey partid -> callback handling failure}
413 413 # used to handle exception from mandatory pushkey part failure
414 414 self.pkfailcb = {}
415 415 # an iterable of pushvars or None
416 416 self.pushvars = pushvars
417 417
418 418 @util.propertycache
419 419 def futureheads(self):
420 420 """future remote heads if the changeset push succeeds"""
421 421 return self.outgoing.missingheads
422 422
423 423 @util.propertycache
424 424 def fallbackheads(self):
425 425 """future remote heads if the changeset push fails"""
426 426 if self.revs is None:
427 427 # no target to push, all common heads are relevant
428 428 return self.outgoing.commonheads
429 429 unfi = self.repo.unfiltered()
430 430 # I want cheads = heads(::missingheads and ::commonheads)
431 431 # (missingheads is revs with secret changeset filtered out)
432 432 #
433 433 # This can be expressed as:
434 434 # cheads = ( (missingheads and ::commonheads)
435 435 # + (commonheads and ::missingheads))
436 436 # )
437 437 #
438 438 # while trying to push we already computed the following:
439 439 # common = (::commonheads)
440 440 # missing = ((commonheads::missingheads) - commonheads)
441 441 #
442 442 # We can pick:
443 443 # * missingheads part of common (::commonheads)
444 444 common = self.outgoing.common
445 445 nm = self.repo.changelog.nodemap
446 446 cheads = [node for node in self.revs if nm[node] in common]
447 447 # and
448 448 # * commonheads parents on missing
449 449 revset = unfi.set('%ln and parents(roots(%ln))',
450 450 self.outgoing.commonheads,
451 451 self.outgoing.missing)
452 452 cheads.extend(c.node() for c in revset)
453 453 return cheads
454 454
455 455 @property
456 456 def commonheads(self):
457 457 """set of all common heads after changeset bundle push"""
458 458 if self.cgresult:
459 459 return self.futureheads
460 460 else:
461 461 return self.fallbackheads
462 462
463 463 # mapping of messages used when pushing bookmarks
464 464 bookmsgmap = {'update': (_("updating bookmark %s\n"),
465 465 _('updating bookmark %s failed!\n')),
466 466 'export': (_("exporting bookmark %s\n"),
467 467 _('exporting bookmark %s failed!\n')),
468 468 'delete': (_("deleting remote bookmark %s\n"),
469 469 _('deleting remote bookmark %s failed!\n')),
470 470 }
471 471
472 472
473 473 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
474 474 opargs=None):
475 475 '''Push outgoing changesets (limited by revs) from a local
476 476 repository to remote. Return an integer:
477 477 - None means nothing to push
478 478 - 0 means HTTP error
479 479 - 1 means we pushed and remote head count is unchanged *or*
480 480 we have outgoing changesets but refused to push
481 481 - other values as described by addchangegroup()
482 482 '''
483 483 if opargs is None:
484 484 opargs = {}
485 485 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
486 486 **pycompat.strkwargs(opargs))
487 487 if pushop.remote.local():
488 488 missing = (set(pushop.repo.requirements)
489 489 - pushop.remote.local().supported)
490 490 if missing:
491 491 msg = _("required features are not"
492 492 " supported in the destination:"
493 493 " %s") % (', '.join(sorted(missing)))
494 494 raise error.Abort(msg)
495 495
496 496 if not pushop.remote.canpush():
497 497 raise error.Abort(_("destination does not support push"))
498 498
499 499 if not pushop.remote.capable('unbundle'):
500 500 raise error.Abort(_('cannot push: destination does not support the '
501 501 'unbundle wire protocol command'))
502 502
503 503 # get lock as we might write phase data
504 504 wlock = lock = None
505 505 try:
506 506 # bundle2 push may receive a reply bundle touching bookmarks or other
507 507 # things requiring the wlock. Take it now to ensure proper ordering.
508 508 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
509 509 if (not _forcebundle1(pushop)) and maypushback:
510 510 wlock = pushop.repo.wlock()
511 511 lock = pushop.repo.lock()
512 512 pushop.trmanager = transactionmanager(pushop.repo,
513 513 'push-response',
514 514 pushop.remote.url())
515 515 except error.LockUnavailable as err:
516 516 # source repo cannot be locked.
517 517 # We do not abort the push, but just disable the local phase
518 518 # synchronisation.
519 519 msg = 'cannot lock source repository: %s\n' % err
520 520 pushop.ui.debug(msg)
521 521
522 522 with wlock or util.nullcontextmanager(), \
523 523 lock or util.nullcontextmanager(), \
524 524 pushop.trmanager or util.nullcontextmanager():
525 525 pushop.repo.checkpush(pushop)
526 526 _pushdiscovery(pushop)
527 527 if not _forcebundle1(pushop):
528 528 _pushbundle2(pushop)
529 529 _pushchangeset(pushop)
530 530 _pushsyncphase(pushop)
531 531 _pushobsolete(pushop)
532 532 _pushbookmark(pushop)
533 533
534 if repo.ui.configbool('experimental', 'remotenames'):
535 logexchange.pullremotenames(repo, remote)
536
534 537 return pushop
535 538
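# The lines added above mirror the remotenames synchronisation that pull()
# already performs near its end (see further down in this file). A hedged
# sketch of the config that enables this path:
#
#   [experimental]
#   remotenames = True
#
# With that set, a push now also refreshes the remote names recorded by
# logexchange.pullremotenames(), just as a pull does.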
536 539 # list of steps to perform discovery before push
537 540 pushdiscoveryorder = []
538 541
539 542 # Mapping between step name and function
540 543 #
541 544 # This exists to help extensions wrap steps if necessary
542 545 pushdiscoverymapping = {}
543 546
544 547 def pushdiscovery(stepname):
545 548 """decorator for function performing discovery before push
546 549
547 550 The function is added to the step -> function mapping and appended to the
548 551 list of steps. Beware that decorated functions will be added in order (this
549 552 may matter).
550 553
551 554 You can only use this decorator for a new step; if you want to wrap a step
552 555 from an extension, change the pushdiscoverymapping dictionary directly."""
553 556 def dec(func):
554 557 assert stepname not in pushdiscoverymapping
555 558 pushdiscoverymapping[stepname] = func
556 559 pushdiscoveryorder.append(stepname)
557 560 return func
558 561 return dec
559 562
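# A hedged sketch of registering a brand-new discovery step with the
# decorator above ('example' is a hypothetical step name; wrapping an
# existing step should go through pushdiscoverymapping instead):
#
#     @pushdiscovery('example')
#     def _pushdiscoveryexample(pushop):
#         pushop.ui.debug('example discovery step\n')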
560 563 def _pushdiscovery(pushop):
561 564 """Run all discovery steps"""
562 565 for stepname in pushdiscoveryorder:
563 566 step = pushdiscoverymapping[stepname]
564 567 step(pushop)
565 568
566 569 @pushdiscovery('changeset')
567 570 def _pushdiscoverychangeset(pushop):
568 571 """discover the changesets that need to be pushed"""
569 572 fci = discovery.findcommonincoming
570 573 if pushop.revs:
571 574 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
572 575 ancestorsof=pushop.revs)
573 576 else:
574 577 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
575 578 common, inc, remoteheads = commoninc
576 579 fco = discovery.findcommonoutgoing
577 580 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
578 581 commoninc=commoninc, force=pushop.force)
579 582 pushop.outgoing = outgoing
580 583 pushop.remoteheads = remoteheads
581 584 pushop.incoming = inc
582 585
583 586 @pushdiscovery('phase')
584 587 def _pushdiscoveryphase(pushop):
585 588 """discover the phases that need to be pushed
586 589
587 590 (computed for both the success and failure cases of the changeset push)"""
588 591 outgoing = pushop.outgoing
589 592 unfi = pushop.repo.unfiltered()
590 593 remotephases = listkeys(pushop.remote, 'phases')
591 594
592 595 if (pushop.ui.configbool('ui', '_usedassubrepo')
593 596 and remotephases # server supports phases
594 597 and not pushop.outgoing.missing # no changesets to be pushed
595 598 and remotephases.get('publishing', False)):
596 599 # When:
597 600 # - this is a subrepo push
598 601 # - and the remote supports phases
599 602 # - and no changesets are to be pushed
600 603 # - and the remote is publishing
601 604 # We may be in issue 3781 case!
602 605 # We drop the phase synchronisation that would usually be done as a
603 606 # courtesy, since it could publish changesets that are still draft
604 607 # on the remote.
605 608 pushop.outdatedphases = []
606 609 pushop.fallbackoutdatedphases = []
607 610 return
608 611
609 612 pushop.remotephases = phases.remotephasessummary(pushop.repo,
610 613 pushop.fallbackheads,
611 614 remotephases)
612 615 droots = pushop.remotephases.draftroots
613 616
614 617 extracond = ''
615 618 if not pushop.remotephases.publishing:
616 619 extracond = ' and public()'
617 620 revset = 'heads((%%ln::%%ln) %s)' % extracond
618 621 # Get the list of all revs that are draft on the remote but public here.
619 622 # XXX Beware that the revset breaks if droots is not strictly
620 623 # XXX roots; we may want to ensure it is, but that is costly.
621 624 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
622 625 if not outgoing.missing:
623 626 future = fallback
624 627 else:
625 628 # adds the changesets we are going to push as draft
626 629 #
627 630 # should not be necessary for a publishing server, but because of an
628 631 # issue fixed in xxxxx we have to do it anyway.
629 632 fdroots = list(unfi.set('roots(%ln + %ln::)',
630 633 outgoing.missing, droots))
631 634 fdroots = [f.node() for f in fdroots]
632 635 future = list(unfi.set(revset, fdroots, pushop.futureheads))
633 636 pushop.outdatedphases = future
634 637 pushop.fallbackoutdatedphases = fallback
635 638
636 639 @pushdiscovery('obsmarker')
637 640 def _pushdiscoveryobsmarkers(pushop):
638 641 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
639 642 return
640 643
641 644 if not pushop.repo.obsstore:
642 645 return
643 646
644 647 if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
645 648 return
646 649
647 650 repo = pushop.repo
648 651 # very naive computation, which can be quite expensive on a big repo.
649 652 # However, evolution is currently slow on them anyway.
650 653 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
651 654 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
652 655
653 656 @pushdiscovery('bookmarks')
654 657 def _pushdiscoverybookmarks(pushop):
655 658 ui = pushop.ui
656 659 repo = pushop.repo.unfiltered()
657 660 remote = pushop.remote
658 661 ui.debug("checking for updated bookmarks\n")
659 662 ancestors = ()
660 663 if pushop.revs:
661 664 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
662 665 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
663 666
664 667 remotebookmark = listkeys(remote, 'bookmarks')
665 668
666 669 explicit = set([repo._bookmarks.expandname(bookmark)
667 670 for bookmark in pushop.bookmarks])
668 671
669 672 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
670 673 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
671 674
672 675 def safehex(x):
673 676 if x is None:
674 677 return x
675 678 return hex(x)
676 679
677 680 def hexifycompbookmarks(bookmarks):
678 681 return [(b, safehex(scid), safehex(dcid))
679 682 for (b, scid, dcid) in bookmarks]
680 683
681 684 comp = [hexifycompbookmarks(marks) for marks in comp]
682 685 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
683 686
684 687 def _processcompared(pushop, pushed, explicit, remotebms, comp):
685 688 """take decisions on which bookmarks to push, from the comparison data
686 689
687 690 Exists to help extensions that want to alter this behavior.
688 691 """
689 692 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
690 693
691 694 repo = pushop.repo
692 695
693 696 for b, scid, dcid in advsrc:
694 697 if b in explicit:
695 698 explicit.remove(b)
696 699 if not pushed or repo[scid].rev() in pushed:
697 700 pushop.outbookmarks.append((b, dcid, scid))
699 702 # search for added bookmarks
699 702 for b, scid, dcid in addsrc:
700 703 if b in explicit:
701 704 explicit.remove(b)
702 705 pushop.outbookmarks.append((b, '', scid))
704 707 # search for overwritten bookmarks
704 707 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
705 708 if b in explicit:
706 709 explicit.remove(b)
707 710 pushop.outbookmarks.append((b, dcid, scid))
709 712 # search for bookmarks to delete
709 712 for b, scid, dcid in adddst:
710 713 if b in explicit:
711 714 explicit.remove(b)
712 715 # treat as "deleted locally"
713 716 pushop.outbookmarks.append((b, dcid, ''))
714 717 # identical bookmarks shouldn't get reported
715 718 for b, scid, dcid in same:
716 719 if b in explicit:
717 720 explicit.remove(b)
718 721
719 722 if explicit:
720 723 explicit = sorted(explicit)
721 724 # we should probably list all of them
722 725 pushop.ui.warn(_('bookmark %s does not exist on the local '
723 726 'or remote repository!\n') % explicit[0])
724 727 pushop.bkresult = 2
725 728
726 729 pushop.outbookmarks.sort()
727 730
728 731 def _pushcheckoutgoing(pushop):
729 732 outgoing = pushop.outgoing
730 733 unfi = pushop.repo.unfiltered()
731 734 if not outgoing.missing:
732 735 # nothing to push
733 736 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
734 737 return False
735 738 # something to push
736 739 if not pushop.force:
737 740 # if repo.obsstore is empty --> there is nothing obsolete,
738 741 # so we can skip the iteration
739 742 if unfi.obsstore:
740 743 # these messages are defined here to stay within the 80-char limit
741 744 mso = _("push includes obsolete changeset: %s!")
742 745 mspd = _("push includes phase-divergent changeset: %s!")
743 746 mscd = _("push includes content-divergent changeset: %s!")
744 747 mst = {"orphan": _("push includes orphan changeset: %s!"),
745 748 "phase-divergent": mspd,
746 749 "content-divergent": mscd}
747 750 # If we are about to push and there is at least one
748 751 # obsolete or unstable changeset in missing, at
749 752 # least one of the missing heads will be obsolete or
750 753 # unstable. So checking only the heads is ok.
751 754 for node in outgoing.missingheads:
752 755 ctx = unfi[node]
753 756 if ctx.obsolete():
754 757 raise error.Abort(mso % ctx)
755 758 elif ctx.isunstable():
756 759 # TODO print more than one instability in the abort
757 760 # message
758 761 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
759 762
760 763 discovery.checkheads(pushop)
761 764 return True
762 765
763 766 # List of names of steps to perform for an outgoing bundle2, order matters.
764 767 b2partsgenorder = []
765 768
766 769 # Mapping between step name and function
767 770 #
768 771 # This exists to help extensions wrap steps if necessary
769 772 b2partsgenmapping = {}
770 773
771 774 def b2partsgenerator(stepname, idx=None):
772 775 """decorator for function generating bundle2 part
773 776
774 777 The function is added to the step -> function mapping and appended to the
775 778 list of steps. Beware that decorated functions will be added in order
776 779 (this may matter).
777 780
778 781 You can only use this decorator for new steps; if you want to wrap a step
779 782 from an extension, change the b2partsgenmapping dictionary directly."""
780 783 def dec(func):
781 784 assert stepname not in b2partsgenmapping
782 785 b2partsgenmapping[stepname] = func
783 786 if idx is None:
784 787 b2partsgenorder.append(stepname)
785 788 else:
786 789 b2partsgenorder.insert(idx, stepname)
787 790 return func
788 791 return dec
789 792
790 793 def _pushb2ctxcheckheads(pushop, bundler):
791 794 """Generate race condition checking parts
792 795
793 796 Exists as an independent function to aid extensions
794 797 """
795 798 # * 'force' does not check for push races,
796 799 # * if we don't push anything, there is nothing to check.
797 800 if not pushop.force and pushop.outgoing.missingheads:
798 801 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
799 802 emptyremote = pushop.pushbranchmap is None
800 803 if not allowunrelated or emptyremote:
801 804 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
802 805 else:
803 806 affected = set()
804 807 for branch, heads in pushop.pushbranchmap.iteritems():
805 808 remoteheads, newheads, unsyncedheads, discardedheads = heads
806 809 if remoteheads is not None:
807 810 remote = set(remoteheads)
808 811 affected |= set(discardedheads) & remote
809 812 affected |= remote - set(newheads)
810 813 if affected:
811 814 data = iter(sorted(affected))
812 815 bundler.newpart('check:updated-heads', data=data)
813 816
814 817 def _pushing(pushop):
815 818 """return True if we are pushing anything"""
816 819 return bool(pushop.outgoing.missing
817 820 or pushop.outdatedphases
818 821 or pushop.outobsmarkers
819 822 or pushop.outbookmarks)
820 823
821 824 @b2partsgenerator('check-bookmarks')
822 825 def _pushb2checkbookmarks(pushop, bundler):
823 826 """insert bookmark move checking"""
824 827 if not _pushing(pushop) or pushop.force:
825 828 return
826 829 b2caps = bundle2.bundle2caps(pushop.remote)
827 830 hasbookmarkcheck = 'bookmarks' in b2caps
828 831 if not (pushop.outbookmarks and hasbookmarkcheck):
829 832 return
830 833 data = []
831 834 for book, old, new in pushop.outbookmarks:
832 835 old = bin(old)
833 836 data.append((book, old))
834 837 checkdata = bookmod.binaryencode(data)
835 838 bundler.newpart('check:bookmarks', data=checkdata)
836 839
837 840 @b2partsgenerator('check-phases')
838 841 def _pushb2checkphases(pushop, bundler):
839 842 """insert phase move checking"""
840 843 if not _pushing(pushop) or pushop.force:
841 844 return
842 845 b2caps = bundle2.bundle2caps(pushop.remote)
843 846 hasphaseheads = 'heads' in b2caps.get('phases', ())
844 847 if pushop.remotephases is not None and hasphaseheads:
845 848 # check that the remote phase has not changed
846 849 checks = [[] for p in phases.allphases]
847 850 checks[phases.public].extend(pushop.remotephases.publicheads)
848 851 checks[phases.draft].extend(pushop.remotephases.draftroots)
849 852 if any(checks):
850 853 for nodes in checks:
851 854 nodes.sort()
852 855 checkdata = phases.binaryencode(checks)
853 856 bundler.newpart('check:phases', data=checkdata)
854 857
855 858 @b2partsgenerator('changeset')
856 859 def _pushb2ctx(pushop, bundler):
857 860 """handle changegroup push through bundle2
858 861
859 862 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
860 863 """
861 864 if 'changesets' in pushop.stepsdone:
862 865 return
863 866 pushop.stepsdone.add('changesets')
864 867 # Send known heads to the server for race detection.
865 868 if not _pushcheckoutgoing(pushop):
866 869 return
867 870 pushop.repo.prepushoutgoinghooks(pushop)
868 871
869 872 _pushb2ctxcheckheads(pushop, bundler)
870 873
871 874 b2caps = bundle2.bundle2caps(pushop.remote)
872 875 version = '01'
873 876 cgversions = b2caps.get('changegroup')
874 877 if cgversions: # 3.1 and 3.2 ship with an empty value
875 878 cgversions = [v for v in cgversions
876 879 if v in changegroup.supportedoutgoingversions(
877 880 pushop.repo)]
878 881 if not cgversions:
879 882 raise ValueError(_('no common changegroup version'))
880 883 version = max(cgversions)
881 884 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
882 885 'push')
883 886 cgpart = bundler.newpart('changegroup', data=cgstream)
884 887 if cgversions:
885 888 cgpart.addparam('version', version)
886 889 if 'treemanifest' in pushop.repo.requirements:
887 890 cgpart.addparam('treemanifest', '1')
888 891 def handlereply(op):
889 892 """extract addchangegroup returns from server reply"""
890 893 cgreplies = op.records.getreplies(cgpart.id)
891 894 assert len(cgreplies['changegroup']) == 1
892 895 pushop.cgresult = cgreplies['changegroup'][0]['return']
893 896 return handlereply
894 897
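# The changegroup version negotiation above, worked through with
# hypothetical capability sets: keep the intersection of what the server
# advertises and what this client can emit, then pick the highest.
server_cgversions = ['01', '02']   # hypothetical b2caps.get('changegroup')
client_supported = {'01', '02'}    # hypothetical supportedoutgoingversions()
common = [v for v in server_cgversions if v in client_supported]
version = max(common)   # -> '02'; the real code raises ValueError if empty,
                        # and an absent list (hg 3.1/3.2) keeps the '01' default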
895 898 @b2partsgenerator('phase')
896 899 def _pushb2phases(pushop, bundler):
897 900 """handle phase push through bundle2"""
898 901 if 'phases' in pushop.stepsdone:
899 902 return
900 903 b2caps = bundle2.bundle2caps(pushop.remote)
901 904 ui = pushop.repo.ui
902 905
903 906 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
904 907 haspushkey = 'pushkey' in b2caps
905 908 hasphaseheads = 'heads' in b2caps.get('phases', ())
906 909
907 910 if hasphaseheads and not legacyphase:
908 911 return _pushb2phaseheads(pushop, bundler)
909 912 elif haspushkey:
910 913 return _pushb2phasespushkey(pushop, bundler)
911 914
912 915 def _pushb2phaseheads(pushop, bundler):
913 916 """push phase information through a bundle2 - binary part"""
914 917 pushop.stepsdone.add('phases')
915 918 if pushop.outdatedphases:
916 919 updates = [[] for p in phases.allphases]
917 920 updates[0].extend(h.node() for h in pushop.outdatedphases)
918 921 phasedata = phases.binaryencode(updates)
919 922 bundler.newpart('phase-heads', data=phasedata)
920 923
921 924 def _pushb2phasespushkey(pushop, bundler):
922 925 """push phase information through a bundle2 - pushkey part"""
923 926 pushop.stepsdone.add('phases')
924 927 part2node = []
925 928
926 929 def handlefailure(pushop, exc):
927 930 targetid = int(exc.partid)
928 931 for partid, node in part2node:
929 932 if partid == targetid:
930 933 raise error.Abort(_('updating %s to public failed') % node)
931 934
932 935 enc = pushkey.encode
933 936 for newremotehead in pushop.outdatedphases:
934 937 part = bundler.newpart('pushkey')
935 938 part.addparam('namespace', enc('phases'))
936 939 part.addparam('key', enc(newremotehead.hex()))
937 940 part.addparam('old', enc('%d' % phases.draft))
938 941 part.addparam('new', enc('%d' % phases.public))
939 942 part2node.append((part.id, newremotehead))
940 943 pushop.pkfailcb[part.id] = handlefailure
941 944
942 945 def handlereply(op):
943 946 for partid, node in part2node:
944 947 partrep = op.records.getreplies(partid)
945 948 results = partrep['pushkey']
946 949 assert len(results) <= 1
947 950 msg = None
948 951 if not results:
949 952 msg = _('server ignored update of %s to public!\n') % node
950 953 elif not int(results[0]['return']):
951 954 msg = _('updating %s to public failed!\n') % node
952 955 if msg is not None:
953 956 pushop.ui.warn(msg)
954 957 return handlereply
955 958
956 959 @b2partsgenerator('obsmarkers')
957 960 def _pushb2obsmarkers(pushop, bundler):
958 961 if 'obsmarkers' in pushop.stepsdone:
959 962 return
960 963 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
961 964 if obsolete.commonversion(remoteversions) is None:
962 965 return
963 966 pushop.stepsdone.add('obsmarkers')
964 967 if pushop.outobsmarkers:
965 968 markers = sorted(pushop.outobsmarkers)
966 969 bundle2.buildobsmarkerspart(bundler, markers)
967 970
968 971 @b2partsgenerator('bookmarks')
969 972 def _pushb2bookmarks(pushop, bundler):
970 973 """handle bookmark push through bundle2"""
971 974 if 'bookmarks' in pushop.stepsdone:
972 975 return
973 976 b2caps = bundle2.bundle2caps(pushop.remote)
974 977
975 978 legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
976 979 legacybooks = 'bookmarks' in legacy
977 980
978 981 if not legacybooks and 'bookmarks' in b2caps:
979 982 return _pushb2bookmarkspart(pushop, bundler)
980 983 elif 'pushkey' in b2caps:
981 984 return _pushb2bookmarkspushkey(pushop, bundler)
982 985
983 986 def _bmaction(old, new):
984 987 """small utility for bookmark pushing"""
985 988 if not old:
986 989 return 'export'
987 990 elif not new:
988 991 return 'delete'
989 992 return 'update'
990 993
991 994 def _pushb2bookmarkspart(pushop, bundler):
992 995 pushop.stepsdone.add('bookmarks')
993 996 if not pushop.outbookmarks:
994 997 return
995 998
996 999 allactions = []
997 1000 data = []
998 1001 for book, old, new in pushop.outbookmarks:
999 1002 new = bin(new)
1000 1003 data.append((book, new))
1001 1004 allactions.append((book, _bmaction(old, new)))
1002 1005 checkdata = bookmod.binaryencode(data)
1003 1006 bundler.newpart('bookmarks', data=checkdata)
1004 1007
1005 1008 def handlereply(op):
1006 1009 ui = pushop.ui
1007 1010 # if success
1008 1011 for book, action in allactions:
1009 1012 ui.status(bookmsgmap[action][0] % book)
1010 1013
1011 1014 return handlereply
1012 1015
1013 1016 def _pushb2bookmarkspushkey(pushop, bundler):
1014 1017 pushop.stepsdone.add('bookmarks')
1015 1018 part2book = []
1016 1019 enc = pushkey.encode
1017 1020
1018 1021 def handlefailure(pushop, exc):
1019 1022 targetid = int(exc.partid)
1020 1023 for partid, book, action in part2book:
1021 1024 if partid == targetid:
1022 1025 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1023 1026 # we should not be called for parts we did not generate
1024 1027 assert False
1025 1028
1026 1029 for book, old, new in pushop.outbookmarks:
1027 1030 part = bundler.newpart('pushkey')
1028 1031 part.addparam('namespace', enc('bookmarks'))
1029 1032 part.addparam('key', enc(book))
1030 1033 part.addparam('old', enc(old))
1031 1034 part.addparam('new', enc(new))
1032 1035 action = 'update'
1033 1036 if not old:
1034 1037 action = 'export'
1035 1038 elif not new:
1036 1039 action = 'delete'
1037 1040 part2book.append((part.id, book, action))
1038 1041 pushop.pkfailcb[part.id] = handlefailure
1039 1042
1040 1043 def handlereply(op):
1041 1044 ui = pushop.ui
1042 1045 for partid, book, action in part2book:
1043 1046 partrep = op.records.getreplies(partid)
1044 1047 results = partrep['pushkey']
1045 1048 assert len(results) <= 1
1046 1049 if not results:
1047 1050 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
1048 1051 else:
1049 1052 ret = int(results[0]['return'])
1050 1053 if ret:
1051 1054 ui.status(bookmsgmap[action][0] % book)
1052 1055 else:
1053 1056 ui.warn(bookmsgmap[action][1] % book)
1054 1057 if pushop.bkresult is not None:
1055 1058 pushop.bkresult = 1
1056 1059 return handlereply
1057 1060
1058 1061 @b2partsgenerator('pushvars', idx=0)
1059 1062 def _getbundlesendvars(pushop, bundler):
1060 1063 '''send shellvars via bundle2'''
1061 1064 pushvars = pushop.pushvars
1062 1065 if pushvars:
1063 1066 shellvars = {}
1064 1067 for raw in pushvars:
1065 1068 if '=' not in raw:
1066 1069 msg = ("unable to parse variable '%s', should follow "
1067 1070 "'KEY=VALUE' or 'KEY=' format")
1068 1071 raise error.Abort(msg % raw)
1069 1072 k, v = raw.split('=', 1)
1070 1073 shellvars[k] = v
1071 1074
1072 1075 part = bundler.newpart('pushvars')
1073 1076
1074 1077 for key, value in shellvars.iteritems():
1075 1078 part.addparam(key, value, mandatory=False)
1076 1079
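# Hedged usage sketch for the pushvars part built above: each KEY=VALUE
# argument becomes a mandatory=False part parameter (the variable name here
# is made up), which server-side hooks can then observe:
#
#   $ hg push --pushvars "DEBUG_PUSH=1"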
1077 1080 def _pushbundle2(pushop):
1078 1081 """push data to the remote using bundle2
1079 1082
1080 1083 The only currently supported type of data is changegroup but this will
1081 1084 evolve in the future."""
1082 1085 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1083 1086 pushback = (pushop.trmanager
1084 1087 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
1085 1088
1086 1089 # create reply capability
1087 1090 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
1088 1091 allowpushback=pushback,
1089 1092 role='client'))
1090 1093 bundler.newpart('replycaps', data=capsblob)
1091 1094 replyhandlers = []
1092 1095 for partgenname in b2partsgenorder:
1093 1096 partgen = b2partsgenmapping[partgenname]
1094 1097 ret = partgen(pushop, bundler)
1095 1098 if callable(ret):
1096 1099 replyhandlers.append(ret)
1097 1100 # do not push if nothing to push
1098 1101 if bundler.nbparts <= 1:
1099 1102 return
1100 1103 stream = util.chunkbuffer(bundler.getchunks())
1101 1104 try:
1102 1105 try:
1103 1106 with pushop.remote.commandexecutor() as e:
1104 1107 reply = e.callcommand('unbundle', {
1105 1108 'bundle': stream,
1106 1109 'heads': ['force'],
1107 1110 'url': pushop.remote.url(),
1108 1111 }).result()
1109 1112 except error.BundleValueError as exc:
1110 1113 raise error.Abort(_('missing support for %s') % exc)
1111 1114 try:
1112 1115 trgetter = None
1113 1116 if pushback:
1114 1117 trgetter = pushop.trmanager.transaction
1115 1118 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1116 1119 except error.BundleValueError as exc:
1117 1120 raise error.Abort(_('missing support for %s') % exc)
1118 1121 except bundle2.AbortFromPart as exc:
1119 1122 pushop.ui.status(_('remote: %s\n') % exc)
1120 1123 if exc.hint is not None:
1121 1124 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
1122 1125 raise error.Abort(_('push failed on remote'))
1123 1126 except error.PushkeyFailed as exc:
1124 1127 partid = int(exc.partid)
1125 1128 if partid not in pushop.pkfailcb:
1126 1129 raise
1127 1130 pushop.pkfailcb[partid](pushop, exc)
1128 1131 for rephand in replyhandlers:
1129 1132 rephand(op)
1130 1133
1131 1134 def _pushchangeset(pushop):
1132 1135 """Make the actual push of changeset bundle to remote repo"""
1133 1136 if 'changesets' in pushop.stepsdone:
1134 1137 return
1135 1138 pushop.stepsdone.add('changesets')
1136 1139 if not _pushcheckoutgoing(pushop):
1137 1140 return
1138 1141
1139 1142 # Should have verified this in push().
1140 1143 assert pushop.remote.capable('unbundle')
1141 1144
1142 1145 pushop.repo.prepushoutgoinghooks(pushop)
1143 1146 outgoing = pushop.outgoing
1144 1147 # TODO: get bundlecaps from remote
1145 1148 bundlecaps = None
1146 1149 # create a changegroup from local
1147 1150 if pushop.revs is None and not (outgoing.excluded
1148 1151 or pushop.repo.changelog.filteredrevs):
1149 1152 # push everything,
1150 1153 # use the fast path, no race possible on push
1151 1154 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1152 1155 fastpath=True, bundlecaps=bundlecaps)
1153 1156 else:
1154 1157 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1155 1158 'push', bundlecaps=bundlecaps)
1156 1159
1157 1160 # apply changegroup to remote
1158 1161 # local repo finds heads on server, finds out what
1159 1162 # revs it must push. once revs transferred, if server
1160 1163 # finds it has different heads (someone else won
1161 1164 # commit/push race), server aborts.
1162 1165 if pushop.force:
1163 1166 remoteheads = ['force']
1164 1167 else:
1165 1168 remoteheads = pushop.remoteheads
1166 1169 # ssh: return remote's addchangegroup()
1167 1170 # http: return remote's addchangegroup() or 0 for error
1168 1171 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1169 1172 pushop.repo.url())
1170 1173
1171 1174 def _pushsyncphase(pushop):
1172 1175 """synchronise phase information locally and remotely"""
1173 1176 cheads = pushop.commonheads
1174 1177 # even when we don't push, exchanging phase data is useful
1175 1178 remotephases = listkeys(pushop.remote, 'phases')
1176 1179 if (pushop.ui.configbool('ui', '_usedassubrepo')
1177 1180 and remotephases # server supports phases
1178 1181 and pushop.cgresult is None # nothing was pushed
1179 1182 and remotephases.get('publishing', False)):
1180 1183 # When:
1181 1184 # - this is a subrepo push
1182 1185 # - and the remote supports phases
1183 1186 # - and no changeset was pushed
1184 1187 # - and the remote is publishing
1185 1188 # We may be in issue 3871 case!
1186 1189 # We drop the phase synchronisation that would usually be done as a
1187 1190 # courtesy, since it could publish changesets that are still draft
1188 1191 # on the remote.
1189 1192 remotephases = {'publishing': 'True'}
1190 1193 if not remotephases: # old server or public-only reply from non-publishing
1191 1194 _localphasemove(pushop, cheads)
1192 1195 # don't push any phase data as there is nothing to push
1193 1196 else:
1194 1197 ana = phases.analyzeremotephases(pushop.repo, cheads,
1195 1198 remotephases)
1196 1199 pheads, droots = ana
1197 1200 ### Apply remote phase on local
1198 1201 if remotephases.get('publishing', False):
1199 1202 _localphasemove(pushop, cheads)
1200 1203 else: # publish = False
1201 1204 _localphasemove(pushop, pheads)
1202 1205 _localphasemove(pushop, cheads, phases.draft)
1203 1206 ### Apply local phase on remote
1204 1207
1205 1208 if pushop.cgresult:
1206 1209 if 'phases' in pushop.stepsdone:
1207 1210 # phases already pushed through bundle2
1208 1211 return
1209 1212 outdated = pushop.outdatedphases
1210 1213 else:
1211 1214 outdated = pushop.fallbackoutdatedphases
1212 1215
1213 1216 pushop.stepsdone.add('phases')
1214 1217
1215 1218 # filter heads already turned public by the push
1216 1219 outdated = [c for c in outdated if c.node() not in pheads]
1217 1220 # fallback to independent pushkey command
1218 1221 for newremotehead in outdated:
1219 1222 with pushop.remote.commandexecutor() as e:
1220 1223 r = e.callcommand('pushkey', {
1221 1224 'namespace': 'phases',
1222 1225 'key': newremotehead.hex(),
1223 1226 'old': '%d' % phases.draft,
1224 1227 'new': '%d' % phases.public
1225 1228 }).result()
1226 1229
1227 1230 if not r:
1228 1231 pushop.ui.warn(_('updating %s to public failed!\n')
1229 1232 % newremotehead)
1230 1233
1231 1234 def _localphasemove(pushop, nodes, phase=phases.public):
1232 1235 """move <nodes> to <phase> in the local source repo"""
1233 1236 if pushop.trmanager:
1234 1237 phases.advanceboundary(pushop.repo,
1235 1238 pushop.trmanager.transaction(),
1236 1239 phase,
1237 1240 nodes)
1238 1241 else:
1239 1242 # repo is not locked, do not change any phases!
1240 1243 # Informs the user that phases should have been moved when
1241 1244 # applicable.
1242 1245 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1243 1246 phasestr = phases.phasenames[phase]
1244 1247 if actualmoves:
1245 1248 pushop.ui.status(_('cannot lock source repo, skipping '
1246 1249 'local %s phase update\n') % phasestr)
1247 1250
1248 1251 def _pushobsolete(pushop):
1249 1252 """utility function to push obsolete markers to a remote"""
1250 1253 if 'obsmarkers' in pushop.stepsdone:
1251 1254 return
1252 1255 repo = pushop.repo
1253 1256 remote = pushop.remote
1254 1257 pushop.stepsdone.add('obsmarkers')
1255 1258 if pushop.outobsmarkers:
1256 1259 pushop.ui.debug('try to push obsolete markers to remote\n')
1257 1260 rslts = []
1258 1261 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1259 1262 for key in sorted(remotedata, reverse=True):
1260 1263 # reverse sort to ensure we end with dump0
1261 1264 data = remotedata[key]
1262 1265 rslts.append(remote.pushkey('obsolete', key, '', data))
1263 1266 if [r for r in rslts if not r]:
1264 1267 msg = _('failed to push some obsolete markers!\n')
1265 1268 repo.ui.warn(msg)
1266 1269
1267 1270 def _pushbookmark(pushop):
1268 1271 """Update bookmark position on remote"""
1269 1272 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1270 1273 return
1271 1274 pushop.stepsdone.add('bookmarks')
1272 1275 ui = pushop.ui
1273 1276 remote = pushop.remote
1274 1277
1275 1278 for b, old, new in pushop.outbookmarks:
1276 1279 action = 'update'
1277 1280 if not old:
1278 1281 action = 'export'
1279 1282 elif not new:
1280 1283 action = 'delete'
1281 1284
1282 1285 with remote.commandexecutor() as e:
1283 1286 r = e.callcommand('pushkey', {
1284 1287 'namespace': 'bookmarks',
1285 1288 'key': b,
1286 1289 'old': old,
1287 1290 'new': new,
1288 1291 }).result()
1289 1292
1290 1293 if r:
1291 1294 ui.status(bookmsgmap[action][0] % b)
1292 1295 else:
1293 1296 ui.warn(bookmsgmap[action][1] % b)
1294 1297 # discovery can have set the value from an invalid entry
1295 1298 if pushop.bkresult is not None:
1296 1299 pushop.bkresult = 1
1297 1300
1298 1301 class pulloperation(object):
1299 1302 """An object that represents a single pull operation
1300 1303
1301 1304 Its purpose is to carry pull related state and very common operations.
1302 1305
1303 1306 A new one should be created at the beginning of each pull and discarded
1304 1307 afterward.
1305 1308 """
1306 1309
1307 1310 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1308 1311 remotebookmarks=None, streamclonerequested=None):
1309 1312 # repo we pull into
1310 1313 self.repo = repo
1311 1314 # repo we pull from
1312 1315 self.remote = remote
1313 1316 # revision we try to pull (None is "all")
1314 1317 self.heads = heads
1315 1318 # bookmark pulled explicitly
1316 1319 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1317 1320 for bookmark in bookmarks]
1318 1321 # do we force pull?
1319 1322 self.force = force
1320 1323 # whether a streaming clone was requested
1321 1324 self.streamclonerequested = streamclonerequested
1322 1325 # transaction manager
1323 1326 self.trmanager = None
1324 1327 # set of common changesets between local and remote before pull
1325 1328 self.common = None
1326 1329 # set of pulled heads
1327 1330 self.rheads = None
1328 1331 # list of missing changesets to fetch remotely
1329 1332 self.fetch = None
1330 1333 # remote bookmarks data
1331 1334 self.remotebookmarks = remotebookmarks
1332 1335 # result of changegroup pulling (used as return code by pull)
1333 1336 self.cgresult = None
1334 1337 # list of steps already done
1335 1338 self.stepsdone = set()
1336 1339 # Whether we attempted a clone from pre-generated bundles.
1337 1340 self.clonebundleattempted = False
1338 1341
1339 1342 @util.propertycache
1340 1343 def pulledsubset(self):
1341 1344 """heads of the set of changesets targeted by the pull"""
1342 1345 # compute target subset
1343 1346 if self.heads is None:
1344 1347 # We pulled everything possible
1345 1348 # sync on everything common
1346 1349 c = set(self.common)
1347 1350 ret = list(self.common)
1348 1351 for n in self.rheads:
1349 1352 if n not in c:
1350 1353 ret.append(n)
1351 1354 return ret
1352 1355 else:
1353 1356 # We pulled a specific subset
1354 1357 # sync on this subset
1355 1358 return self.heads
1356 1359
1357 1360 @util.propertycache
1358 1361 def canusebundle2(self):
1359 1362 return not _forcebundle1(self)
1360 1363
1361 1364 @util.propertycache
1362 1365 def remotebundle2caps(self):
1363 1366 return bundle2.bundle2caps(self.remote)
1364 1367
1365 1368 def gettransaction(self):
1366 1369 # deprecated; talk to trmanager directly
1367 1370 return self.trmanager.transaction()
1368 1371
1369 1372 class transactionmanager(util.transactional):
1370 1373 """An object to manage the life cycle of a transaction
1371 1374
1372 1375 It creates the transaction on demand and calls the appropriate hooks when
1373 1376 closing the transaction."""
1374 1377 def __init__(self, repo, source, url):
1375 1378 self.repo = repo
1376 1379 self.source = source
1377 1380 self.url = url
1378 1381 self._tr = None
1379 1382
1380 1383 def transaction(self):
1381 1384 """Return an open transaction object, constructing if necessary"""
1382 1385 if not self._tr:
1383 1386 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1384 1387 self._tr = self.repo.transaction(trname)
1385 1388 self._tr.hookargs['source'] = self.source
1386 1389 self._tr.hookargs['url'] = self.url
1387 1390 return self._tr
1388 1391
1389 1392 def close(self):
1390 1393 """close transaction if created"""
1391 1394 if self._tr is not None:
1392 1395 self._tr.close()
1393 1396
1394 1397 def release(self):
1395 1398 """release transaction if created"""
1396 1399 if self._tr is not None:
1397 1400 self._tr.release()
1398 1401
1399 1402 def listkeys(remote, namespace):
1400 1403 with remote.commandexecutor() as e:
1401 1404 return e.callcommand('listkeys', {'namespace': namespace}).result()
1402 1405
1403 1406 def _fullpullbundle2(repo, pullop):
1404 1407 # The server may send a partial reply, e.g. when inlining
1405 1408 # pre-computed bundles. In that case, update the common
1406 1409 # set based on the results and pull another bundle.
1407 1410 #
1408 1411 # There are two indicators that the process is finished:
1409 1412 # - no changeset has been added, or
1410 1413 # - all remote heads are known locally.
1411 1414 # The head check must use the unfiltered view as obsolescence
1412 1415 # markers can hide heads.
1413 1416 unfi = repo.unfiltered()
1414 1417 unficl = unfi.changelog
1415 1418 def headsofdiff(h1, h2):
1416 1419 """Returns heads(h1 % h2)"""
1417 1420 res = unfi.set('heads(%ln %% %ln)', h1, h2)
1418 1421 return set(ctx.node() for ctx in res)
1419 1422 def headsofunion(h1, h2):
1420 1423 """Returns heads((h1 + h2) - null)"""
1421 1424 res = unfi.set('heads((%ln + %ln - null))', h1, h2)
1422 1425 return set(ctx.node() for ctx in res)
1423 1426 while True:
1424 1427 old_heads = unficl.heads()
1425 1428 clstart = len(unficl)
1426 1429 _pullbundle2(pullop)
1427 1430 if changegroup.NARROW_REQUIREMENT in repo.requirements:
1428 1431 # XXX narrow clones filter the heads on the server side during
1429 1432 # XXX getbundle and result in partial replies as well.
1430 1433 # XXX Disable pull bundles in this case as band aid to avoid
1431 1434 # XXX extra round trips.
1432 1435 break
1433 1436 if clstart == len(unficl):
1434 1437 break
1435 1438 if all(unficl.hasnode(n) for n in pullop.rheads):
1436 1439 break
1437 1440 new_heads = headsofdiff(unficl.heads(), old_heads)
1438 1441 pullop.common = headsofunion(new_heads, pullop.common)
1439 1442 pullop.rheads = set(pullop.rheads) - pullop.common
1440 1443
1441 1444 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1442 1445 streamclonerequested=None):
1443 1446 """Fetch repository data from a remote.
1444 1447
1445 1448 This is the main function used to retrieve data from a remote repository.
1446 1449
1447 1450 ``repo`` is the local repository to clone into.
1448 1451 ``remote`` is a peer instance.
1449 1452 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1450 1453 default) means to pull everything from the remote.
1451 1454 ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
1452 1455 default, all remote bookmarks are pulled.
1453 1456 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1454 1457 initialization.
1455 1458 ``streamclonerequested`` is a boolean indicating whether a "streaming
1456 1459 clone" is requested. A "streaming clone" is essentially a raw file copy
1457 1460 of revlogs from the server. This only works when the local repository is
1458 1461 empty. The default value of ``None`` means to respect the server
1459 1462 configuration for preferring stream clones.
1460 1463
1461 1464 Returns the ``pulloperation`` created for this pull.
1462 1465 """
1463 1466 if opargs is None:
1464 1467 opargs = {}
1465 1468 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1466 1469 streamclonerequested=streamclonerequested,
1467 1470 **pycompat.strkwargs(opargs))
1468 1471
1469 1472 peerlocal = pullop.remote.local()
1470 1473 if peerlocal:
1471 1474 missing = set(peerlocal.requirements) - pullop.repo.supported
1472 1475 if missing:
1473 1476 msg = _("required features are not"
1474 1477 " supported in the destination:"
1475 1478 " %s") % (', '.join(sorted(missing)))
1476 1479 raise error.Abort(msg)
1477 1480
1478 1481 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1479 1482 with repo.wlock(), repo.lock(), pullop.trmanager:
1480 1483 # This should ideally be in _pullbundle2(). However, it needs to run
1481 1484 # before discovery to avoid extra work.
1482 1485 _maybeapplyclonebundle(pullop)
1483 1486 streamclone.maybeperformlegacystreamclone(pullop)
1484 1487 _pulldiscovery(pullop)
1485 1488 if pullop.canusebundle2:
1486 1489 _fullpullbundle2(repo, pullop)
1487 1490 _pullchangeset(pullop)
1488 1491 _pullphase(pullop)
1489 1492 _pullbookmarks(pullop)
1490 1493 _pullobsolete(pullop)
1491 1494
1492 1495 # storing remotenames
1493 1496 if repo.ui.configbool('experimental', 'remotenames'):
1494 1497 logexchange.pullremotenames(repo, remote)
1495 1498
1496 1499 return pullop
1497 1500
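# Hedged sketch of driving the pull() API above from outside this module,
# e.g. from an extension command; the URL and bookmark name are illustrative.
def _examplepull(repo):
    # imported locally because mercurial.hg itself imports this module
    from mercurial import hg
    peer = hg.peer(repo.ui, {}, 'ssh://user@dummy/server')
    pullop = pull(repo, peer, heads=None, bookmarks=('foo',))
    return pullop.cgresult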
1498 1501 # list of steps to perform discovery before pull
1499 1502 pulldiscoveryorder = []
1500 1503
1501 1504 # Mapping between step name and function
1502 1505 #
1503 1506 # This exists to help extensions wrap steps if necessary
1504 1507 pulldiscoverymapping = {}
1505 1508
1506 1509 def pulldiscovery(stepname):
1507 1510 """decorator for function performing discovery before pull
1508 1511
1509 1512 The function is added to the step -> function mapping and appended to the
 1510 1513 list of steps. Beware that decorated functions will be added in order (this
1511 1514 may matter).
1512 1515
 1513 1516 You can only use this decorator for a new step; if you want to wrap a step
 1514 1517 from an extension, change the pulldiscoverymapping dictionary directly."""
1515 1518 def dec(func):
1516 1519 assert stepname not in pulldiscoverymapping
1517 1520 pulldiscoverymapping[stepname] = func
1518 1521 pulldiscoveryorder.append(stepname)
1519 1522 return func
1520 1523 return dec
1521 1524
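# Hedged sketch: an extension registering an additional discovery step with
# the decorator above. The step name and body are hypothetical; the real
# steps ('b1:bookmarks', 'changegroup') are registered the same way below.
@pulldiscovery('example:noop')
def _pulldiscoveryexample(pullop):
    pullop.repo.ui.debug('example discovery step\n')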
1522 1525 def _pulldiscovery(pullop):
1523 1526 """Run all discovery steps"""
1524 1527 for stepname in pulldiscoveryorder:
1525 1528 step = pulldiscoverymapping[stepname]
1526 1529 step(pullop)
1527 1530
1528 1531 @pulldiscovery('b1:bookmarks')
1529 1532 def _pullbookmarkbundle1(pullop):
1530 1533 """fetch bookmark data in bundle1 case
1531 1534
1532 1535 If not using bundle2, we have to fetch bookmarks before changeset
1533 1536 discovery to reduce the chance and impact of race conditions."""
1534 1537 if pullop.remotebookmarks is not None:
1535 1538 return
1536 1539 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
 1537 1540 # all known bundle2 servers now support listkeys, but let's be nice with
 1538 1541 # new implementations.
1539 1542 return
1540 1543 books = listkeys(pullop.remote, 'bookmarks')
1541 1544 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1542 1545
1543 1546
1544 1547 @pulldiscovery('changegroup')
1545 1548 def _pulldiscoverychangegroup(pullop):
1546 1549 """discovery phase for the pull
1547 1550
 1548 1551 Currently handles changeset discovery only; will change to handle all
 1549 1552 discovery at some point.
1550 1553 tmp = discovery.findcommonincoming(pullop.repo,
1551 1554 pullop.remote,
1552 1555 heads=pullop.heads,
1553 1556 force=pullop.force)
1554 1557 common, fetch, rheads = tmp
1555 1558 nm = pullop.repo.unfiltered().changelog.nodemap
1556 1559 if fetch and rheads:
 1557 1560 # If a remote head is filtered locally, put it back in common.
1558 1561 #
 1559 1562 # This is a hackish solution to catch most "common but locally
 1560 1563 # hidden" situations. We do not perform discovery on the unfiltered
 1561 1564 # repository because it ends up doing a pathological number of round
 1562 1565 # trips for a huge number of changesets we do not care about.
 1563 1566 #
 1564 1567 # If a set of such "common but filtered" changesets exists on the server
 1565 1568 # but does not include a remote head, we will not be able to detect it.
1566 1569 scommon = set(common)
1567 1570 for n in rheads:
1568 1571 if n in nm:
1569 1572 if n not in scommon:
1570 1573 common.append(n)
1571 1574 if set(rheads).issubset(set(common)):
1572 1575 fetch = []
1573 1576 pullop.common = common
1574 1577 pullop.fetch = fetch
1575 1578 pullop.rheads = rheads
1576 1579
1577 1580 def _pullbundle2(pullop):
1578 1581 """pull data using bundle2
1579 1582
 1580 1583 For now, the only supported data is the changegroup.
1581 1584 kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
1582 1585
1583 1586 # make ui easier to access
1584 1587 ui = pullop.repo.ui
1585 1588
1586 1589 # At the moment we don't do stream clones over bundle2. If that is
1587 1590 # implemented then here's where the check for that will go.
1588 1591 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1589 1592
1590 1593 # declare pull perimeters
1591 1594 kwargs['common'] = pullop.common
1592 1595 kwargs['heads'] = pullop.heads or pullop.rheads
1593 1596
1594 1597 if streaming:
1595 1598 kwargs['cg'] = False
1596 1599 kwargs['stream'] = True
1597 1600 pullop.stepsdone.add('changegroup')
1598 1601 pullop.stepsdone.add('phases')
1599 1602
1600 1603 else:
1601 1604 # pulling changegroup
1602 1605 pullop.stepsdone.add('changegroup')
1603 1606
1604 1607 kwargs['cg'] = pullop.fetch
1605 1608
1606 1609 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1607 1610 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1608 1611 if (not legacyphase and hasbinaryphase):
1609 1612 kwargs['phases'] = True
1610 1613 pullop.stepsdone.add('phases')
1611 1614
1612 1615 if 'listkeys' in pullop.remotebundle2caps:
1613 1616 if 'phases' not in pullop.stepsdone:
1614 1617 kwargs['listkeys'] = ['phases']
1615 1618
1616 1619 bookmarksrequested = False
1617 1620 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1618 1621 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1619 1622
1620 1623 if pullop.remotebookmarks is not None:
1621 1624 pullop.stepsdone.add('request-bookmarks')
1622 1625
1623 1626 if ('request-bookmarks' not in pullop.stepsdone
1624 1627 and pullop.remotebookmarks is None
1625 1628 and not legacybookmark and hasbinarybook):
1626 1629 kwargs['bookmarks'] = True
1627 1630 bookmarksrequested = True
1628 1631
1629 1632 if 'listkeys' in pullop.remotebundle2caps:
1630 1633 if 'request-bookmarks' not in pullop.stepsdone:
 1631 1634 # make sure to always include bookmark data when migrating
1632 1635 # `hg incoming --bundle` to using this function.
1633 1636 pullop.stepsdone.add('request-bookmarks')
1634 1637 kwargs.setdefault('listkeys', []).append('bookmarks')
1635 1638
1636 1639 # If this is a full pull / clone and the server supports the clone bundles
1637 1640 # feature, tell the server whether we attempted a clone bundle. The
1638 1641 # presence of this flag indicates the client supports clone bundles. This
1639 1642 # will enable the server to treat clients that support clone bundles
1640 1643 # differently from those that don't.
1641 1644 if (pullop.remote.capable('clonebundles')
1642 1645 and pullop.heads is None and list(pullop.common) == [nullid]):
1643 1646 kwargs['cbattempted'] = pullop.clonebundleattempted
1644 1647
1645 1648 if streaming:
1646 1649 pullop.repo.ui.status(_('streaming all changes\n'))
1647 1650 elif not pullop.fetch:
1648 1651 pullop.repo.ui.status(_("no changes found\n"))
1649 1652 pullop.cgresult = 0
1650 1653 else:
1651 1654 if pullop.heads is None and list(pullop.common) == [nullid]:
1652 1655 pullop.repo.ui.status(_("requesting all changes\n"))
1653 1656 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1654 1657 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1655 1658 if obsolete.commonversion(remoteversions) is not None:
1656 1659 kwargs['obsmarkers'] = True
1657 1660 pullop.stepsdone.add('obsmarkers')
1658 1661 _pullbundle2extraprepare(pullop, kwargs)
1659 1662
1660 1663 with pullop.remote.commandexecutor() as e:
1661 1664 args = dict(kwargs)
1662 1665 args['source'] = 'pull'
1663 1666 bundle = e.callcommand('getbundle', args).result()
1664 1667
1665 1668 try:
1666 1669 op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
1667 1670 source='pull')
1668 1671 op.modes['bookmarks'] = 'records'
1669 1672 bundle2.processbundle(pullop.repo, bundle, op=op)
1670 1673 except bundle2.AbortFromPart as exc:
1671 1674 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1672 1675 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1673 1676 except error.BundleValueError as exc:
1674 1677 raise error.Abort(_('missing support for %s') % exc)
1675 1678
1676 1679 if pullop.fetch:
1677 1680 pullop.cgresult = bundle2.combinechangegroupresults(op)
1678 1681
1679 1682 # processing phases change
1680 1683 for namespace, value in op.records['listkeys']:
1681 1684 if namespace == 'phases':
1682 1685 _pullapplyphases(pullop, value)
1683 1686
1684 1687 # processing bookmark update
1685 1688 if bookmarksrequested:
1686 1689 books = {}
1687 1690 for record in op.records['bookmarks']:
1688 1691 books[record['bookmark']] = record["node"]
1689 1692 pullop.remotebookmarks = books
1690 1693 else:
1691 1694 for namespace, value in op.records['listkeys']:
1692 1695 if namespace == 'bookmarks':
1693 1696 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1694 1697
1695 1698 # bookmark data were either already there or pulled in the bundle
1696 1699 if pullop.remotebookmarks is not None:
1697 1700 _pullbookmarks(pullop)
1698 1701
1699 1702 def _pullbundle2extraprepare(pullop, kwargs):
1700 1703 """hook function so that extensions can extend the getbundle call"""
1701 1704
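# Hedged sketch of using the hook above from an extension: wrap it with
# extensions.wrapfunction() so every getbundle request carries an extra,
# hypothetical argument.
#
#     from mercurial import exchange, extensions
#
#     def _extraprepare(orig, pullop, kwargs):
#         kwargs['myextarg'] = True  # hypothetical bundle2 argument
#         return orig(pullop, kwargs)
#
#     extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
#                             _extraprepare)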
1702 1705 def _pullchangeset(pullop):
1703 1706 """pull changeset from unbundle into the local repo"""
 1704 1707 # We delay opening the transaction as late as possible so we
 1705 1708 # don't open a transaction for nothing and don't break a future useful
 1706 1709 # rollback call
1707 1710 if 'changegroup' in pullop.stepsdone:
1708 1711 return
1709 1712 pullop.stepsdone.add('changegroup')
1710 1713 if not pullop.fetch:
1711 1714 pullop.repo.ui.status(_("no changes found\n"))
1712 1715 pullop.cgresult = 0
1713 1716 return
1714 1717 tr = pullop.gettransaction()
1715 1718 if pullop.heads is None and list(pullop.common) == [nullid]:
1716 1719 pullop.repo.ui.status(_("requesting all changes\n"))
1717 1720 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1718 1721 # issue1320, avoid a race if remote changed after discovery
1719 1722 pullop.heads = pullop.rheads
1720 1723
1721 1724 if pullop.remote.capable('getbundle'):
1722 1725 # TODO: get bundlecaps from remote
1723 1726 cg = pullop.remote.getbundle('pull', common=pullop.common,
1724 1727 heads=pullop.heads or pullop.rheads)
1725 1728 elif pullop.heads is None:
1726 1729 with pullop.remote.commandexecutor() as e:
1727 1730 cg = e.callcommand('changegroup', {
1728 1731 'nodes': pullop.fetch,
1729 1732 'source': 'pull',
1730 1733 }).result()
1731 1734
1732 1735 elif not pullop.remote.capable('changegroupsubset'):
1733 1736 raise error.Abort(_("partial pull cannot be done because "
1734 1737 "other repository doesn't support "
1735 1738 "changegroupsubset."))
1736 1739 else:
1737 1740 with pullop.remote.commandexecutor() as e:
1738 1741 cg = e.callcommand('changegroupsubset', {
1739 1742 'bases': pullop.fetch,
1740 1743 'heads': pullop.heads,
1741 1744 'source': 'pull',
1742 1745 }).result()
1743 1746
1744 1747 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1745 1748 pullop.remote.url())
1746 1749 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1747 1750
1748 1751 def _pullphase(pullop):
1749 1752 # Get remote phases data from remote
1750 1753 if 'phases' in pullop.stepsdone:
1751 1754 return
1752 1755 remotephases = listkeys(pullop.remote, 'phases')
1753 1756 _pullapplyphases(pullop, remotephases)
1754 1757
1755 1758 def _pullapplyphases(pullop, remotephases):
1756 1759 """apply phase movement from observed remote state"""
1757 1760 if 'phases' in pullop.stepsdone:
1758 1761 return
1759 1762 pullop.stepsdone.add('phases')
1760 1763 publishing = bool(remotephases.get('publishing', False))
1761 1764 if remotephases and not publishing:
1762 1765 # remote is new and non-publishing
1763 1766 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1764 1767 pullop.pulledsubset,
1765 1768 remotephases)
1766 1769 dheads = pullop.pulledsubset
1767 1770 else:
 1768 1771 # Remote is old or publishing; all common changesets
 1769 1772 # should be seen as public
1770 1773 pheads = pullop.pulledsubset
1771 1774 dheads = []
1772 1775 unfi = pullop.repo.unfiltered()
1773 1776 phase = unfi._phasecache.phase
1774 1777 rev = unfi.changelog.nodemap.get
1775 1778 public = phases.public
1776 1779 draft = phases.draft
1777 1780
1778 1781 # exclude changesets already public locally and update the others
1779 1782 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1780 1783 if pheads:
1781 1784 tr = pullop.gettransaction()
1782 1785 phases.advanceboundary(pullop.repo, tr, public, pheads)
1783 1786
1784 1787 # exclude changesets already draft locally and update the others
1785 1788 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1786 1789 if dheads:
1787 1790 tr = pullop.gettransaction()
1788 1791 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1789 1792
1790 1793 def _pullbookmarks(pullop):
1791 1794 """process the remote bookmark information to update the local one"""
1792 1795 if 'bookmarks' in pullop.stepsdone:
1793 1796 return
1794 1797 pullop.stepsdone.add('bookmarks')
1795 1798 repo = pullop.repo
1796 1799 remotebookmarks = pullop.remotebookmarks
1797 1800 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1798 1801 pullop.remote.url(),
1799 1802 pullop.gettransaction,
1800 1803 explicit=pullop.explicitbookmarks)
1801 1804
1802 1805 def _pullobsolete(pullop):
1803 1806 """utility function to pull obsolete markers from a remote
1804 1807
 1805 1808 The `gettransaction` argument is a function that returns the pull
 1806 1809 transaction, creating one if necessary. We return the transaction to inform
 1807 1810 the calling code that a new transaction has been created (when applicable).
1808 1811
 1809 1812 Exists mostly to allow overriding for experimentation purposes"""
1810 1813 if 'obsmarkers' in pullop.stepsdone:
1811 1814 return
1812 1815 pullop.stepsdone.add('obsmarkers')
1813 1816 tr = None
1814 1817 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1815 1818 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1816 1819 remoteobs = listkeys(pullop.remote, 'obsolete')
1817 1820 if 'dump0' in remoteobs:
1818 1821 tr = pullop.gettransaction()
1819 1822 markers = []
1820 1823 for key in sorted(remoteobs, reverse=True):
1821 1824 if key.startswith('dump'):
1822 1825 data = util.b85decode(remoteobs[key])
1823 1826 version, newmarks = obsolete._readmarkers(data)
1824 1827 markers += newmarks
1825 1828 if markers:
1826 1829 pullop.repo.obsstore.add(tr, markers)
1827 1830 pullop.repo.invalidatevolatilesets()
1828 1831 return tr
1829 1832
1830 1833 def caps20to10(repo, role):
1831 1834 """return a set with appropriate options to use bundle20 during getbundle"""
1832 1835 caps = {'HG20'}
1833 1836 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
1834 1837 caps.add('bundle2=' + urlreq.quote(capsblob))
1835 1838 return caps
1836 1839
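# Hedged illustration: the set returned by caps20to10() looks roughly like
#
#     {'HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02...'}
#
# i.e. the bundle2 capability blob, URL-quoted, carried inside the legacy
# bundle10-style capability list (exact contents depend on the repository).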
1837 1840 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1838 1841 getbundle2partsorder = []
1839 1842
1840 1843 # Mapping between step name and function
1841 1844 #
1842 1845 # This exists to help extensions wrap steps if necessary
1843 1846 getbundle2partsmapping = {}
1844 1847
1845 1848 def getbundle2partsgenerator(stepname, idx=None):
1846 1849 """decorator for function generating bundle2 part for getbundle
1847 1850
1848 1851 The function is added to the step -> function mapping and appended to the
1849 1852 list of steps. Beware that decorated functions will be added in order
1850 1853 (this may matter).
1851 1854
 1852 1855 You can only use this decorator for new steps; if you want to wrap a step
 1853 1856 from an extension, modify the getbundle2partsmapping dictionary directly."""
1854 1857 def dec(func):
1855 1858 assert stepname not in getbundle2partsmapping
1856 1859 getbundle2partsmapping[stepname] = func
1857 1860 if idx is None:
1858 1861 getbundle2partsorder.append(stepname)
1859 1862 else:
1860 1863 getbundle2partsorder.insert(idx, stepname)
1861 1864 return func
1862 1865 return dec
1863 1866
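# Hedged sketch: an extension adding its own part generator through the
# decorator above; the part and capability names are hypothetical.
@getbundle2partsgenerator('example:part')
def _getbundleexamplepart(bundler, repo, source, bundlecaps=None,
                          b2caps=None, **kwargs):
    if 'example' in (b2caps or {}):
        bundler.newpart('example:part', data='payload', mandatory=False)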
1864 1867 def bundle2requested(bundlecaps):
1865 1868 if bundlecaps is not None:
1866 1869 return any(cap.startswith('HG2') for cap in bundlecaps)
1867 1870 return False
1868 1871
1869 1872 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1870 1873 **kwargs):
1871 1874 """Return chunks constituting a bundle's raw data.
1872 1875
1873 1876 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1874 1877 passed.
1875 1878
1876 1879 Returns a 2-tuple of a dict with metadata about the generated bundle
1877 1880 and an iterator over raw chunks (of varying sizes).
1878 1881 """
1879 1882 kwargs = pycompat.byteskwargs(kwargs)
1880 1883 info = {}
1881 1884 usebundle2 = bundle2requested(bundlecaps)
1882 1885 # bundle10 case
1883 1886 if not usebundle2:
1884 1887 if bundlecaps and not kwargs.get('cg', True):
1885 1888 raise ValueError(_('request for bundle10 must include changegroup'))
1886 1889
1887 1890 if kwargs:
1888 1891 raise ValueError(_('unsupported getbundle arguments: %s')
1889 1892 % ', '.join(sorted(kwargs.keys())))
1890 1893 outgoing = _computeoutgoing(repo, heads, common)
1891 1894 info['bundleversion'] = 1
1892 1895 return info, changegroup.makestream(repo, outgoing, '01', source,
1893 1896 bundlecaps=bundlecaps)
1894 1897
1895 1898 # bundle20 case
1896 1899 info['bundleversion'] = 2
1897 1900 b2caps = {}
1898 1901 for bcaps in bundlecaps:
1899 1902 if bcaps.startswith('bundle2='):
1900 1903 blob = urlreq.unquote(bcaps[len('bundle2='):])
1901 1904 b2caps.update(bundle2.decodecaps(blob))
1902 1905 bundler = bundle2.bundle20(repo.ui, b2caps)
1903 1906
1904 1907 kwargs['heads'] = heads
1905 1908 kwargs['common'] = common
1906 1909
1907 1910 for name in getbundle2partsorder:
1908 1911 func = getbundle2partsmapping[name]
1909 1912 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1910 1913 **pycompat.strkwargs(kwargs))
1911 1914
1912 1915 info['prefercompressed'] = bundler.prefercompressed
1913 1916
1914 1917 return info, bundler.getchunks()
1915 1918
1916 1919 @getbundle2partsgenerator('stream2')
1917 1920 def _getbundlestream2(bundler, repo, *args, **kwargs):
1918 1921 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
1919 1922
1920 1923 @getbundle2partsgenerator('changegroup')
1921 1924 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1922 1925 b2caps=None, heads=None, common=None, **kwargs):
1923 1926 """add a changegroup part to the requested bundle"""
1924 1927 cgstream = None
1925 1928 if kwargs.get(r'cg', True):
1926 1929 # build changegroup bundle here.
1927 1930 version = '01'
1928 1931 cgversions = b2caps.get('changegroup')
1929 1932 if cgversions: # 3.1 and 3.2 ship with an empty value
1930 1933 cgversions = [v for v in cgversions
1931 1934 if v in changegroup.supportedoutgoingversions(repo)]
1932 1935 if not cgversions:
1933 1936 raise ValueError(_('no common changegroup version'))
1934 1937 version = max(cgversions)
1935 1938 outgoing = _computeoutgoing(repo, heads, common)
1936 1939 if outgoing.missing:
1937 1940 cgstream = changegroup.makestream(repo, outgoing, version, source,
1938 1941 bundlecaps=bundlecaps)
1939 1942
1940 1943 if cgstream:
1941 1944 part = bundler.newpart('changegroup', data=cgstream)
1942 1945 if cgversions:
1943 1946 part.addparam('version', version)
1944 1947 part.addparam('nbchanges', '%d' % len(outgoing.missing),
1945 1948 mandatory=False)
1946 1949 if 'treemanifest' in repo.requirements:
1947 1950 part.addparam('treemanifest', '1')
1948 1951
1949 1952 @getbundle2partsgenerator('bookmarks')
1950 1953 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
1951 1954 b2caps=None, **kwargs):
1952 1955 """add a bookmark part to the requested bundle"""
1953 1956 if not kwargs.get(r'bookmarks', False):
1954 1957 return
1955 1958 if 'bookmarks' not in b2caps:
1956 1959 raise ValueError(_('no common bookmarks exchange method'))
1957 1960 books = bookmod.listbinbookmarks(repo)
1958 1961 data = bookmod.binaryencode(books)
1959 1962 if data:
1960 1963 bundler.newpart('bookmarks', data=data)
1961 1964
1962 1965 @getbundle2partsgenerator('listkeys')
1963 1966 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1964 1967 b2caps=None, **kwargs):
1965 1968 """add parts containing listkeys namespaces to the requested bundle"""
1966 1969 listkeys = kwargs.get(r'listkeys', ())
1967 1970 for namespace in listkeys:
1968 1971 part = bundler.newpart('listkeys')
1969 1972 part.addparam('namespace', namespace)
1970 1973 keys = repo.listkeys(namespace).items()
1971 1974 part.data = pushkey.encodekeys(keys)
1972 1975
1973 1976 @getbundle2partsgenerator('obsmarkers')
1974 1977 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1975 1978 b2caps=None, heads=None, **kwargs):
1976 1979 """add an obsolescence markers part to the requested bundle"""
1977 1980 if kwargs.get(r'obsmarkers', False):
1978 1981 if heads is None:
1979 1982 heads = repo.heads()
1980 1983 subset = [c.node() for c in repo.set('::%ln', heads)]
1981 1984 markers = repo.obsstore.relevantmarkers(subset)
1982 1985 markers = sorted(markers)
1983 1986 bundle2.buildobsmarkerspart(bundler, markers)
1984 1987
1985 1988 @getbundle2partsgenerator('phases')
1986 1989 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
1987 1990 b2caps=None, heads=None, **kwargs):
1988 1991 """add phase heads part to the requested bundle"""
1989 1992 if kwargs.get(r'phases', False):
 1990 1993 if 'heads' not in b2caps.get('phases', ()):
1991 1994 raise ValueError(_('no common phases exchange method'))
1992 1995 if heads is None:
1993 1996 heads = repo.heads()
1994 1997
1995 1998 headsbyphase = collections.defaultdict(set)
1996 1999 if repo.publishing():
1997 2000 headsbyphase[phases.public] = heads
1998 2001 else:
1999 2002 # find the appropriate heads to move
2000 2003
2001 2004 phase = repo._phasecache.phase
2002 2005 node = repo.changelog.node
2003 2006 rev = repo.changelog.rev
2004 2007 for h in heads:
2005 2008 headsbyphase[phase(repo, rev(h))].add(h)
2006 2009 seenphases = list(headsbyphase.keys())
2007 2010
 2008 2011 # We do not handle anything but public and draft phases for now
2009 2012 if seenphases:
2010 2013 assert max(seenphases) <= phases.draft
2011 2014
2012 2015 # if client is pulling non-public changesets, we need to find
2013 2016 # intermediate public heads.
2014 2017 draftheads = headsbyphase.get(phases.draft, set())
2015 2018 if draftheads:
2016 2019 publicheads = headsbyphase.get(phases.public, set())
2017 2020
2018 2021 revset = 'heads(only(%ln, %ln) and public())'
2019 2022 extraheads = repo.revs(revset, draftheads, publicheads)
2020 2023 for r in extraheads:
2021 2024 headsbyphase[phases.public].add(node(r))
2022 2025
2023 2026 # transform data in a format used by the encoding function
2024 2027 phasemapping = []
2025 2028 for phase in phases.allphases:
2026 2029 phasemapping.append(sorted(headsbyphase[phase]))
2027 2030
2028 2031 # generate the actual part
2029 2032 phasedata = phases.binaryencode(phasemapping)
2030 2033 bundler.newpart('phase-heads', data=phasedata)
2031 2034
2032 2035 @getbundle2partsgenerator('hgtagsfnodes')
2033 2036 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
2034 2037 b2caps=None, heads=None, common=None,
2035 2038 **kwargs):
2036 2039 """Transfer the .hgtags filenodes mapping.
2037 2040
2038 2041 Only values for heads in this bundle will be transferred.
2039 2042
2040 2043 The part data consists of pairs of 20 byte changeset node and .hgtags
2041 2044 filenodes raw values.
2042 2045 """
2043 2046 # Don't send unless:
 2044 2047 # - changesets are being exchanged,
2045 2048 # - the client supports it.
2046 2049 if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
2047 2050 return
2048 2051
2049 2052 outgoing = _computeoutgoing(repo, heads, common)
2050 2053 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2051 2054
2052 2055 @getbundle2partsgenerator('cache:rev-branch-cache')
2053 2056 def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
2054 2057 b2caps=None, heads=None, common=None,
2055 2058 **kwargs):
2056 2059 """Transfer the rev-branch-cache mapping
2057 2060
2058 2061 The payload is a series of data related to each branch
2059 2062
2060 2063 1) branch name length
2061 2064 2) number of open heads
2062 2065 3) number of closed heads
2063 2066 4) open heads nodes
2064 2067 5) closed heads nodes
2065 2068 """
2066 2069 # Don't send unless:
 2067 2070 # - changesets are being exchanged,
2068 2071 # - the client supports it.
 2069 2072 if not kwargs.get(r'cg', True) or 'rev-branch-cache' not in b2caps:
2070 2073 return
2071 2074 outgoing = _computeoutgoing(repo, heads, common)
2072 2075 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2073 2076
2074 2077 def check_heads(repo, their_heads, context):
2075 2078 """check if the heads of a repo have been modified
2076 2079
2077 2080 Used by peer for unbundling.
2078 2081 """
2079 2082 heads = repo.heads()
2080 2083 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
2081 2084 if not (their_heads == ['force'] or their_heads == heads or
2082 2085 their_heads == ['hashed', heads_hash]):
2083 2086 # someone else committed/pushed/unbundled while we
2084 2087 # were transferring data
2085 2088 raise error.PushRaced('repository changed while %s - '
2086 2089 'please try again' % context)
2087 2090
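# Hedged sketch of the client side of the race check above: a caller can
# send the compact ['hashed', ...] form, computed the same way as
# heads_hash in check_heads().
def _hashedheads(heads):
    return ['hashed', hashlib.sha1(''.join(sorted(heads))).digest()]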
2088 2091 def unbundle(repo, cg, heads, source, url):
2089 2092 """Apply a bundle to a repo.
2090 2093
 2091 2094 This function makes sure the repo is locked during the application and has
 2092 2095 a mechanism to check that no push race occurred between the creation of the
 2093 2096 bundle and its application.
 2094 2097
 2095 2098 If the push was raced, a PushRaced exception is raised.
2096 2099 r = 0
2097 2100 # need a transaction when processing a bundle2 stream
2098 2101 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2099 2102 lockandtr = [None, None, None]
2100 2103 recordout = None
2101 2104 # quick fix for output mismatch with bundle2 in 3.4
2102 2105 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
2103 2106 if url.startswith('remote:http:') or url.startswith('remote:https:'):
2104 2107 captureoutput = True
2105 2108 try:
2106 2109 # note: outside bundle1, 'heads' is expected to be empty and this
 2107 2110 # 'check_heads' call will be a no-op
2108 2111 check_heads(repo, heads, 'uploading changes')
2109 2112 # push can proceed
2110 2113 if not isinstance(cg, bundle2.unbundle20):
2111 2114 # legacy case: bundle1 (changegroup 01)
2112 2115 txnname = "\n".join([source, util.hidepassword(url)])
2113 2116 with repo.lock(), repo.transaction(txnname) as tr:
2114 2117 op = bundle2.applybundle(repo, cg, tr, source, url)
2115 2118 r = bundle2.combinechangegroupresults(op)
2116 2119 else:
2117 2120 r = None
2118 2121 try:
2119 2122 def gettransaction():
2120 2123 if not lockandtr[2]:
2121 2124 lockandtr[0] = repo.wlock()
2122 2125 lockandtr[1] = repo.lock()
2123 2126 lockandtr[2] = repo.transaction(source)
2124 2127 lockandtr[2].hookargs['source'] = source
2125 2128 lockandtr[2].hookargs['url'] = url
2126 2129 lockandtr[2].hookargs['bundle2'] = '1'
2127 2130 return lockandtr[2]
2128 2131
2129 2132 # Do greedy locking by default until we're satisfied with lazy
2130 2133 # locking.
2131 2134 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
2132 2135 gettransaction()
2133 2136
2134 2137 op = bundle2.bundleoperation(repo, gettransaction,
2135 2138 captureoutput=captureoutput,
2136 2139 source='push')
2137 2140 try:
2138 2141 op = bundle2.processbundle(repo, cg, op=op)
2139 2142 finally:
2140 2143 r = op.reply
2141 2144 if captureoutput and r is not None:
2142 2145 repo.ui.pushbuffer(error=True, subproc=True)
2143 2146 def recordout(output):
2144 2147 r.newpart('output', data=output, mandatory=False)
2145 2148 if lockandtr[2] is not None:
2146 2149 lockandtr[2].close()
2147 2150 except BaseException as exc:
2148 2151 exc.duringunbundle2 = True
2149 2152 if captureoutput and r is not None:
2150 2153 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2151 2154 def recordout(output):
2152 2155 part = bundle2.bundlepart('output', data=output,
2153 2156 mandatory=False)
2154 2157 parts.append(part)
2155 2158 raise
2156 2159 finally:
2157 2160 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2158 2161 if recordout is not None:
2159 2162 recordout(repo.ui.popbuffer())
2160 2163 return r
2161 2164
2162 2165 def _maybeapplyclonebundle(pullop):
2163 2166 """Apply a clone bundle from a remote, if possible."""
2164 2167
2165 2168 repo = pullop.repo
2166 2169 remote = pullop.remote
2167 2170
2168 2171 if not repo.ui.configbool('ui', 'clonebundles'):
2169 2172 return
2170 2173
2171 2174 # Only run if local repo is empty.
2172 2175 if len(repo):
2173 2176 return
2174 2177
2175 2178 if pullop.heads:
2176 2179 return
2177 2180
2178 2181 if not remote.capable('clonebundles'):
2179 2182 return
2180 2183
2181 2184 with remote.commandexecutor() as e:
2182 2185 res = e.callcommand('clonebundles', {}).result()
2183 2186
2184 2187 # If we call the wire protocol command, that's good enough to record the
2185 2188 # attempt.
2186 2189 pullop.clonebundleattempted = True
2187 2190
2188 2191 entries = parseclonebundlesmanifest(repo, res)
2189 2192 if not entries:
2190 2193 repo.ui.note(_('no clone bundles available on remote; '
2191 2194 'falling back to regular clone\n'))
2192 2195 return
2193 2196
2194 2197 entries = filterclonebundleentries(
2195 2198 repo, entries, streamclonerequested=pullop.streamclonerequested)
2196 2199
2197 2200 if not entries:
2198 2201 # There is a thundering herd concern here. However, if a server
2199 2202 # operator doesn't advertise bundles appropriate for its clients,
2200 2203 # they deserve what's coming. Furthermore, from a client's
2201 2204 # perspective, no automatic fallback would mean not being able to
2202 2205 # clone!
2203 2206 repo.ui.warn(_('no compatible clone bundles available on server; '
2204 2207 'falling back to regular clone\n'))
2205 2208 repo.ui.warn(_('(you may want to report this to the server '
2206 2209 'operator)\n'))
2207 2210 return
2208 2211
2209 2212 entries = sortclonebundleentries(repo.ui, entries)
2210 2213
2211 2214 url = entries[0]['URL']
2212 2215 repo.ui.status(_('applying clone bundle from %s\n') % url)
2213 2216 if trypullbundlefromurl(repo.ui, repo, url):
2214 2217 repo.ui.status(_('finished applying clone bundle\n'))
2215 2218 # Bundle failed.
2216 2219 #
2217 2220 # We abort by default to avoid the thundering herd of
2218 2221 # clients flooding a server that was expecting expensive
2219 2222 # clone load to be offloaded.
2220 2223 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2221 2224 repo.ui.warn(_('falling back to normal clone\n'))
2222 2225 else:
2223 2226 raise error.Abort(_('error applying bundle'),
2224 2227 hint=_('if this error persists, consider contacting '
2225 2228 'the server operator or disable clone '
2226 2229 'bundles via '
2227 2230 '"--config ui.clonebundles=false"'))
2228 2231
2229 2232 def parseclonebundlesmanifest(repo, s):
2230 2233 """Parses the raw text of a clone bundles manifest.
2231 2234
2232 2235 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2233 2236 to the URL and other keys are the attributes for the entry.
2234 2237 """
2235 2238 m = []
2236 2239 for line in s.splitlines():
2237 2240 fields = line.split()
2238 2241 if not fields:
2239 2242 continue
2240 2243 attrs = {'URL': fields[0]}
2241 2244 for rawattr in fields[1:]:
2242 2245 key, value = rawattr.split('=', 1)
2243 2246 key = urlreq.unquote(key)
2244 2247 value = urlreq.unquote(value)
2245 2248 attrs[key] = value
2246 2249
2247 2250 # Parse BUNDLESPEC into components. This makes client-side
2248 2251 # preferences easier to specify since you can prefer a single
2249 2252 # component of the BUNDLESPEC.
2250 2253 if key == 'BUNDLESPEC':
2251 2254 try:
2252 2255 bundlespec = parsebundlespec(repo, value)
2253 2256 attrs['COMPRESSION'] = bundlespec.compression
2254 2257 attrs['VERSION'] = bundlespec.version
2255 2258 except error.InvalidBundleSpecification:
2256 2259 pass
2257 2260 except error.UnsupportedBundleSpecification:
2258 2261 pass
2259 2262
2260 2263 m.append(attrs)
2261 2264
2262 2265 return m
2263 2266
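# Hedged illustration of the manifest format parsed above. A line such as
#
#     https://cdn.example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# would yield an entry like
#
#     {'URL': 'https://cdn.example.com/full.hg', 'BUNDLESPEC': 'gzip-v2',
#      'COMPRESSION': 'gzip', 'VERSION': 'v2', 'REQUIRESNI': 'true'}
#
# where COMPRESSION and VERSION are derived by parsing the BUNDLESPEC.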
2264 2267 def isstreamclonespec(bundlespec):
2265 2268 # Stream clone v1
2266 2269 if (bundlespec.wirecompression == 'UN' and bundlespec.wireversion == 's1'):
2267 2270 return True
2268 2271
2269 2272 # Stream clone v2
 2270 2273 if (bundlespec.wirecompression == 'UN' and
 2271 2274 bundlespec.wireversion == '02' and
2272 2275 bundlespec.contentopts.get('streamv2')):
2273 2276 return True
2274 2277
2275 2278 return False
2276 2279
2277 2280 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2278 2281 """Remove incompatible clone bundle manifest entries.
2279 2282
2280 2283 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2281 2284 and returns a new list consisting of only the entries that this client
2282 2285 should be able to apply.
2283 2286
2284 2287 There is no guarantee we'll be able to apply all returned entries because
2285 2288 the metadata we use to filter on may be missing or wrong.
2286 2289 """
2287 2290 newentries = []
2288 2291 for entry in entries:
2289 2292 spec = entry.get('BUNDLESPEC')
2290 2293 if spec:
2291 2294 try:
2292 2295 bundlespec = parsebundlespec(repo, spec, strict=True)
2293 2296
2294 2297 # If a stream clone was requested, filter out non-streamclone
2295 2298 # entries.
2296 2299 if streamclonerequested and not isstreamclonespec(bundlespec):
2297 2300 repo.ui.debug('filtering %s because not a stream clone\n' %
2298 2301 entry['URL'])
2299 2302 continue
2300 2303
2301 2304 except error.InvalidBundleSpecification as e:
2302 2305 repo.ui.debug(stringutil.forcebytestr(e) + '\n')
2303 2306 continue
2304 2307 except error.UnsupportedBundleSpecification as e:
2305 2308 repo.ui.debug('filtering %s because unsupported bundle '
2306 2309 'spec: %s\n' % (
2307 2310 entry['URL'], stringutil.forcebytestr(e)))
2308 2311 continue
2309 2312 # If we don't have a spec and requested a stream clone, we don't know
2310 2313 # what the entry is so don't attempt to apply it.
2311 2314 elif streamclonerequested:
2312 2315 repo.ui.debug('filtering %s because cannot determine if a stream '
2313 2316 'clone bundle\n' % entry['URL'])
2314 2317 continue
2315 2318
2316 2319 if 'REQUIRESNI' in entry and not sslutil.hassni:
2317 2320 repo.ui.debug('filtering %s because SNI not supported\n' %
2318 2321 entry['URL'])
2319 2322 continue
2320 2323
2321 2324 newentries.append(entry)
2322 2325
2323 2326 return newentries
2324 2327
2325 2328 class clonebundleentry(object):
2326 2329 """Represents an item in a clone bundles manifest.
2327 2330
2328 2331 This rich class is needed to support sorting since sorted() in Python 3
2329 2332 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2330 2333 won't work.
2331 2334 """
2332 2335
2333 2336 def __init__(self, value, prefers):
2334 2337 self.value = value
2335 2338 self.prefers = prefers
2336 2339
2337 2340 def _cmp(self, other):
2338 2341 for prefkey, prefvalue in self.prefers:
2339 2342 avalue = self.value.get(prefkey)
2340 2343 bvalue = other.value.get(prefkey)
2341 2344
2342 2345 # Special case for b missing attribute and a matches exactly.
2343 2346 if avalue is not None and bvalue is None and avalue == prefvalue:
2344 2347 return -1
2345 2348
2346 2349 # Special case for a missing attribute and b matches exactly.
2347 2350 if bvalue is not None and avalue is None and bvalue == prefvalue:
2348 2351 return 1
2349 2352
2350 2353 # We can't compare unless attribute present on both.
2351 2354 if avalue is None or bvalue is None:
2352 2355 continue
2353 2356
2354 2357 # Same values should fall back to next attribute.
2355 2358 if avalue == bvalue:
2356 2359 continue
2357 2360
2358 2361 # Exact matches come first.
2359 2362 if avalue == prefvalue:
2360 2363 return -1
2361 2364 if bvalue == prefvalue:
2362 2365 return 1
2363 2366
2364 2367 # Fall back to next attribute.
2365 2368 continue
2366 2369
2367 2370 # If we got here we couldn't sort by attributes and prefers. Fall
2368 2371 # back to index order.
2369 2372 return 0
2370 2373
2371 2374 def __lt__(self, other):
2372 2375 return self._cmp(other) < 0
2373 2376
2374 2377 def __gt__(self, other):
2375 2378 return self._cmp(other) > 0
2376 2379
2377 2380 def __eq__(self, other):
2378 2381 return self._cmp(other) == 0
2379 2382
2380 2383 def __le__(self, other):
2381 2384 return self._cmp(other) <= 0
2382 2385
2383 2386 def __ge__(self, other):
2384 2387 return self._cmp(other) >= 0
2385 2388
2386 2389 def __ne__(self, other):
2387 2390 return self._cmp(other) != 0
2388 2391
2389 2392 def sortclonebundleentries(ui, entries):
2390 2393 prefers = ui.configlist('ui', 'clonebundleprefers')
2391 2394 if not prefers:
2392 2395 return list(entries)
2393 2396
2394 2397 prefers = [p.split('=', 1) for p in prefers]
2395 2398
2396 2399 items = sorted(clonebundleentry(v, prefers) for v in entries)
2397 2400 return [i.value for i in items]
2398 2401
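# Hedged, self-contained illustration of the preference sort above: with
# hypothetical entries and ui.clonebundleprefers=COMPRESSION=zstd,VERSION=v2,
# entry 'b' wins on the first preference and sorts first.
def _examplesort():
    prefers = [('COMPRESSION', 'zstd'), ('VERSION', 'v2')]
    entries = [{'URL': 'a', 'COMPRESSION': 'gzip', 'VERSION': 'v2'},
               {'URL': 'b', 'COMPRESSION': 'zstd', 'VERSION': 'v1'}]
    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value['URL'] for i in items]  # -> ['b', 'a']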
2399 2402 def trypullbundlefromurl(ui, repo, url):
2400 2403 """Attempt to apply a bundle from a URL."""
2401 2404 with repo.lock(), repo.transaction('bundleurl') as tr:
2402 2405 try:
2403 2406 fh = urlmod.open(ui, url)
2404 2407 cg = readbundle(ui, fh, 'stream')
2405 2408
2406 2409 if isinstance(cg, streamclone.streamcloneapplier):
2407 2410 cg.apply(repo)
2408 2411 else:
2409 2412 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2410 2413 return True
2411 2414 except urlerr.httperror as e:
2412 2415 ui.warn(_('HTTP error fetching bundle: %s\n') %
2413 2416 stringutil.forcebytestr(e))
2414 2417 except urlerr.urlerror as e:
2415 2418 ui.warn(_('error fetching bundle: %s\n') %
2416 2419 stringutil.forcebytestr(e.reason))
2417 2420
2418 2421 return False
@@ -1,341 +1,397 b''
1 1 Testing the functionality to pull remotenames
2 2 =============================================
3 3
4 4 $ cat >> $HGRCPATH << EOF
5 5 > [ui]
6 6 > ssh = $PYTHON "$TESTDIR/dummyssh"
7 7 > [alias]
8 8 > glog = log -G -T '{rev}:{node|short} {desc}'
9 9 > [extensions]
10 10 > remotenames =
11 11 > show =
12 12 > EOF
13 13
14 14 Making a server repo
15 15 --------------------
16 16
17 17 $ hg init server
18 18 $ cd server
19 19 $ for ch in a b c d e f g h; do
20 20 > echo "foo" >> $ch
21 21 > hg ci -Aqm "Added "$ch
22 22 > done
23 23 $ hg glog
24 24 @ 7:ec2426147f0e Added h
25 25 |
26 26 o 6:87d6d6676308 Added g
27 27 |
28 28 o 5:825660c69f0c Added f
29 29 |
30 30 o 4:aa98ab95a928 Added e
31 31 |
32 32 o 3:62615734edd5 Added d
33 33 |
34 34 o 2:28ad74487de9 Added c
35 35 |
36 36 o 1:29becc82797a Added b
37 37 |
38 38 o 0:18d04c59bb5d Added a
39 39
40 40 $ hg bookmark -r 3 foo
41 41 $ hg bookmark -r 6 bar
42 42 $ hg up 4
43 43 0 files updated, 0 files merged, 3 files removed, 0 files unresolved
44 44 $ hg branch wat
45 45 marked working directory as branch wat
46 46 (branches are permanent and global, did you want a bookmark?)
47 47 $ echo foo >> bar
48 48 $ hg ci -Aqm "added bar"
49 49
50 50 Making a client repo
51 51 --------------------
52 52
53 53 $ cd ..
54 54
55 55 $ hg clone ssh://user@dummy/server client
56 56 requesting all changes
57 57 adding changesets
58 58 adding manifests
59 59 adding file changes
60 60 added 9 changesets with 9 changes to 9 files (+1 heads)
61 61 new changesets 18d04c59bb5d:3e1487808078
62 62 updating to branch default
63 63 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
64 64
65 65 $ cd client
66 66 $ cat .hg/logexchange/bookmarks
67 67 0
68 68
69 69 87d6d66763085b629e6d7ed56778c79827273022\x00default\x00bar (esc)
70 70 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00default\x00foo (esc)
71 71
72 72 $ cat .hg/logexchange/branches
73 73 0
74 74
75 75 ec2426147f0e39dbc9cef599b066be6035ce691d\x00default\x00default (esc)
76 76 3e1487808078543b0af6d10dadf5d46943578db0\x00default\x00wat (esc)
77 77
78 78 $ hg show work
79 79 o 3e14 (wat) (default/wat) added bar
80 80 |
81 81 ~
82 82 @ ec24 (default/default) Added h
83 83 |
84 84 ~
85 85
86 86 $ hg update "default/wat"
87 87 1 files updated, 0 files merged, 3 files removed, 0 files unresolved
88 88 $ hg identify
89 89 3e1487808078 (wat) tip
90 90
91 91 Making a new server
92 92 -------------------
93 93
94 94 $ cd ..
95 95 $ hg init server2
96 96 $ cd server2
97 97 $ hg pull ../server/
98 98 pulling from ../server/
99 99 requesting all changes
100 100 adding changesets
101 101 adding manifests
102 102 adding file changes
103 103 added 9 changesets with 9 changes to 9 files (+1 heads)
104 104 adding remote bookmark bar
105 105 adding remote bookmark foo
106 106 new changesets 18d04c59bb5d:3e1487808078
107 107 (run 'hg heads' to see heads)
108 108
 109 109 Pulling from the new server
110 110 ---------------------------
111 111 $ cd ../client/
112 112 $ hg pull ../server2/
113 113 pulling from ../server2/
114 114 searching for changes
115 115 no changes found
116 116 $ cat .hg/logexchange/bookmarks
117 117 0
118 118
119 119 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00default\x00foo (esc)
120 120 87d6d66763085b629e6d7ed56778c79827273022\x00default\x00bar (esc)
121 121 87d6d66763085b629e6d7ed56778c79827273022\x00$TESTTMP/server2\x00bar (esc)
122 122 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00$TESTTMP/server2\x00foo (esc)
123 123
124 124 $ cat .hg/logexchange/branches
125 125 0
126 126
127 127 3e1487808078543b0af6d10dadf5d46943578db0\x00default\x00wat (esc)
128 128 ec2426147f0e39dbc9cef599b066be6035ce691d\x00default\x00default (esc)
129 129 ec2426147f0e39dbc9cef599b066be6035ce691d\x00$TESTTMP/server2\x00default (esc)
130 130 3e1487808078543b0af6d10dadf5d46943578db0\x00$TESTTMP/server2\x00wat (esc)
131 131
132 132 $ hg log -G
133 133 @ changeset: 8:3e1487808078
134 134 | branch: wat
135 135 | tag: tip
136 136 | remote branch: $TESTTMP/server2/wat
137 137 | remote branch: default/wat
138 138 | parent: 4:aa98ab95a928
139 139 | user: test
140 140 | date: Thu Jan 01 00:00:00 1970 +0000
141 141 | summary: added bar
142 142 |
143 143 | o changeset: 7:ec2426147f0e
144 144 | | remote branch: $TESTTMP/server2/default
145 145 | | remote branch: default/default
146 146 | | user: test
147 147 | | date: Thu Jan 01 00:00:00 1970 +0000
148 148 | | summary: Added h
149 149 | |
150 150 | o changeset: 6:87d6d6676308
151 151 | | bookmark: bar
152 152 | | remote bookmark: $TESTTMP/server2/bar
153 153 | | remote bookmark: default/bar
154 154 | | hoisted name: bar
155 155 | | user: test
156 156 | | date: Thu Jan 01 00:00:00 1970 +0000
157 157 | | summary: Added g
158 158 | |
159 159 | o changeset: 5:825660c69f0c
160 160 |/ user: test
161 161 | date: Thu Jan 01 00:00:00 1970 +0000
162 162 | summary: Added f
163 163 |
164 164 o changeset: 4:aa98ab95a928
165 165 | user: test
166 166 | date: Thu Jan 01 00:00:00 1970 +0000
167 167 | summary: Added e
168 168 |
169 169 o changeset: 3:62615734edd5
170 170 | bookmark: foo
171 171 | remote bookmark: $TESTTMP/server2/foo
172 172 | remote bookmark: default/foo
173 173 | hoisted name: foo
174 174 | user: test
175 175 | date: Thu Jan 01 00:00:00 1970 +0000
176 176 | summary: Added d
177 177 |
178 178 o changeset: 2:28ad74487de9
179 179 | user: test
180 180 | date: Thu Jan 01 00:00:00 1970 +0000
181 181 | summary: Added c
182 182 |
183 183 o changeset: 1:29becc82797a
184 184 | user: test
185 185 | date: Thu Jan 01 00:00:00 1970 +0000
186 186 | summary: Added b
187 187 |
188 188 o changeset: 0:18d04c59bb5d
189 189 user: test
190 190 date: Thu Jan 01 00:00:00 1970 +0000
191 191 summary: Added a
192 192
193 193 Testing the templates provided by remotenames extension
194 194
195 195 `remotenames` keyword
196 196
197 197 $ hg log -G -T "{rev}:{node|short} {remotenames}\n"
198 198 @ 8:3e1487808078 $TESTTMP/server2/wat default/wat
199 199 |
200 200 | o 7:ec2426147f0e $TESTTMP/server2/default default/default
201 201 | |
202 202 | o 6:87d6d6676308 $TESTTMP/server2/bar default/bar
203 203 | |
204 204 | o 5:825660c69f0c
205 205 |/
206 206 o 4:aa98ab95a928
207 207 |
208 208 o 3:62615734edd5 $TESTTMP/server2/foo default/foo
209 209 |
210 210 o 2:28ad74487de9
211 211 |
212 212 o 1:29becc82797a
213 213 |
214 214 o 0:18d04c59bb5d
215 215
216 216 `remotebookmarks` and `remotebranches` keywords
217 217
218 218 $ hg log -G -T "{rev}:{node|short} [{remotebookmarks}] ({remotebranches})"
219 219 @ 8:3e1487808078 [] ($TESTTMP/server2/wat default/wat)
220 220 |
221 221 | o 7:ec2426147f0e [] ($TESTTMP/server2/default default/default)
222 222 | |
223 223 | o 6:87d6d6676308 [$TESTTMP/server2/bar default/bar] ()
224 224 | |
225 225 | o 5:825660c69f0c [] ()
226 226 |/
227 227 o 4:aa98ab95a928 [] ()
228 228 |
229 229 o 3:62615734edd5 [$TESTTMP/server2/foo default/foo] ()
230 230 |
231 231 o 2:28ad74487de9 [] ()
232 232 |
233 233 o 1:29becc82797a [] ()
234 234 |
235 235 o 0:18d04c59bb5d [] ()
236 236
237 237 The `hoistednames` template keyword
238 238
239 239 $ hg log -GT "{rev}:{node|short} ({hoistednames})"
240 240 @ 8:3e1487808078 ()
241 241 |
242 242 | o 7:ec2426147f0e ()
243 243 | |
244 244 | o 6:87d6d6676308 (bar)
245 245 | |
246 246 | o 5:825660c69f0c ()
247 247 |/
248 248 o 4:aa98ab95a928 ()
249 249 |
250 250 o 3:62615734edd5 (foo)
251 251 |
252 252 o 2:28ad74487de9 ()
253 253 |
254 254 o 1:29becc82797a ()
255 255 |
256 256 o 0:18d04c59bb5d ()
257 257
258 258
259 259 Testing the revsets provided by remotenames extension
260 260
261 261 `remotenames` revset
262 262
263 263 $ hg log -r "remotenames()" -GT "{rev}:{node|short} {remotenames}\n"
264 264 @ 8:3e1487808078 $TESTTMP/server2/wat default/wat
265 265 :
266 266 : o 7:ec2426147f0e $TESTTMP/server2/default default/default
267 267 : |
268 268 : o 6:87d6d6676308 $TESTTMP/server2/bar default/bar
269 269 :/
270 270 o 3:62615734edd5 $TESTTMP/server2/foo default/foo
271 271 |
272 272 ~
273 273
274 274 `remotebranches` revset
275 275
276 276 $ hg log -r "remotebranches()" -GT "{rev}:{node|short} {remotenames}\n"
277 277 @ 8:3e1487808078 $TESTTMP/server2/wat default/wat
278 278 |
279 279 ~
280 280 o 7:ec2426147f0e $TESTTMP/server2/default default/default
281 281 |
282 282 ~
283 283
284 284 `remotebookmarks` revset
285 285
286 286 $ hg log -r "remotebookmarks()" -GT "{rev}:{node|short} {remotenames}\n"
287 287 o 6:87d6d6676308 $TESTTMP/server2/bar default/bar
288 288 :
289 289 o 3:62615734edd5 $TESTTMP/server2/foo default/foo
290 290 |
291 291 ~
292 292
293 293 Updating to revision using hoisted name
294 294
295 295 Deleting local bookmark to make sure we update to hoisted name only
296 296
297 297 $ hg bookmark -d bar
298 298
299 299 $ hg up bar
300 300 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
301 301
302 302 $ hg log -r .
303 303 changeset: 6:87d6d6676308
304 304 remote bookmark: $TESTTMP/server2/bar
305 305 remote bookmark: default/bar
306 306 hoisted name: bar
307 307 user: test
308 308 date: Thu Jan 01 00:00:00 1970 +0000
309 309 summary: Added g
310 310
311 311 When both local bookmark and hoisted name exists but on different revs
312 312
313 313 $ hg up 8
314 314 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
315 315
316 316 $ hg bookmark foo
317 317 moving bookmark 'foo' forward from 62615734edd5
318 318
319 319 Local bookmark should take precedence over hoisted name
320 320
321 321 $ hg up foo
322 322 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
323 323
324 324 $ hg log -r .
325 325 changeset: 8:3e1487808078
326 326 branch: wat
327 327 bookmark: foo
328 328 tag: tip
329 329 remote branch: $TESTTMP/server2/wat
330 330 remote branch: default/wat
331 331 parent: 4:aa98ab95a928
332 332 user: test
333 333 date: Thu Jan 01 00:00:00 1970 +0000
334 334 summary: added bar
335 335
336 336 $ hg bookmarks
337 337 $TESTTMP/server2/bar 6:87d6d6676308
338 338 $TESTTMP/server2/foo 3:62615734edd5
339 339 default/bar 6:87d6d6676308
340 340 default/foo 3:62615734edd5
341 341 * foo 8:3e1487808078
342
 343 Testing the remotenames synchronization during `hg push`
 344 --------------------------------------------------------
345
346 $ cd ../server/
347 $ hg bookmark foo
348 moving bookmark 'foo' forward from 62615734edd5
349
350 After the push, default/foo should move to rev 8
351 $ cd ../client/
352 $ hg push
353 pushing to ssh://user@dummy/server
354 searching for changes
355 no changes found
356 [1]
357 $ hg log -Gr 'remotenames()'
358 @ changeset: 8:3e1487808078
359 : branch: wat
360 : bookmark: foo
361 : tag: tip
362 : remote bookmark: default/foo
363 : hoisted name: foo
364 : remote branch: $TESTTMP/server2/wat
365 : remote branch: default/wat
366 : parent: 4:aa98ab95a928
367 : user: test
368 : date: Thu Jan 01 00:00:00 1970 +0000
369 : summary: added bar
370 :
371 : o changeset: 7:ec2426147f0e
372 : | remote branch: $TESTTMP/server2/default
373 : | remote branch: default/default
374 : | user: test
375 : | date: Thu Jan 01 00:00:00 1970 +0000
376 : | summary: Added h
377 : |
378 : o changeset: 6:87d6d6676308
379 :/ remote bookmark: $TESTTMP/server2/bar
380 : remote bookmark: default/bar
381 : hoisted name: bar
382 : user: test
383 : date: Thu Jan 01 00:00:00 1970 +0000
384 : summary: Added g
385 :
386 o changeset: 3:62615734edd5
387 | remote bookmark: $TESTTMP/server2/foo
388 ~ user: test
389 date: Thu Jan 01 00:00:00 1970 +0000
390 summary: Added d
391
392 $ hg bookmarks
393 $TESTTMP/server2/bar 6:87d6d6676308
394 $TESTTMP/server2/foo 3:62615734edd5
395 default/bar 6:87d6d6676308
396 default/foo 8:3e1487808078
397 * foo 8:3e1487808078
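
The output above confirms the synchronisation: the push moved default/foo to
rev 8 to match the server, while the $TESTTMP/server2 names, which belong to
a different remote, were left untouched.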