##// END OF EJS Templates
py3: add a r'' prefix in mercurial/exchange.py...
Pulkit Goyal -
r40388:dd816e53 default
parent child Browse files
Show More
@@ -1,2657 +1,2657 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 bin,
16 16 hex,
17 17 nullid,
18 18 nullrev,
19 19 )
20 20 from .thirdparty import (
21 21 attr,
22 22 )
23 23 from . import (
24 24 bookmarks as bookmod,
25 25 bundle2,
26 26 changegroup,
27 27 discovery,
28 28 error,
29 29 exchangev2,
30 30 lock as lockmod,
31 31 logexchange,
32 32 narrowspec,
33 33 obsolete,
34 34 phases,
35 35 pushkey,
36 36 pycompat,
37 37 repository,
38 38 scmutil,
39 39 sslutil,
40 40 streamclone,
41 41 url as urlmod,
42 42 util,
43 43 )
44 44 from .utils import (
45 45 stringutil,
46 46 )
47 47
48 48 urlerr = util.urlerr
49 49 urlreq = util.urlreq
50 50
# Config section consulted for narrow ACLs.
# NOTE(review): consumers live elsewhere in this module — confirm against them.
_NARROWACL_SECTION = 'narrowhgacl'

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                         }

# Maps bundle version with content opts to choose which part to bundle
_bundlespeccontentopts = {
    'v1': {
        'changegroup': True,
        'cg.version': '01',
        'obsolescence': False,
        'phases': False,
        'tagsfnodescache': False,
        'revbranchcache': False
    },
    'v2': {
        'changegroup': True,
        'cg.version': '02',
        'obsolescence': False,
        'phases': False,
        'tagsfnodescache': True,
        'revbranchcache': True
    },
    'packed1' : {
        'cg.version': 's1'
    }
}
# 'bundle2' is a legacy alias for the 'v2' content options (see the
# matching "#legacy" entry in _bundlespeccgversions above)
_bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']

# Variant overlays merged on top of the base content options by
# parsebundlespec() (e.g. "stream=v2" replaces the changegroup payload)
_bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
                                    "tagsfnodescache": False,
                                    "revbranchcache": False}}

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
90 90
@attr.s
class bundlespec(object):
    """Parsed representation of a bundle specification string.

    Built by ``parsebundlespec()``: the plain attributes hold the
    human-readable names, the ``wire*`` attributes hold the identifiers
    used on the wire (from the compression engine's ``bundletype()`` and
    ``_bundlespeccgversions`` respectively).
    """
    compression = attr.ib()      # human name of the compression engine
    wirecompression = attr.ib()  # wire-protocol name of the engine
    version = attr.ib()          # human name of the bundle version
    wireversion = attr.ib()      # changegroup version string for the wire
    params = attr.ib()           # dict of URI-decoded extra parameters
    contentopts = attr.ib()      # dict selecting which parts to bundle
99 99
def parsebundlespec(repo, spec, strict=True):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    Returns a bundlespec object of (compression, version, parameters).
    Compression will be ``None`` if not in strict mode and a compression isn't
    defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        # Split "<version>[;k=v;...]" into the bare version string and a
        # dict of URI-decoded key/value parameters.
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
            _('invalid bundle specification; '
              'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        # Fully-qualified form: "<compression>-<version>[;params]".
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                ', '.join(sorted(missingreqs)))

    # Compute contentopts based on the version
    contentopts = _bundlespeccontentopts.get(version, {}).copy()

    # Process the variants
    if "stream" in params and params["stream"] == "v2":
        variant = _bundlespecvariants["streamv2"]
        contentopts.update(variant)

    # Translate the human-readable names to their wire counterparts.
    engine = util.compengines.forbundlename(compression)
    compression, wirecompression = engine.bundletype()
    wireversion = _bundlespeccgversions[version]

    return bundlespec(compression, wirecompression, version, wireversion,
                      params, contentopts)
228 228
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler for the bundle stream open on ``fh``.

    The first four bytes are peeked to pick between changegroup-v1
    ("HG10"), bundle2 ("HG2x") and stream-clone ("HGS1") unbundlers.
    ``fname`` is only used for error messages; when it is empty, a
    headerless (NUL-leading) changegroup stream is also accepted.
    """
    hdr = changegroup.readexactly(fh, 4)

    compalg = None
    if fname:
        if vfs:
            fname = vfs.join(fname)
    else:
        fname = "stream"
        # A headerless changegroup starts with a NUL byte: replay the
        # consumed magic and treat it as an uncompressed HG10 bundle.
        if not hdr.startswith('HG') and hdr.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, hdr)
            hdr = "HG10"
            compalg = 'UN'

    magic = hdr[0:2]
    version = hdr[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)

    if version == '10':
        # cg1 streams carry a 2-byte compression marker after the magic.
        if compalg is None:
            compalg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, compalg)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    if version == 'S1':
        return streamclone.streamcloneapplier(fh)
    raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
256 256
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        # Map a bundle compression marker back to its human-readable name;
        # None when the marker is unknown.
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            # normalize the internal truncated-bzip2 marker to plain 'BZ'
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        # Walk the parts to find out which changegroup version (if any)
        # the bundle carries.
        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))
            elif part.type == 'stream2' and version is None:
                # A stream2 part requires to be part of a v2 bundle
                version = "v2"
                requirements = urlreq.unquote(part.params['requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return 'none-v2;stream=v2;%s' % params

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        formatted = bundle2._formatrequirementsparams(requirements)
        return 'none-packed1;%s' % formatted
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)
316 316
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    changelog = repo.changelog
    if not common:
        common = [nullid]
    else:
        # drop nodes the local changelog does not know about
        known = changelog.hasnode
        common = [node for node in common if known(node)]
    if not heads:
        heads = changelog.heads()
    return discovery.outgoing(repo, common, heads)
335 335
336 336 def _forcebundle1(op):
337 337 """return true if a pull/push must use bundle1
338 338
339 339 This function is used to allow testing of the older bundle version"""
340 340 ui = op.repo.ui
341 341 # The goal is this config is to allow developer to choose the bundle
342 342 # version used during exchanged. This is especially handy during test.
343 343 # Value is a list of bundle version to be picked from, highest version
344 344 # should be used.
345 345 #
346 346 # developer config: devel.legacy.exchange
347 347 exchange = ui.configlist('devel', 'legacy.exchange')
348 348 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
349 349 return forcebundle1 or not op.remote.capable('bundle2')
350 350
class pushoperation(object):
    """A object that represent a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        """``repo`` is the local repository pushed from, ``remote`` the peer
        pushed to; the remaining parameters mirror the push command options.
        All other attributes start empty and are filled in by the discovery
        and push steps as the operation progresses."""
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        # cgresult truthy means the changegroup push succeeded
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of message used when pushing bookmark
    # (success message, failure message) per bookmark action
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
477 477
478 478
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote.

    Returns the pushoperation object.  Its ``cgresult`` attribute holds
    an integer describing the changegroup push result:
    - None means nothing to push
    - 0 means HTTP error
    - 1 means we pushed and remote head count is unchanged *or*
      we have outgoing changesets but refused to push
    - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        # Local-to-local push: the destination repository must support
        # every requirement of the source.
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except error.LockUnavailable as err:
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = ('cannot lock source repository: %s\n'
               % stringutil.forcebytestr(err))
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        # The remaining steps are no-ops for data already sent via bundle2
        # (tracked through pushop.stepsdone).
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    if repo.ui.configbool('experimental', 'remotenames'):
        # record the remote's bookmark/branch names locally
        logexchange.pullremotenames(repo, remote)

    return pushop
545 545
546 546 # list of steps to perform discovery before push
547 547 pushdiscoveryorder = []
548 548
549 549 # Mapping between step name and function
550 550 #
551 551 # This exists to help extensions wrap steps if necessary
552 552 pushdiscoverymapping = {}
553 553
554 554 def pushdiscovery(stepname):
555 555 """decorator for function performing discovery before push
556 556
557 557 The function is added to the step -> function mapping and appended to the
558 558 list of steps. Beware that decorated function will be added in order (this
559 559 may matter).
560 560
561 561 You can only use this decorator for a new step, if you want to wrap a step
562 562 from an extension, change the pushdiscovery dictionary directly."""
563 563 def dec(func):
564 564 assert stepname not in pushdiscoverymapping
565 565 pushdiscoverymapping[stepname] = func
566 566 pushdiscoveryorder.append(stepname)
567 567 return func
568 568 return dec
569 569
570 570 def _pushdiscovery(pushop):
571 571 """Run all discovery steps"""
572 572 for stepname in pushdiscoveryorder:
573 573 step = pushdiscoverymapping[stepname]
574 574 step(pushop)
575 575
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    fci = discovery.findcommonincoming
    if pushop.revs:
        # limit discovery to the ancestors of the requested revisions
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
                        ancestorsof=pushop.revs)
    else:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    # stash the results on the operation object for the later push steps
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
592 592
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = listkeys(pushop.remote, 'phases')

    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3781 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                     pushop.fallbackheads,
                                                     remotephases)
    droots = pushop.remotephases.draftroots

    # On a non-publishing remote, only changesets that are public locally
    # need a phase update there.
    extracond = ''
    if not pushop.remotephases.publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln  + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
645 645
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """select the obsolescence markers relevant to the pushed changesets"""
    # marker exchange disabled for this repo: nothing to do
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return

    # no local markers at all: nothing to send
    if not pushop.repo.obsstore:
        return

    # remote does not expose the obsolete namespace: it cannot receive them
    if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
        return

    repo = pushop.repo
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
662 662
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """compare local and remote bookmarks and schedule the moves to push"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict bookmark updates to the pushed subset of history
        revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)

    remotebookmark = listkeys(remote, 'bookmarks')

    # bookmarks the user explicitly asked to push (names fully expanded)
    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        # a side may have no node for the bookmark; keep None as-is
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        return [(b, safehex(scid), safehex(dcid))
                for (b, scid, dcid) in bookmarks]

    comp = [hexifycompbookmarks(marks) for marks in comp]
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
693 693
694 694 def _processcompared(pushop, pushed, explicit, remotebms, comp):
695 695 """take decision on bookmark to pull from the remote bookmark
696 696
697 697 Exist to help extensions who want to alter this behavior.
698 698 """
699 699 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
700 700
701 701 repo = pushop.repo
702 702
703 703 for b, scid, dcid in advsrc:
704 704 if b in explicit:
705 705 explicit.remove(b)
706 706 if not pushed or repo[scid].rev() in pushed:
707 707 pushop.outbookmarks.append((b, dcid, scid))
708 708 # search added bookmark
709 709 for b, scid, dcid in addsrc:
710 710 if b in explicit:
711 711 explicit.remove(b)
712 712 pushop.outbookmarks.append((b, '', scid))
713 713 # search for overwritten bookmark
714 714 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
715 715 if b in explicit:
716 716 explicit.remove(b)
717 717 pushop.outbookmarks.append((b, dcid, scid))
718 718 # search for bookmark to delete
719 719 for b, scid, dcid in adddst:
720 720 if b in explicit:
721 721 explicit.remove(b)
722 722 # treat as "deleted locally"
723 723 pushop.outbookmarks.append((b, dcid, ''))
724 724 # identical bookmarks shouldn't get reported
725 725 for b, scid, dcid in same:
726 726 if b in explicit:
727 727 explicit.remove(b)
728 728
729 729 if explicit:
730 730 explicit = sorted(explicit)
731 731 # we should probably list all of them
732 732 pushop.ui.warn(_('bookmark %s does not exist on the local '
733 733 'or remote repository!\n') % explicit[0])
734 734 pushop.bkresult = 2
735 735
736 736 pushop.outbookmarks.sort()
737 737
def _pushcheckoutgoing(pushop):
    """validate that the computed outgoing changesets may be pushed

    Returns False when there is nothing to push.  Unless the push is
    forced, aborts when an outgoing head is obsolete or unstable, and
    runs the standard multiple-head checks.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
772 772
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the b2partsgenmapping dictionary directly."""
    def register(func):
        # each step name may only be registered once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            # explicit position requested in the generation order
            b2partsgenorder.insert(idx, stepname)
        return func
    return register
799 799
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' do not check for push race,
    # * if we don't push anything, there are nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            # coarse check: the remote must still have exactly the heads we
            # observed at discovery time
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            # finer check: only the remote heads this push replaces or
            # discards must still be present remotely
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)
823 823
824 824 def _pushing(pushop):
825 825 """return True if we are pushing anything"""
826 826 return bool(pushop.outgoing.missing
827 827 or pushop.outdatedphases
828 828 or pushop.outobsmarkers
829 829 or pushop.outbookmarks)
830 830
@b2partsgenerator('check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasbookmarkcheck = 'bookmarks' in b2caps
    if not (pushop.outbookmarks and hasbookmarkcheck):
        return
    # ask the server to verify each bookmark still points at the node our
    # decision was based on, so a concurrent move fails the push
    data = []
    for book, old, new in pushop.outbookmarks:
        old = bin(old)
        data.append((book, old))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('check:bookmarks', data=checkdata)
846 846
@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = 'heads' in b2caps.get('phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            # sort for a deterministic binary encoding
            for nodes in checks:
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart('check:phases', data=checkdata)
864 864
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    # negotiate the changegroup version: default to '01' and upgrade to the
    # highest version supported by both sides when advertised
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        # exactly one reply is expected for the changegroup part we sent
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
904 904
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """Dispatch phase pushing to the binary part or the pushkey fallback.

    The binary 'phase-heads' part is preferred when the server advertises
    it, unless the devel config forces the legacy exchange.
    """
    if 'phases' in pushop.stepsdone:
        return
    caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui
    forcelegacy = 'phases' in ui.configlist('devel', 'legacy.exchange')
    if 'heads' in caps.get('phases', ()) and not forcelegacy:
        return _pushb2phaseheads(pushop, bundler)
    if 'pushkey' in caps:
        return _pushb2phasespushkey(pushop, bundler)
921 921
def _pushb2phaseheads(pushop, bundler):
    """Push phase updates through the binary 'phase-heads' bundle2 part."""
    pushop.stepsdone.add('phases')
    outdated = pushop.outdatedphases
    if not outdated:
        return
    # one bucket per phase; pushing only ever turns heads public, so only
    # the public bucket (index 0) gets filled
    updates = [[] for p in phases.allphases]
    updates[0].extend(h.node() for h in outdated)
    bundler.newpart('phase-heads', data=phases.binaryencode(updates))
930 930
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part

    One pushkey part is emitted per outdated head; the returned reply
    handler matches server replies back to heads via the recorded part ids.
    """
    pushop.stepsdone.add('phases')
    # (part id, node) pairs, used to map replies/failures back to heads
    part2node = []

    def handlefailure(pushop, exc):
        # invoked when the server reports a failure on one of our parts
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        # check each part's reply; a missing or falsy return means the
        # server did not apply the phase move
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
965 965
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """Add an obsolescence-markers part when a common format exists."""
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no obsmarker format shared with the remote; leave the step undone
        return
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    bundle2.buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
977 977
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """Dispatch bookmark pushing to the binary part or pushkey fallback."""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
    usebinary = 'bookmarks' not in legacy and 'bookmarks' in b2caps
    if usebinary:
        return _pushb2bookmarkspart(pushop, bundler)
    if 'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)
992 992
993 993 def _bmaction(old, new):
994 994 """small utility for bookmark pushing"""
995 995 if not old:
996 996 return 'export'
997 997 elif not new:
998 998 return 'delete'
999 999 return 'update'
1000 1000
def _pushb2bookmarkspart(pushop, bundler):
    """Push bookmark moves through the binary 'bookmarks' bundle2 part.

    Returns a reply handler printing one status line per pushed bookmark.
    """
    pushop.stepsdone.add('bookmarks')
    if not pushop.outbookmarks:
        return

    data = []
    allactions = []
    for book, old, new in pushop.outbookmarks:
        binnode = bin(new)
        data.append((book, binnode))
        allactions.append((book, _bmaction(old, binnode)))
    bundler.newpart('bookmarks', data=bookmod.binaryencode(data))

    def handlereply(op):
        # the server aborts the whole bundle on failure, so reaching this
        # point means every bookmark move succeeded
        for book, action in allactions:
            pushop.ui.status(bookmsgmap[action][0] % book)

    return handlereply
1022 1022
def _pushb2bookmarkspushkey(pushop, bundler):
    """push bookmarks through bundle2 using legacy pushkey parts

    One pushkey part is emitted per outgoing bookmark; replies and failures
    are matched back to bookmarks via the recorded part ids.
    """
    pushop.stepsdone.add('bookmarks')
    # (part id, bookmark name, action) triples for reply/failure handling
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        # map the failing part id back to its bookmark to build the message
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for parts we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    # discovery can have set the value form invalid entry
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
1067 1067
@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    """Send shell variables (--pushvars) to the server via bundle2."""
    if not pushop.pushvars:
        return
    shellvars = {}
    for raw in pushop.pushvars:
        if '=' not in raw:
            msg = ("unable to parse variable '%s', should follow "
                   "'KEY=VALUE' or 'KEY=' format")
            raise error.Abort(msg % raw)
        key, value = raw.split('=', 1)
        shellvars[key] = value

    part = bundler.newpart('pushvars')
    for key, value in shellvars.iteritems():
        part.addparam(key, value, mandatory=False)
1086 1086
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # only allow the server to push data back when a transaction manager
    # exists and the experimental pushback config is enabled
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback,
                                                      role='client'))
    bundler.newpart('replycaps', data=capsblob)
    # run all registered part generators in order; generators returning a
    # callable get it registered as a reply handler
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (the replycaps part is always there)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            # send the bundle and retrieve the server reply
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand('unbundle', {
                    'bundle': stream,
                    'heads': ['force'],
                    'url': pushop.remote.url(),
                }).result()
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            # process the reply bundle (possibly containing pushed-back data)
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            # the server aborted from inside a part; relay its message/hint
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        # dispatch pushkey failures to the callback registered by the part
        # generator that created the failing part, if any
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
1140 1140
def _pushchangeset(pushop):
    """Push the changeset bundle to the remote (legacy, non-bundle2 path)."""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # push() is responsible for verifying this earlier
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # pushing everything: the fast path is safe, no race possible
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote; the server recomputes its heads and
    # aborts if someone else won a commit/push race in the meantime
    remoteheads = ['force'] if pushop.force else pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())
1180 1180
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, 'phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        # split the common heads into remote-public heads and draft roots
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand('pushkey', {
                    'namespace': 'phases',
                    'key': newremotehead.hex(),
                    'old': '%d' % phases.draft,
                    'new': '%d' % phases.public
                }).result()

            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
1240 1240
def _localphasemove(pushop, nodes, phase=phases.public):
    """Advance ``nodes`` to ``phase`` in the local source repo.

    Without a transaction manager the repo is not locked, so nothing is
    changed; the user is informed instead when a move would have happened.
    """
    if not pushop.trmanager:
        # repo is not locked, do not change any phases!
        # Only warn when at least one node would actually have moved.
        wouldmove = any(phase < pushop.repo[n].phase() for n in nodes)
        if wouldmove:
            phasestr = phases.phasenames[phase]
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
        return
    phases.advanceboundary(pushop.repo, pushop.trmanager.transaction(),
                           phase, nodes)
1257 1257
def _pushobsolete(pushop):
    """Push obsolescence markers to the remote through pushkey."""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    remote = pushop.remote
    remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
    results = []
    # reverse sort to ensure we end with dump0
    for key in sorted(remotedata, reverse=True):
        results.append(remote.pushkey('obsolete', key, '', remotedata[key]))
    if not all(results):
        pushop.repo.ui.warn(_('failed to push some obsolete markers!\n'))
1276 1276
def _pushbookmark(pushop):
    """Update bookmark positions on the remote through pushkey."""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for book, old, new in pushop.outbookmarks:
        # classify the move so the right message template is used
        action = _bmaction(old, new)

        with remote.commandexecutor() as e:
            ok = e.callcommand('pushkey', {
                'namespace': 'bookmarks',
                'key': book,
                'old': old,
                'new': new,
            }).result()

        if ok:
            ui.status(bookmsgmap[action][0] % book)
        else:
            ui.warn(bookmsgmap[action][1] % book)
            # discovery can have set the value form invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
1307 1307
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull related state and very common operations.

    A new instance should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None,
                 includepats=None, excludepats=None, depth=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager (set by pull() once locks are taken)
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False
        # Set of file patterns to include.
        self.includepats = includepats
        # Set of file patterns to exclude.
        self.excludepats = excludepats
        # Number of ancestor changesets to pull from each pulled head.
        self.depth = depth

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        # True unless config or server capabilities force the bundle1 path
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1385 1385
class transactionmanager(util.transactional):
    """Manage the life cycle of a pull/unbundle transaction.

    The underlying transaction is created lazily on first use and the
    appropriate hooks are called when it is closed.
    """
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        # lazily-created transaction, see transaction()
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            tr.hookargs['source'] = self.source
            tr.hookargs['url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.close()

    def release(self):
        """release transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.release()
1415 1415
def listkeys(remote, namespace):
    """Fetch the pushkey listing for ``namespace`` from ``remote``."""
    with remote.commandexecutor() as e:
        fut = e.callcommand('listkeys', {'namespace': namespace})
        return fut.result()
1419 1419
def _fullpullbundle2(repo, pullop):
    """Repeatedly pull bundle2 data until the pull is complete.

    The server may send a partial reply, i.e. when inlining
    pre-computed bundles. In that case, update the common
    set based on the results and pull another bundle.

    There are two indicators that the process is finished:
    - no changeset has been added, or
    - all remote heads are known locally.
    The head check must use the unfiltered view as obsoletion
    markers can hide heads.
    """
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set('heads(%ln %% %ln)', h1, h2)
        return set(ctx.node() for ctx in res)
    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set('heads((%ln + %ln - null))', h1, h2)
        return set(ctx.node() for ctx in res)
    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if repository.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            # nothing was added; the previous reply was complete
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            # every remote head is now known locally; we are done
            break
        # grow the common set with the newly received heads and retry
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common
1457 1457
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None, includepats=None, excludepats=None,
         depth=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.
    ``depth`` is an integer indicating the DAG depth of history we're
    interested in. If defined, for each revision specified in ``heads``, we
    will fetch up to this many of its ancestors and data associated with them.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}

    # We allow the narrow patterns to be passed in explicitly to provide more
    # flexibility for API consumers.
    if includepats or excludepats:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested,
                           includepats=includepats, excludepats=excludepats,
                           depth=depth,
                           **pycompat.strkwargs(opargs))

    # refuse to pull from a peer requiring features we do not support
    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
    with repo.wlock(), repo.lock(), pullop.trmanager:
        # Use the modern wire protocol, if available.
        if remote.capable('command-changesetdata'):
            exchangev2.pull(pullop)
        else:
            # This should ideally be in _pullbundle2(). However, it needs to run
            # before discovery to avoid extra work.
            _maybeapplyclonebundle(pullop)
            streamclone.maybeperformlegacystreamclone(pullop)
            _pulldiscovery(pullop)
            if pullop.canusebundle2:
                _fullpullbundle2(repo, pullop)
            _pullchangeset(pullop)
            _pullphase(pullop)
            _pullbookmarks(pullop)
            _pullobsolete(pullop)

        # storing remotenames
        if repo.ui.configbool('experimental', 'remotenames'):
            logexchange.pullremotenames(repo, remote)

    return pullop
1539 1539
# ordered list of discovery step names to run before pull; populated by the
# ``pulldiscovery`` decorator below
pulldiscoveryorder = []

# Mapping between step name and the function implementing the step
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
1547 1547
def pulldiscovery(stepname):
    """Decorator registering a function as a pre-pull discovery step.

    The function is recorded in the step -> function mapping and its name
    appended to the ordered step list; decoration order therefore matters.

    Only use this decorator for a new step. To wrap an existing step from
    an extension, change the ``pulldiscoverymapping`` dictionary directly.
    """
    def register(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoveryorder.append(stepname)
        pulldiscoverymapping[stepname] = func
        return func
    return register
1563 1563
def _pulldiscovery(pullop):
    """Run every registered discovery step, in registration order."""
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
1569 1569
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """Fetch bookmark data early when the pull cannot use bundle2.

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions.
    """
    if pullop.remotebookmarks is not None:
        # already known (e.g. supplied by the caller); nothing to fetch
        return
    usingbundle2 = (pullop.canusebundle2
                    and 'listkeys' in pullop.remotebundle2caps)
    if usingbundle2:
        # all known bundle2 servers now support listkeys, but lets be nice
        # with new implementation.
        return
    raw = listkeys(pullop.remote, 'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(raw)
1584 1584
1585 1585
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Current handle changeset discovery only, will change handle all discovery
    at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote heads is filtered locally, put in back in common.
        #
        # This is a hackish solution to catch most of "common but locally
        # hidden situation". We do not performs discovery on unfiltered
        # repository because it end up doing a pathological amount of round
        # trip for w huge amount of changeset we do not care about.
        #
        # If a set of such "common but filtered" changeset exist on the server
        # but are not including a remote heads, we'll not be able to detect it,
        scommon = set(common)
        for n in rheads:
            if n in nm:
                # remote head exists locally (possibly hidden); treat as common
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            # every remote head is already common: nothing left to fetch
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1618 1618
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup.

    Builds the keyword arguments for the wire 'getbundle' command from what
    has already been pulled (``pullop.stepsdone``) and from the capabilities
    advertised by the remote, fetches the bundle, applies it, then records
    the phase/bookmark results carried back in the bundle parts.
    """
    kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads

    if streaming:
        # stream clone transfers raw store files; changegroup and phases
        # are implicitly covered, so mark those steps done up front
        kwargs['cg'] = False
        kwargs['stream'] = True
        pullop.stepsdone.add('changegroup')
        pullop.stepsdone.add('phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add('changegroup')

        kwargs['cg'] = pullop.fetch

        # prefer the binary phase-heads part over legacy pushkey listkeys,
        # unless the devel knob forces the legacy exchange
        legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
        hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
        if (not legacyphase and hasbinaryphase):
            kwargs['phases'] = True
            pullop.stepsdone.add('phases')

        if 'listkeys' in pullop.remotebundle2caps:
            if 'phases' not in pullop.stepsdone:
                kwargs['listkeys'] = ['phases']

    bookmarksrequested = False
    legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
    hasbinarybook = 'bookmarks' in pullop.remotebundle2caps

    # bookmarks already fetched during discovery: nothing to request
    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add('request-bookmarks')

    if ('request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark and hasbinarybook):
        kwargs['bookmarks'] = True
        bookmarksrequested = True

    if 'listkeys' in pullop.remotebundle2caps:
        if 'request-bookmarks' not in pullop.stepsdone:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add('request-bookmarks')
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    # let extensions add/adjust arguments before the request is sent
    _pullbundle2extraprepare(pullop, kwargs)

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args['source'] = 'pull'
        bundle = e.callcommand('getbundle', args).result()

        try:
            op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
                                         source='pull')
            # record bookmark parts instead of applying them directly so
            # they can be processed below with the explicit-bookmark logic
            op.modes['bookmarks'] = 'records'
            bundle2.processbundle(pullop.repo, bundle, op=op)
        except bundle2.AbortFromPart as exc:
            pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
            raise error.Abort(_('pull failed on remote'), hint=exc.hint)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records['bookmarks']:
            books[record['bookmark']] = record["node"]
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records['listkeys']:
            if namespace == 'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1740 1740
1741 1741 def _pullbundle2extraprepare(pullop, kwargs):
1742 1742 """hook function so that extensions can extend the getbundle call"""
1743 1743
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Legacy (non-bundle2 negotiation) pull path: picks the best wire command
    the remote supports (getbundle > changegroup > changegroupsubset),
    fetches the changegroup and applies it inside the pull transaction.
    """
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        # full pull without getbundle: plain changegroup command
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroup', {
                'nodes': pullop.fetch,
                'source': 'pull',
            }).result()

    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        # partial pull on an old remote: changegroupsubset
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroupsubset', {
                'bases': pullop.fetch,
                'heads': pullop.heads,
                'source': 'pull',
            }).result()

    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1789 1789
def _pullphase(pullop):
    """Fetch phase data via the legacy pushkey protocol and apply it.

    No-op when the 'phases' step was already handled (e.g. by a bundle2
    phase-heads part).
    """
    if 'phases' not in pullop.stepsdone:
        remotephases = listkeys(pullop.remote, 'phases')
        _pullapplyphases(pullop, remotephases)
1796 1796
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the pushkey-style mapping returned by the remote.
    Computes which pulled changesets should become public (``pheads``) and
    which should become draft (``dheads``), then advances the local phase
    boundaries accordingly inside the pull transaction.
    """
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1831 1831
def _pullbookmarks(pullop):
    """Update local bookmarks from the observed remote bookmark state.

    Runs at most once per pull operation (guarded by ``stepsdone``);
    explicitly requested bookmarks are honored even when diverged.
    """
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    localrepo = pullop.repo
    bookmod.updatefromremote(localrepo.ui, localrepo,
                             pullop.remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1843 1843
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, 'obsolete')
        # 'dump0' presence signals the remote actually has markers to send;
        # only then do we open (or reuse) a transaction
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            # markers are spread across multiple 'dumpN' pushkey entries,
            # each base85-encoded
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr
1871 1871
def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.

    Returns a new kwargs dict; raises Abort when the user has no include
    rules configured or requests patterns outside their allowed set.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION, username + '.includes',
        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
    user_excludes = ui.configlist(
        _NARROWACL_SECTION, username + '.excludes',
        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
    if not user_includes:
        raise error.Abort(_("{} configuration for user {} is empty")
                          .format(_NARROWACL_SECTION, username))

    # '*' means the whole repo; everything else is a path: pattern
    user_includes = [
        'path:.' if p == '*' else 'path:' + p for p in user_includes]
    user_excludes = [
        'path:.' if p == '*' else 'path:' + p for p in user_excludes]

    # kwargs came through **kwargs expansion, so keys are native str on
    # Python 3 — hence the r'' prefixes on every lookup/store below
    req_includes = set(kwargs.get(r'includepats', []))
    req_excludes = set(kwargs.get(r'excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes)

    if invalid_includes:
        raise error.Abort(
            _("The following includes are not accessible for {}: {}")
            .format(username, invalid_includes))

    new_args = {}
    new_args.update(kwargs)
    new_args[r'narrow'] = True
    new_args[r'narrow_acl'] = True
    new_args[r'includepats'] = req_includes
    if req_excludes:
        new_args[r'excludepats'] = req_excludes

    return new_args
1916 1916
def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
          May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      known: A set of revs the client already has; they are treated as
          required so their ellipsis roots are computed too.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
          most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
            need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
            the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
            narrowchangegroup to produce ellipsis nodes with the
            correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    clrev = cl.rev

    commonrevs = {clrev(n) for n in common} | {nullrev}
    headsrevs = {clrev(n) for n in heads}

    if depth:
        # distance (in changesets) of each rev from the nearest head
        revdepth = {h: 0 for h in headsrevs}

    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        # move ``roots`` from ``head`` to the newly chosen ``child``
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        r1, r2, r3 = sorted(ellipsisroots[head])
        # try each pair of roots, youngest-first
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
                            nr1, head, nr2, head)
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
                            'roots: %d %d %d') % (head, r1, r2, r3))

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    # walk newest -> oldest, classifying each rev as needed (sent in full)
    # or elided (becomes an ellipsis edge)
    for rev in visit:
        clrev = cl.changelogrevision(rev)
        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots
2042 2042
def caps20to10(repo, role):
    """Compute the bundlecaps set advertising bundle2 support for getbundle.

    The returned set contains the 'HG20' marker plus the URL-quoted,
    encoded bundle2 capabilities of ``repo`` for the given ``role``.
    """
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    return {'HG20', 'bundle2=' + urlreq.quote(capsblob)}
2049 2049
2050 2050 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2051 2051 getbundle2partsorder = []
2052 2052
2053 2053 # Mapping between step name and function
2054 2054 #
2055 2055 # This exists to help extensions wrap steps if necessary
2056 2056 getbundle2partsmapping = {}
2057 2057
def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The decorated function is recorded in the step -> function mapping, and
    the step name is appended to (or, when ``idx`` is given, inserted into)
    the ordered step list. Beware that decorated functions will be added in
    order (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the getbundle2partsmapping dictionary directly."""
    def register(generator):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = generator
        if idx is not None:
            getbundle2partsorder.insert(idx, stepname)
        else:
            getbundle2partsorder.append(stepname)
        return generator
    return register
2076 2076
def bundle2requested(bundlecaps):
    """Return True when ``bundlecaps`` advertises any bundle2 capability.

    ``bundlecaps`` may be None (no capabilities supplied by the caller),
    in which case bundle2 is considered not requested.
    """
    if bundlecaps is None:
        return False
    return any(cap.startswith('HG2') for cap in bundlecaps)
2081 2081
def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    # normalize **kwargs keys (native str) back to bytes for internal use
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        # bundle10 only carries a changegroup: any other argument is an error
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        info['bundleversion'] = 1
        return info, changegroup.makestream(repo, outgoing, '01', source,
                                            bundlecaps=bundlecaps)

    # bundle20 case
    info['bundleversion'] = 2
    # decode the client's bundle2 capabilities from the bundlecaps blob
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # each registered part generator decides for itself whether to emit
    # a part; kwargs keys are re-expanded to native str for the call
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    info['prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()
2128 2128
@getbundle2partsgenerator('stream2')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    # Thin delegation: bundle2 decides whether a stream2 part applies.
    # Extra positional arguments (source, ...) are accepted but unused.
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2132 2132
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle

    Negotiates the changegroup version from the client's bundle2 caps,
    emits the (possibly narrowed) changegroup stream, and — for narrow
    ACL-filtered pulls — a 'narrow:spec' part describing the effective
    include/exclude patterns.
    """
    # ``kwargs`` came through **kwargs expansion (pycompat.strkwargs), so
    # its keys are native str: every lookup must use an r''-prefixed key.
    if not kwargs.get(r'cg', True):
        return

    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)

    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missing:
        # nothing to transfer: emit no changegroup part at all
        return

    if kwargs.get(r'narrow', False):
        include = sorted(filter(bool, kwargs.get(r'includepats', [])))
        exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
        matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
    else:
        # defined in both branches so the narrow:spec guard below can
        # never hit an unbound name
        include = exclude = []
        matcher = None

    cgstream = changegroup.makestream(repo, outgoing, version, source,
                                      bundlecaps=bundlecaps, matcher=matcher)

    part = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        part.addparam('version', version)

    part.addparam('nbchanges', '%d' % len(outgoing.missing),
                  mandatory=False)

    if 'treemanifest' in repo.requirements:
        part.addparam('treemanifest', '1')

    # The ACL flag is stored under a native-str key by applynarrowacl(), so
    # it must be read back with an r'' key too: a plain 'narrow_acl' (bytes
    # after the py3 source transformer) would never match and the spec part
    # would silently be skipped.
    if (kwargs.get(r'narrow', False) and kwargs.get(r'narrow_acl', False)
        and (include or exclude)):
        narrowspecpart = bundler.newpart('narrow:spec')
        if include:
            narrowspecpart.addparam(
                'include', '\n'.join(include), mandatory=True)
        if exclude:
            narrowspecpart.addparam(
                'exclude', '\n'.join(exclude), mandatory=True)
2182 2182
@getbundle2partsgenerator('bookmarks')
def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                           b2caps=None, **kwargs):
    """Emit a binary 'bookmarks' part when the client requested one.

    Raises ValueError if bookmarks were requested but the client does not
    advertise the binary bookmarks capability.
    """
    if not kwargs.get(r'bookmarks', False):
        return
    if 'bookmarks' not in b2caps:
        raise ValueError(_('no common bookmarks exchange method'))
    payload = bookmod.binaryencode(bookmod.listbinbookmarks(repo))
    if payload:
        bundler.newpart('bookmarks', data=payload)
2195 2195
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """Emit one 'listkeys' part per requested pushkey namespace."""
    for namespace in kwargs.get(r'listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
2206 2206
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """Emit an obsolescence-markers part covering ancestors of ``heads``."""
    if not kwargs.get(r'obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    subset = [ctx.node() for ctx in repo.set('::%ln', heads)]
    relevant = repo.obsstore.relevantmarkers(subset)
    bundle2.buildobsmarkerspart(bundler, sorted(relevant))
2218 2218
@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle

    Computes, per phase, the set of heads the client should move its
    boundaries to, and emits them as a binary 'phase-heads' part.
    """
    if kwargs.get(r'phases', False):
        # Default to an empty tuple so a client that requested phases but
        # lacks the 'phases' capability gets the intended ValueError rather
        # than a TypeError from `'heads' in None` (same .get(..., ())
        # pattern as the client side in _pullbundle2).
        if 'heads' not in b2caps.get('phases', ()):
            raise ValueError(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            # a publishing server exposes everything as public
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phase for now)
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)
2265 2265
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Only send when changesets are being exchanged AND the client
    # advertises support for the part.
    exchangingcsets = kwargs.get(r'cg', True)
    if not exchangingcsets or 'hgtagsfnodes' not in b2caps:
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2285 2285
@getbundle2partsgenerator('cache:rev-branch-cache')
def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
                             b2caps=None, heads=None, common=None,
                             **kwargs):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Skip unless changesets are being exchanged and the client supports
    # the part; narrow bundles are not currently compatible.
    if not kwargs.get(r'cg', True):
        return
    if 'rev-branch-cache' not in b2caps:
        return
    if kwargs.get(r'narrow', False) or repo.ui.has_section(_NARROWACL_SECTION):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2312 2312
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling. ``their_heads`` may be the literal
    ['force'], the exact current head list, or ['hashed', <sha1 digest of
    the sorted, concatenated heads>]. Raises PushRaced when none match.
    """
    current = repo.heads()
    currenthash = hashlib.sha1(''.join(sorted(current))).digest()
    accepted = (their_heads == ['force']
                or their_heads == current
                or their_heads == ['hashed', currenthash])
    if not accepted:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
2326 2326
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    # lazily take the locks and open the transaction the
                    # first time a part actually needs one
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput,
                                             source='push')
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    # relay any captured server output back to the client
                    # through an 'output' part on the reply bundle
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # annotate the exception so upper layers know bundle2 was
                # mid-flight, and salvage already-generated reply output
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
2400 2400
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible.

    When the local repository is empty, no specific heads were requested,
    and the remote advertises the ``clonebundles`` capability, attempt to
    seed the clone from a server-advertised pre-generated bundle instead
    of performing a regular pull.  Returns None in every "not applicable"
    case; raises ``error.Abort`` when a bundle was selected but failed to
    apply and ``ui.clonebundlefallback`` is disabled.
    """

    repo = pullop.repo
    remote = pullop.remote

    # Client-side kill switch for the whole feature.
    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    # A pull restricted to specific heads is not a full clone; a clone
    # bundle could bring in more than was asked for.
    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand('clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    # Entries are sorted by the user's preferences; the first one wins.
    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))
2467 2467
def parseclonebundlesmanifest(repo, s):
    """Parse the raw text of a clone bundles manifest.

    Each non-empty line holds a URL followed by whitespace-separated,
    percent-encoded ``key=value`` attributes.  Returns a list of dicts;
    every dict has a ``URL`` key and one key per decoded attribute.
    """
    entries = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue

        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    bundlespec = parsebundlespec(repo, value)
                    attrs['COMPRESSION'] = bundlespec.compression
                    attrs['VERSION'] = bundlespec.version
                except (error.InvalidBundleSpecification,
                        error.UnsupportedBundleSpecification):
                    # A bad spec is still recorded verbatim; it is simply
                    # not expanded into its components.
                    pass

        entries.append(attrs)

    return entries
2502 2502
def isstreamclonespec(bundlespec):
    """Report whether ``bundlespec`` describes a stream clone bundle.

    Recognizes stream clone v1 (uncompressed, wire version ``s1``) and
    stream clone v2 (uncompressed, wire version ``02`` with the
    ``streamv2`` content option set).  Returns a bool.
    """
    # Note: the original used redundant backslash continuations inside
    # parentheses; implicit line joining makes them unnecessary (PEP 8).

    # Stream clone v1
    if (bundlespec.wirecompression == 'UN'
            and bundlespec.wireversion == 's1'):
        return True

    # Stream clone v2
    if (bundlespec.wirecompression == 'UN'
            and bundlespec.wireversion == '02'
            and bundlespec.contentopts.get('streamv2')):
        return True

    return False
2515 2515
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    def compatible(entry):
        # Return True if this client should be able to apply ``entry``;
        # emit a debug message explaining each rejection.
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    return False

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(stringutil.forcebytestr(e) + '\n')
                return False
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (
                              entry['URL'], stringutil.forcebytestr(e)))
                return False
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            return False

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            return False

        return True

    return [entry for entry in entries if compatible(entry)]
2563 2563
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        # ``value`` is the entry's attribute dict; ``prefers`` is an
        # ordered list of (key, preferred-value) pairs.
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        # Three-way comparison: -1 if self sorts first, 1 if other does,
        # 0 if the preferences cannot distinguish them.
        for key, wanted in self.prefers:
            mine = self.value.get(key)
            theirs = other.value.get(key)

            if mine is None or theirs is None:
                # When only one side carries the attribute, it wins iff it
                # matches the preferred value exactly; otherwise this
                # attribute cannot rank the pair.
                if mine is not None and mine == wanted:
                    return -1
                if theirs is not None and theirs == wanted:
                    return 1
                continue

            # Same values should fall back to the next attribute.
            if mine == theirs:
                continue

            # Exact matches come first.
            if mine == wanted:
                return -1
            if theirs == wanted:
                return 1

            # Neither side matches the preference; try the next one.

        # Attributes and prefers could not decide; fall back to index order.
        return 0

    # Rich-comparison protocol, all routed through _cmp.
    def __lt__(self, other):
        return self._cmp(other) < 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __ne__(self, other):
        return self._cmp(other) != 0
2627 2627
def sortclonebundleentries(ui, entries):
    """Sort manifest entries according to ``ui.clonebundleprefers``.

    With no configured preferences the entries come back in their
    original order (as a new list); otherwise each ``key=value``
    preference ranks the entries via ``clonebundleentry``.
    """
    raw = ui.configlist('ui', 'clonebundleprefers')
    if not raw:
        return list(entries)

    prefs = [p.split('=', 1) for p in raw]

    ranked = sorted(clonebundleentry(entry, prefs) for entry in entries)
    return [item.value for item in ranked]
2637 2637
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Fetches ``url`` and applies its contents to ``repo`` inside a single
    lock + transaction.  Returns True on success.  HTTP and URL-level
    fetch errors are reported as warnings and yield False; any error
    raised while *applying* the bundle is not caught here and propagates.
    """
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            # Stream clone bundles bypass changegroup application and are
            # handed to the dedicated stream-clone applier.
            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e.reason))

        return False
General Comments 0
You need to be logged in to leave comments. Login now