##// END OF EJS Templates
fix: make the order of the work queue deterministic...
fix: make the order of the work queue deterministic This makes any output generated during the parallel phase of execution stable if parallelism is disabled. This helps write tests like that in the future. Differential Revision: https://phab.mercurial-scm.org/D6166

File last commit:

r41926:1eb2fc21 default
r42176:8f427f7c default
Show More
exchange.py
2698 lines | 100.4 KiB | text/x-python | PythonLexer
Mads Kiilerich
spelling: fixes from spell checker
r21024 # exchange.py - utility to exchange data between repos.
Pierre-Yves David
exchange: extract push function from localrepo...
r20345 #
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
Gregory Szorc
exchange: use absolute_import
r27523 from __future__ import absolute_import
Boris Feld
pull: use 'phase-heads' to retrieve phase information...
r34323 import collections
Augie Fackler
cleanup: replace uses of util.(md5|sha1|sha256|sha512) with hashlib.\1...
r29341 import hashlib
Gregory Szorc
exchange: use absolute_import
r27523
from .i18n import _
from .node import (
Boris Feld
push: include a 'check:bookmarks' part when possible...
r35260 bin,
Gregory Szorc
exchange: use absolute_import
r27523 hex,
nullid,
Gregory Szorc
exchange: move _computeellipsis() from narrow...
r38827 nullrev,
Gregory Szorc
exchange: use absolute_import
r27523 )
Boris Feld
bundlespec: introduce an attr-based class for bundlespec...
r37181 from .thirdparty import (
attr,
)
Gregory Szorc
exchange: use absolute_import
r27523 from . import (
bookmarks as bookmod,
bundle2,
changegroup,
discovery,
error,
Gregory Szorc
exchangev2: start to implement pull with wire protocol v2...
r39665 exchangev2,
Gregory Szorc
exchange: use absolute_import
r27523 lock as lockmod,
Pulkit Goyal
remotenames: rename related file and storage dir to logexchange...
r35348 logexchange,
Gregory Szorc
exchange: move narrow acl functionality into core...
r38826 narrowspec,
Gregory Szorc
exchange: use absolute_import
r27523 obsolete,
phases,
pushkey,
Pulkit Goyal
py3: use pycompat.strkwargs() to convert kwargs keys to str before passing
r32896 pycompat,
Martin von Zweigbergk
narrow: move requirement constant from changegroup to repository...
r38871 repository,
Gregory Szorc
exchange: use absolute_import
r27523 scmutil,
sslutil,
streamclone,
url as urlmod,
util,
Pulkit Goyal
exchange: pass includepats and excludepats as arguments to getbundle()...
r40527 wireprototypes,
Gregory Szorc
exchange: use absolute_import
r27523 )
Yuya Nishihara
stringutil: bulk-replace call sites to point to new module...
r37102 from .utils import (
stringutil,
)
Pierre-Yves David
exchange: extract push function from localrepo...
r20345
timeless
pycompat: switch to util.urlreq/util.urlerr for py3 compat
r28883 urlerr = util.urlerr
urlreq = util.urlreq
Gregory Szorc
exchange: move disabling of rev-branch-cache bundle part out of narrow...
r38825 _NARROWACL_SECTION = 'narrowhgacl'
Gregory Szorc
exchange: refactor bundle specification parsing...
r26640 # Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
'v2': '02',
Gregory Szorc
exchange: support for streaming clone bundles...
r26756 'packed1': 's1',
Gregory Szorc
exchange: refactor bundle specification parsing...
r26640 'bundle2': '02', #legacy
}
Boris Feld
bundlespec: move computing the bundle contentops in parsebundlespec...
r37182 # Maps bundle version with content opts to choose which part to bundle
_bundlespeccontentopts = {
'v1': {
'changegroup': True,
'cg.version': '01',
'obsolescence': False,
'phases': False,
'tagsfnodescache': False,
'revbranchcache': False
},
'v2': {
'changegroup': True,
'cg.version': '02',
'obsolescence': False,
'phases': False,
'tagsfnodescache': True,
'revbranchcache': True
},
'packed1' : {
'cg.version': 's1'
}
}
_bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
Boris Feld
bundlespec: add support for some variants...
r37185 _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
"tagsfnodescache": False,
"revbranchcache": False}}
Gregory Szorc
exchange: reject new compression engines for v1 bundles (issue5506)...
r31473 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
Martin von Zweigbergk
cleanup: use set literals...
r32291 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
Gregory Szorc
exchange: reject new compression engines for v1 bundles (issue5506)...
r31473
Boris Feld
bundlespec: introduce an attr-based class for bundlespec...
@attr.s
class bundlespec(object):
    """Parsed representation of a bundle specification string.

    Instances are produced by ``parsebundlespec()``.
    """
    # human-readable compression engine name (e.g. 'gzip')
    compression = attr.ib()
    # compression name as used on the wire (engine.bundletype()[1])
    wirecompression = attr.ib()
    # human-readable bundle version ('v1', 'v2', 'packed1', ...)
    version = attr.ib()
    # changegroup version as used on the wire ('01', '02', 's1')
    wireversion = attr.ib()
    # dict of URI-decoded key/value parameters from the spec string
    params = attr.ib()
    # dict of content options selecting which parts go in the bundle
    contentopts = attr.ib()
Boris Feld
bundlespec: introduce an attr-based class for bundlespec...
r37181
Joerg Sonnenberger
bundlespec: drop externalnames flag...
def parsebundlespec(repo, spec, strict=True):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    Returns a bundlespec object of (compression, version, parameters).
    Compression will be ``None`` if not in strict mode and a compression isn't
    defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        # Split trailing ";key=value" pairs off of ``s`` and URI-decode them.
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params

    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
                _('invalid bundle specification; '
                  'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        # Fully-qualified "<compression>-<version>" form.
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                    _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                    _('missing support for repository features: %s') %
                      ', '.join(sorted(missingreqs)))

    # Compute contentopts based on the version
    contentopts = _bundlespeccontentopts.get(version, {}).copy()

    # Process the variants (e.g. ";stream=v2" switches to a stream bundle)
    if "stream" in params and params["stream"] == "v2":
        variant = _bundlespecvariants["streamv2"]
        contentopts.update(variant)

    # Normalize the compression name through the engine so we also learn
    # the name used on the wire.
    engine = util.compengines.forbundlename(compression)
    compression, wirecompression = engine.bundletype()
    wireversion = _bundlespeccgversions[version]

    return bundlespec(compression, wirecompression, version, wireversion,
                      params, contentopts)
Gregory Szorc
exchange: move bundle specification parsing from cmdutil...
r26639
Pierre-Yves David
bundle2: add a ui argument to readbundle...
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler object for the bundle read from ``fh``.

    The first 4 bytes of ``fh`` are read as the magic string. Depending on
    the detected format this returns a ``changegroup.cg1unpacker`` (HG10),
    a ``bundle2`` unbundler (HG2*), or a ``streamclone.streamcloneapplier``
    (HGS1).

    ``fname`` is only used in error messages; when empty, "stream" is used
    and a headerless input (first byte '\\0') is assumed to be an
    uncompressed HG10 changegroup. ``vfs``, if given, is used to expand
    ``fname`` to a full path for error reporting.

    Raises ``error.Abort`` when the data is not a recognized bundle.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            # headerless bundle: put the consumed bytes back and assume an
            # uncompressed ('UN') HG10 changegroup
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            # HG10 bundles carry the 2-byte compression id after the magic
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
Pierre-Yves David
bundle2: move `readbundle` into the `exchange` module...
r21063
Gregory Szorc
exchange: implement function for inferring bundle specification...
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.

    Returns a bundlespec string such as ``gzip-v1``, ``none-packed1;...``,
    or ``none-v2;stream=v2;...``. Raises ``error.Abort`` when the bundle
    type, compression, or changegroup version cannot be mapped to a known
    bundlespec.
    """
    def speccompression(alg):
        # Map a wire compression id to its human-readable bundlespec name,
        # or None when the engine is unknown.
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                # comp is None here; report the unrecognized algorithm name
                # from the bundle parameters, not the None lookup result.
                raise error.Abort(_('unknown compression algorithm: %s')
                                  % b.params['Compression'])
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))
            elif part.type == 'stream2' and version is None:
                # A stream2 part requires to be part of a v2 bundle
                requirements = urlreq.unquote(part.params['requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return 'none-v2;stream=v2;%s' % params

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        formatted = bundle2._formatrequirementsparams(requirements)
        return 'none-packed1;%s' % formatted
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)
Pierre-Yves David
computeoutgoing: move the function from 'changegroup' to 'exchange'...
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if not common:
        # no common nodes known; everything above the null revision is missing
        common = [nullid]
    else:
        # keep only common nodes the local changelog actually knows about
        common = [n for n in common if cl.hasnode(n)]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)
av6
push: config option to control behavior when pushing to a publishing server...
def _checkpublish(pushop):
    """Apply the experimental.auto-publish policy before a push.

    When pushing draft changesets to a publishing server would make them
    public, warn, prompt for confirmation, or abort, depending on the
    configured behavior. Does nothing for explicit ``--publish`` pushes or
    when the behavior is unset/unknown.
    """
    repo = pushop.repo
    ui = repo.ui
    behavior = ui.config('experimental', 'auto-publish')
    if pushop.publish or behavior not in ('warn', 'confirm', 'abort'):
        return
    remotephases = listkeys(pushop.remote, 'phases')
    if not remotephases.get('publishing', False):
        # not a publishing server; the push will not change phases
        return
    if pushop.revs is None:
        published = repo.filtered('served').revs('not public()')
    else:
        published = repo.revs('::%ln - public()', pushop.revs)
    if not published:
        return
    if behavior == 'warn':
        ui.warn(_('%i changesets about to be published\n')
                % len(published))
    elif behavior == 'confirm':
        if ui.promptchoice(_('push and publish %i changesets (yn)?'
                             '$$ &Yes $$ &No') % len(published)):
            raise error.Abort(_('user quit'))
    elif behavior == 'abort':
        msg = _('push would publish %i changesets') % len(published)
        hint = _("use --publish or adjust 'experimental.auto-publish'"
                 " config")
        raise error.Abort(msg, hint=hint)
Pierre-Yves David
bundle2: rename the _canusebundle2 method to _forcebundle1...
r29682 def _forcebundle1(op):
"""return true if a pull/push must use bundle1
Pierre-Yves David
exchange: introduce a '_canusebundle2' function...
r24650
Pierre-Yves David
bundle2: add a devel option controling bundle version used for exchange...
r29683 This function is used to allow testing of the older bundle version"""
ui = op.repo.ui
Mads Kiilerich
spelling: fixes of non-dictionary words
r30332 # The goal is this config is to allow developer to choose the bundle
Pierre-Yves David
bundle2: add a devel option controling bundle version used for exchange...
r29683 # version used during exchanged. This is especially handy during test.
# Value is a list of bundle version to be picked from, highest version
# should be used.
#
# developer config: devel.legacy.exchange
exchange = ui.configlist('devel', 'legacy.exchange')
Pierre-Yves David
bundle2: remove 'experimental.bundle2-exp' boolean config (BC)...
r29689 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
Pierre-Yves David
bundle2: add a devel option controling bundle version used for exchange...
r29683 return forcebundle1 or not op.remote.capable('bundle2')
Pierre-Yves David
exchange: introduce a '_canusebundle2' function...
r24650
Pierre-Yves David
push: introduce a pushoperation object...
class pushoperation(object):
    """A object that represent a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), publish=False, pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars
        # publish pushed changesets
        self.publish = publish

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        # cgresult truthy means the changegroup push succeeded; see the
        # cgresult value documentation in __init__ above.
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
Pierre-Yves David
push: extract fallback heads computation into pushop...
r22015
Pierre-Yves David
push: prepare the issue of multiple kinds of messages...
r22650 # mapping of message used when pushing bookmark
bookmsgmap = {'update': (_("updating bookmark %s\n"),
_('updating bookmark %s failed!\n')),
'export': (_("exporting bookmark %s\n"),
_('exporting bookmark %s failed!\n')),
'delete': (_("deleting remote bookmark %s\n"),
_('deleting remote bookmark %s failed!\n')),
}
Sean Farley
exchange: add oparg to push so that extensions can wrap pushop
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         publish=False, opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           publish, **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        # pushing to a local repo: make sure the destination supports all
        # repository requirements of the source
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except error.LockUnavailable as err:
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = ('cannot lock source repository: %s\n'
               % stringutil.forcebytestr(err))
        pushop.ui.debug(msg)

    # whichever of wlock/lock/trmanager were acquired above are released in
    # reverse order here; nullcontextmanager stands in for the ones we skipped
    with wlock or util.nullcontextmanager():
        with lock or util.nullcontextmanager():
            with pushop.trmanager or util.nullcontextmanager():
                pushop.repo.checkpush(pushop)
                _checkpublish(pushop)
                _pushdiscovery(pushop)
                if not _forcebundle1(pushop):
                    _pushbundle2(pushop)
                _pushchangeset(pushop)
                _pushsyncphase(pushop)
                _pushobsolete(pushop)
                _pushbookmark(pushop)

    if repo.ui.configbool('experimental', 'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pushop
Pierre-Yves David
push: move bookmarks exchange in the exchange module...
r20352
Pierre-Yves David
push: make discovery extensible...
# Ordered list of the discovery step names to run before a push.
pushdiscoveryorder = []

# step name -> implementation function; kept separate from the order list
# so extensions can wrap an individual step without touching the ordering.
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for function performing discovery before push

    The decorated function is recorded in the name -> function mapping and
    its name is appended to the ordered step list (registration order
    matters).

    Only use this decorator for a brand new step; to wrap a step defined
    elsewhere (e.g. by an extension), mutate the pushdiscovery dictionary
    directly."""
    def register(func):
        # a step must not be registered twice
        assert stepname not in pushdiscoverymapping
        pushdiscoveryorder.append(stepname)
        pushdiscoverymapping[stepname] = func
        return func
    return register

def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        pushdiscoverymapping[stepname](pushop)
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    findcommonin = discovery.findcommonincoming
    # restrict common-discovery to the pushed set when one was provided
    if pushop.revs:
        commoninc = findcommonin(pushop.repo, pushop.remote,
                                 force=pushop.force,
                                 ancestorsof=pushop.revs)
    else:
        commoninc = findcommonin(pushop.repo, pushop.remote,
                                 force=pushop.force)
    common, inc, remoteheads = commoninc
    outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                            onlyheads=pushop.revs,
                                            commoninc=commoninc,
                                            force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
Pierre-Yves David
push: perform phases discovery before the push...
r22019 @pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
"""discover the phase that needs to be pushed
(computed for both success and failure case for changesets push)"""
outgoing = pushop.outgoing
unfi = pushop.repo.unfiltered()
Gregory Szorc
exchange: use command executor interface for calling listkeys...
r37775 remotephases = listkeys(pushop.remote, 'phases')
Jun Wu
codemod: register core configitems using a script...
r33499 if (pushop.ui.configbool('ui', '_usedassubrepo')
Pierre-Yves David
subrepo: detect issue3781 case earlier so it apply to bundle2...
r25337 and remotephases # server supports phases
and not pushop.outgoing.missing # no changesets to be pushed
Boris Feld
phase: simplify the check for issue3781 shortcut in discovery...
r34819 and remotephases.get('publishing', False)):
Pierre-Yves David
subrepo: detect issue3781 case earlier so it apply to bundle2...
r25337 # When:
# - this is a subrepo push
# - and remote support phase
# - and no changeset are to be pushed
# - and remote is publishing
Boris Feld
exchange: fix issue3781 reference in the comment...
r34818 # We may be in issue 3781 case!
Pierre-Yves David
subrepo: detect issue3781 case earlier so it apply to bundle2...
r25337 # We drop the possible phase synchronisation done by
# courtesy to publish changesets possibly locally draft
# on the remote.
Boris Feld
phase: simplify the check for issue3781 shortcut in discovery...
r34819 pushop.outdatedphases = []
pushop.fallbackoutdatedphases = []
return
Boris Feld
phase: gather remote phase information in a summary object...
r34820
pushop.remotephases = phases.remotephasessummary(pushop.repo,
pushop.fallbackheads,
remotephases)
droots = pushop.remotephases.draftroots
Pierre-Yves David
push: perform phases discovery before the push...
r22019 extracond = ''
Boris Feld
phase: gather remote phase information in a summary object...
r34820 if not pushop.remotephases.publishing:
Pierre-Yves David
push: perform phases discovery before the push...
r22019 extracond = ' and public()'
revset = 'heads((%%ln::%%ln) %s)' % extracond
# Get the list of all revs draft on remote by public here.
# XXX Beware that revset break if droots is not strictly
# XXX root we may want to ensure it is but it is costly
fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
av6
push: add --publish flag to change phase of pushed changesets...
r40722 if not pushop.remotephases.publishing and pushop.publish:
future = list(unfi.set('%ln and (not public() or %ln::)',
pushop.futureheads, droots))
elif not outgoing.missing:
Pierre-Yves David
push: perform phases discovery before the push...
r22019 future = fallback
else:
# adds changeset we are going to push as draft
#
Mads Kiilerich
spelling: fixes from proofreading of spell checker issues
r23139 # should not be necessary for publishing server, but because of an
Pierre-Yves David
push: perform phases discovery before the push...
r22019 # issue fixed in xxxxx we have to do it anyway.
fdroots = list(unfi.set('roots(%ln + %ln::)',
outgoing.missing, droots))
fdroots = [f.node() for f in fdroots]
future = list(unfi.set(revset, fdroots, pushop.futureheads))
pushop.outdatedphases = future
pushop.fallbackoutdatedphases = fallback
Pierre-Yves David
push: introduce a discovery step for obsmarker...
r22035 @pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
Gregory Szorc
exchange: use command executor interface for calling listkeys...
r37775 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
return
if not pushop.repo.obsstore:
return
if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
return
repo = pushop.repo
# very naive computation, that can be quite expensive on big repo.
# However: evolution is currently slow on them anyway.
nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
Pierre-Yves David
push: introduce a discovery step for obsmarker...
r22035
Pierre-Yves David
push: move bookmark discovery with other discovery steps...
r22239 @pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
ui = pushop.ui
repo = pushop.repo.unfiltered()
remote = pushop.remote
ui.debug("checking for updated bookmarks\n")
ancestors = ()
if pushop.revs:
Yuya Nishihara
py3: fix revnums in bookmark discovery to be consumable more than once
r38623 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
Pierre-Yves David
push: move bookmark discovery with other discovery steps...
r22239 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
Gregory Szorc
exchange: use command executor interface for calling listkeys...
r37775
remotebookmark = listkeys(remote, 'bookmarks')
Pierre-Yves David
push: move bookmark discovery with other discovery steps...
r22239
liscju
bookmarks: add 'hg push -B .' for pushing the active bookmark (issue4917)
r28182 explicit = set([repo._bookmarks.expandname(bookmark)
for bookmark in pushop.bookmarks])
Pierre-Yves David
push: gather all bookmark decisions together...
r22651
Stanislau Hlebik
bookmarks: make bookmarks.comparebookmarks accept binary nodes (API)...
r30583 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
def safehex(x):
if x is None:
return x
return hex(x)
def hexifycompbookmarks(bookmarks):
Boris Feld
push-discovery: don't turn use generator when comparing bookmarks...
r36955 return [(b, safehex(scid), safehex(dcid))
for (b, scid, dcid) in bookmarks]
Stanislau Hlebik
bookmarks: make bookmarks.comparebookmarks accept binary nodes (API)...
r30583
comp = [hexifycompbookmarks(marks) for marks in comp]
Boris Feld
push-discovery: extract the bookmark comparison logic in its own function...
r36956 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
def _processcompared(pushop, pushed, explicit, remotebms, comp):
"""take decision on bookmark to pull from the remote bookmark
Exist to help extensions who want to alter this behavior.
"""
Gregory Szorc
bookmarks: explicitly track identical bookmarks...
r23081 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
Stanislau Hlebik
bookmarks: make bookmarks.comparebookmarks accept binary nodes (API)...
r30583
Boris Feld
push-discovery: extract the bookmark comparison logic in its own function...
r36956 repo = pushop.repo
Pierre-Yves David
push: move bookmark discovery with other discovery steps...
r22239 for b, scid, dcid in advsrc:
Pierre-Yves David
push: gather all bookmark decisions together...
r22651 if b in explicit:
explicit.remove(b)
Boris Feld
push-discovery: extract the bookmark comparison logic in its own function...
r36956 if not pushed or repo[scid].rev() in pushed:
Pierre-Yves David
push: move bookmark discovery with other discovery steps...
r22239 pushop.outbookmarks.append((b, dcid, scid))
Pierre-Yves David
push: gather all bookmark decisions together...
r22651 # search added bookmark
for b, scid, dcid in addsrc:
if b in explicit:
explicit.remove(b)
pushop.outbookmarks.append((b, '', scid))
# search for overwritten bookmark
Stanislau Hlebik
bookmarks: make bookmarks.comparebookmarks accept binary nodes (API)...
r30583 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
Pierre-Yves David
push: gather all bookmark decisions together...
r22651 if b in explicit:
explicit.remove(b)
pushop.outbookmarks.append((b, dcid, scid))
# search for bookmark to delete
for b, scid, dcid in adddst:
if b in explicit:
explicit.remove(b)
# treat as "deleted locally"
pushop.outbookmarks.append((b, dcid, ''))
Gregory Szorc
exchange: don't report failure from identical bookmarks...
r23082 # identical bookmarks shouldn't get reported
for b, scid, dcid in same:
if b in explicit:
explicit.remove(b)
Pierre-Yves David
push: gather all bookmark decisions together...
r22651
if explicit:
explicit = sorted(explicit)
# we should probably list all of them
Boris Feld
push-discovery: extract the bookmark comparison logic in its own function...
r36956 pushop.ui.warn(_('bookmark %s does not exist on the local '
'or remote repository!\n') % explicit[0])
Pierre-Yves David
push: gather all bookmark decisions together...
r22651 pushop.bkresult = 2
pushop.outbookmarks.sort()
Pierre-Yves David
push: move bookmark discovery with other discovery steps...
r22239
Pierre-Yves David
push: move outgoing check logic in its own function...
def _pushcheckoutgoing(pushop):
    """check validity and presence of outgoing changesets

    Returns False when there is nothing to push (emitting the "no changes
    found" message as a side effect), True when a changegroup should be
    sent.  Aborts when the push would propagate obsolete or unstable
    changesets, or when head checking fails.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
Pierre-Yves David
push: rework the bundle2partsgenerators logic...
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The decorated function is recorded in the name -> function mapping and
    its name inserted into the ordered step list (registration order
    matters).

    Only use this decorator for new steps; to wrap a step defined by an
    extension, mutate the b2partsgenmapping dictionary directly."""
    def register(func):
        # refuse duplicated step names
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        # append by default, or splice in at the requested position
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return register
Ryan McElroy
bundle2: generate check:heads in a independent function
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' do not check for push race,
    # * if we don't push anything, there are nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            # strict check: server aborts unless its heads exactly match the
            # ones we observed at discovery time
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            # relaxed check: only guard the heads actually affected by this
            # push, allowing unrelated heads to move concurrently
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    # remote heads we discard or replace with this push
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)
Ryan McElroy
bundle2: generate check:heads in a independent function
r26428
Boris Feld
phase: generate a push-race detection part on push...
r34822 def _pushing(pushop):
"""return True if we are pushing anything"""
return bool(pushop.outgoing.missing
or pushop.outdatedphases
or pushop.outobsmarkers
or pushop.outbookmarks)
Boris Feld
push: include a 'check:bookmarks' part when possible...
@b2partsgenerator('check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'bookmarks' not in b2caps or not pushop.outbookmarks:
        return
    # record the expected current (old) location of each moved bookmark so
    # the server can detect a concurrent move
    data = [(book, bin(old)) for book, old, new in pushop.outbookmarks]
    bundler.newpart('check:bookmarks', data=bookmod.binaryencode(data))
Boris Feld
phase: generate a push-race detection part on push...
@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = 'heads' in b2caps.get('phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            for nodes in checks:
                # sorted node lists keep the generated part deterministic
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart('check:phases', data=checkdata)
Pierre-Yves David
push: rework the bundle2partsgenerators logic...
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        # keep only the versions this repository can actually emit
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise error.Abort(_('no common changegroup version'))
        # prefer the newest mutually supported version
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
Pierre-Yves David
push: include phase push in the unified bundle2 push...
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui
    # the binary 'phase-heads' part is preferred, unless disabled through
    # the 'devel.legacy.exchange' escape hatch
    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    if 'heads' in b2caps.get('phases', ()) and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    if 'pushkey' in b2caps:
        return _pushb2phasespushkey(pushop, bundler)
Boris Feld
phase: isolate logic to update remote phrase through bundle2 pushkey...
r34823
Boris Feld
phase: use a binary phase part to push through bundle2 (BC)...
def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add('phases')
    if not pushop.outdatedphases:
        return
    # every outdated head is to become public (phase index 0)
    updates = [[] for p in phases.allphases]
    updates[0].extend(h.node() for h in pushop.outdatedphases)
    bundler.newpart('phase-heads', data=phases.binaryencode(updates))
Boris Feld
phase: isolate logic to update remote phrase through bundle2 pushkey...
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        # map the failing part id back to the head it was publishing
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        # one pushkey part per head moving from draft to public
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        # warn for every pushkey the server ignored or refused
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
Pierre-Yves David
bundle2-push: introduce a list of part generating functions...
r21904
Pierre-Yves David
push: use bundle2 to push obsmarkers when possible
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsmarkers part when both sides share a marker version"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no obsmarker format in common with the remote: skip the step
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        bundle2.buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
Pierre-Yves David
push: use bundle2 to push obsmarkers when possible
r22347
Pierre-Yves David
push: add bookmarks to the unified bundle2 push...
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    # honor the 'devel.legacy.exchange' escape hatch that forces the old
    # pushkey based protocol
    legacybooks = 'bookmarks' in pushop.repo.ui.configlist('devel',
                                                           'legacy.exchange')
    if 'bookmarks' in b2caps and not legacybooks:
        return _pushb2bookmarkspart(pushop, bundler)
    if 'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)
Boris Feld
bookmark: use the 'bookmarks' bundle2 part to push bookmark update (issue5165)...
r35265 def _bmaction(old, new):
"""small utility for bookmark pushing"""
if not old:
return 'export'
elif not new:
return 'delete'
return 'update'
def _pushb2bookmarkspart(pushop, bundler):
    """push bookmark moves through a dedicated bundle2 'bookmarks' part"""
    pushop.stepsdone.add('bookmarks')
    if not pushop.outbookmarks:
        return

    # remember which message to issue for each bookmark on success
    allactions = [(book, _bmaction(old, bin(new)))
                  for book, old, new in pushop.outbookmarks]
    data = [(book, bin(new)) for book, old, new in pushop.outbookmarks]
    bundler.newpart('bookmarks', data=bookmod.binaryencode(data))

    def handlereply(op):
        ui = pushop.ui
        # the part was applied: report every planned move as performed
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)
    return handlereply
Boris Feld
push: move bundle2-pushkey based bookmarks exchange in its own function...
def _pushb2bookmarkspushkey(pushop, bundler):
    """push bookmark moves through bundle2 pushkey parts (legacy path)"""
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        # map the failing part id back to the bookmark it was moving
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        # one pushkey part per bookmark move
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply
Pulkit Goyal
pushvars: move fb extension pushvars to core...
@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if not pushvars:
        return
    shellvars = {}
    for raw in pushvars:
        # each entry must look like KEY=VALUE (empty value allowed)
        if '=' not in raw:
            msg = ("unable to parse variable '%s', should follow "
                   "'KEY=VALUE' or 'KEY=' format")
            raise error.Abort(msg % raw)
        key, value = raw.split('=', 1)
        shellvars[key] = value

    part = bundler.newpart('pushvars')

    for key, value in shellvars.iteritems():
        part.addparam(key, value, mandatory=False)
Pierre-Yves David
push: add bookmarks to the unified bundle2 push...
r22242
Pierre-Yves David
bundle2: allow using bundle2 for push...
def _pushbundle2(pushop):
    """push data to the remote using bundle2
    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # Pushback (server-initiated data sent back to the client) is only
    # possible when we hold a transaction and the user opted in.
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback,
                                                      role='client'))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    # Each registered part generator may contribute parts to the bundle and
    # return a callable to process the server reply for those parts.
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (the bundle only holds 'replycaps')
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand('unbundle', {
                    'bundle': stream,
                    'heads': ['force'],
                    'url': pushop.remote.url(),
                }).result()
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            # Process the server reply bundle (pushkey results, pushback data).
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
    except bundle2.AbortFromPart as exc:
        # Failure raised by a remote part handler: attribute it to the remote.
        pushop.ui.status(_('remote: %s\n') % exc)
        if exc.hint is not None:
            pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
        raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        # Dispatch pushkey failures to the per-part callback when one was
        # registered (e.g. by the bookmark or phase part generators).
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
Pierre-Yves David
bundle2: allow using bundle2 for push...
r21061
Pierre-Yves David
push: move changeset push logic in its own function...
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())
Pierre-Yves David
push: move changeset push logic in its own function...
r20463
Pierre-Yves David
push: move phases synchronisation function in its own function...
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, 'phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            # Publishing server: everything common is public locally too.
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote
        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand('pushkey', {
                    'namespace': 'phases',
                    'key': newremotehead.hex(),
                    'old': '%d' % phases.draft,
                    'new': '%d' % phases.public
                }).result()

            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
Pierre-Yves David
push: move phases synchronisation function in its own function...
r20441
Pierre-Yves David
push: move local phase move in a normal function...
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        # A transaction is available: actually advance the phase boundary.
        tr = pushop.trmanager.transaction()
        phases.advanceboundary(pushop.repo, tr, phase, nodes)
        return
    # repo is not locked, do not change any phases!
    # Informs the user that phases should have been moved when
    # applicable.
    repo = pushop.repo
    wouldmove = [n for n in nodes if phase < repo[n].phase()]
    phasestr = phases.phasenames[phase]
    if wouldmove:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
Pierre-Yves David
push: feed pushoperation object to _pushobsolete function...
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    # Markers are sorted before escaping so the pushkey payload is stable.
    remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
    results = []
    # reverse sort to ensure we end with dump0
    for key in sorted(remotedata, reverse=True):
        results.append(remote.pushkey('obsolete', key, '', remotedata[key]))
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
Pierre-Yves David
push: feed pushoperation object to _pushbookmark function...
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for book, old, new in pushop.outbookmarks:
        # Classify the operation for the user-facing messages.
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'

        with remote.commandexecutor() as e:
            ok = e.callcommand('pushkey', {
                'namespace': 'bookmarks',
                'key': book,
                'old': old,
                'new': new,
            }).result()

        if ok:
            ui.status(bookmsgmap[action][0] % book)
        else:
            ui.warn(bookmsgmap[action][1] % book)
            # discovery can have set the value form invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
Pierre-Yves David
exchange: extract pull function from localrepo...
r20469
Pierre-Yves David
pull: introduce a pulloperation object...
class pulloperation(object):
    """A object that represent a single pull operation

    It purpose is to carry pull related state and very common operation.

    A new should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None,
                 includepats=None, excludepats=None, depth=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False
        # Set of file patterns to include.
        self.includepats = includepats
        # Set of file patterns to exclude.
        self.excludepats = excludepats
        # Number of ancestor changesets to pull from each pulled head.
        self.depth = depth

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        """True when bundle2 may be used for this pull (cached)"""
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        """bundle2 capabilities advertised by the remote peer (cached)"""
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
Martin von Zweigbergk
util: add base class for transactional context managers...
class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            # Lazily open the transaction; record source/url so hooks fired
            # at close time know where the data came from.
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            tr.hookargs['source'] = self.source
            tr.hookargs['url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.close()

    def release(self):
        """release transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.release()
Pierre-Yves David
exchange: extract pull function from localrepo...
r20469
Gregory Szorc
exchange: use command executor interface for calling listkeys...
def listkeys(remote, namespace):
    """Return the pushkey listing of ``namespace`` from peer ``remote``."""
    with remote.commandexecutor() as e:
        fut = e.callcommand('listkeys', {'namespace': namespace})
        return fut.result()
Joerg Sonnenberger
wireproto: support for pullbundles...
def _fullpullbundle2(repo, pullop):
    """repeatedly pull bundle2 data until the pull is complete

    The server may send a partial reply, i.e. when inlining
    pre-computed bundles. In that case, update the common
    set based on the results and pull another bundle.

    There are two indicators that the process is finished:
    - no changeset has been added, or
    - all remote heads are known locally.
    The head check must use the unfiltered view as obsoletion
    markers can hide heads.
    """
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set('heads(%ln %% %ln)', h1, h2)
        return set(ctx.node() for ctx in res)
    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set('heads((%ln + %ln - null))', h1, h2)
        return set(ctx.node() for ctx in res)
    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if repository.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            # no changeset added: we are done
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            # all remote heads known locally: we are done
            break
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common
Gregory Szorc
exchange: teach pull about requested stream clones...
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None, includepats=None, excludepats=None,
         depth=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.
    ``depth`` is an integer indicating the DAG depth of history we're
    interested in. If defined, for each revision specified in ``heads``, we
    will fetch up to this many of its ancestors and data associated with them.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}

    # We allow the narrow patterns to be passed in explicitly to provide more
    # flexibility for API consumers.
    if includepats or excludepats:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested,
                           includepats=includepats, excludepats=excludepats,
                           depth=depth,
                           **pycompat.strkwargs(opargs))

    # Refuse to pull from a repository requiring features we do not support.
    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
    with repo.wlock(), repo.lock(), pullop.trmanager:
        # Use the modern wire protocol, if available.
        if remote.capable('command-changesetdata'):
            exchangev2.pull(pullop)
        else:
            # This should ideally be in _pullbundle2(). However, it needs to run
            # before discovery to avoid extra work.
            _maybeapplyclonebundle(pullop)
            streamclone.maybeperformlegacystreamclone(pullop)
            _pulldiscovery(pullop)
            if pullop.canusebundle2:
                _fullpullbundle2(repo, pullop)
            _pullchangeset(pullop)
            _pullphase(pullop)
            _pullbookmarks(pullop)
            _pullobsolete(pullop)

    # storing remotenames
    if repo.ui.configbool('experimental', 'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pullop
Pierre-Yves David
pull: move obsolescence marker exchange in the exchange module...
r20476
Pierre-Yves David
pull: make discovery phase extensible...
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated function will be added in order (this
    may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pulldiscovery dictionary directly."""
    def register(func):
        # Each step name may only be registered once.
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
Pierre-Yves David
pull: put discovery step in its own function...
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    # Execute each registered discovery step in registration order.
    for name in pulldiscoveryorder:
        pulldiscoverymapping[name](pullop)
Pierre-Yves David
pull: only prefetch bookmarks when using bundle1...
r25369 @pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
"""fetch bookmark data in bundle1 case
If not using bundle2, we have to fetch bookmarks before changeset
discovery to reduce the chance and impact of race conditions."""
Pierre-Yves David
pull: skip pulling remote bookmarks with bundle1 if a value already exist...
r25443 if pullop.remotebookmarks is not None:
return
Gregory Szorc
exchange: expose bundle2 availability on pulloperation...
r26465 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
Pierre-Yves David
bundle2: pull bookmark the old way if no bundle2 listkeys support (issue4701)...
r25479 # all known bundle2 servers now support listkeys, but lets be nice with
# new implementation.
return
Gregory Szorc
exchange: use command executor interface for calling listkeys...
r37775 books = listkeys(pullop.remote, 'bookmarks')
Boris Feld
pull: store binary node in pullop.remotebookmarks...
r35030 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
Pierre-Yves David
pull: only prefetch bookmarks when using bundle1...
r25369
Pierre-Yves David
pull: make discovery phase extensible...
r22936 @pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
Pierre-Yves David
pull: put discovery step in its own function...
r20900 """discovery phase for the pull
Current handle changeset discovery only, will change handle all discovery
at some point."""
Pierre-Yves David
discovery: run discovery on filtered repository...
r23848 tmp = discovery.findcommonincoming(pullop.repo,
Pierre-Yves David
pull: put discovery step in its own function...
r20900 pullop.remote,
heads=pullop.heads,
force=pullop.force)
Pierre-Yves David
discovery: run discovery on filtered repository...
r23848 common, fetch, rheads = tmp
nm = pullop.repo.unfiltered().changelog.nodemap
if fetch and rheads:
Boris Feld
discovery: avoid dropping remote heads hidden locally...
r34318 # If a remote heads is filtered locally, put in back in common.
Pierre-Yves David
discovery: run discovery on filtered repository...
r23848 #
# This is a hackish solution to catch most of "common but locally
# hidden situation". We do not performs discovery on unfiltered
# repository because it end up doing a pathological amount of round
# trip for w huge amount of changeset we do not care about.
#
# If a set of such "common but filtered" changeset exist on the server
# but are not including a remote heads, we'll not be able to detect it,
scommon = set(common)
for n in rheads:
Pierre-Yves David
discovery: properly exclude locally known but filtered heads...
r23975 if n in nm:
if n not in scommon:
common.append(n)
Boris Feld
discovery: avoid dropping remote heads hidden locally...
r34318 if set(rheads).issubset(set(common)):
Pierre-Yves David
discovery: run discovery on filtered repository...
r23848 fetch = []
pullop.common = common
pullop.fetch = fetch
pullop.rheads = rheads
Pierre-Yves David
pull: put discovery step in its own function...
r20900
Pierre-Yves David
bundle2: allow pulling changegroups using bundle2...
def _pullbundle2(pullop):
    """pull data using bundle2

    Builds the ``getbundle`` request arguments from the pull operation
    state, issues the command, and processes the returned bundle2 stream
    (changegroup, phases, bookmarks, obsmarkers).  Each piece of data
    handled here is recorded in ``pullop.stepsdone`` so legacy fallback
    steps are skipped afterwards."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads

    # check server supports narrow and then adding includepats and excludepats
    servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
    if servernarrow and pullop.includepats:
        kwargs['includepats'] = pullop.includepats
    if servernarrow and pullop.excludepats:
        kwargs['excludepats'] = pullop.excludepats

    if streaming:
        # A stream clone replaces both changegroup and phase transfer.
        kwargs['cg'] = False
        kwargs['stream'] = True
        pullop.stepsdone.add('changegroup')
        pullop.stepsdone.add('phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add('changegroup')

        kwargs['cg'] = pullop.fetch

        # Prefer the binary 'phase-heads' part over listkeys when the server
        # supports it and the devel knob does not force the legacy path.
        legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
        hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
        if (not legacyphase and hasbinaryphase):
            kwargs['phases'] = True
            pullop.stepsdone.add('phases')

        if 'listkeys' in pullop.remotebundle2caps:
            if 'phases' not in pullop.stepsdone:
                kwargs['listkeys'] = ['phases']

    # Bookmarks: prefer the binary bookmark part, fall back to listkeys.
    bookmarksrequested = False
    legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
    hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add('request-bookmarks')
    if ('request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark and hasbinarybook):
        kwargs['bookmarks'] = True
        bookmarksrequested = True

    if 'listkeys' in pullop.remotebundle2caps:
        if 'request-bookmarks' not in pullop.stepsdone:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add('request-bookmarks')
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    # hook point so extensions can extend the request arguments
    _pullbundle2extraprepare(pullop, kwargs)

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args['source'] = 'pull'
        bundle = e.callcommand('getbundle', args).result()

        try:
            op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
                                         source='pull')
            # deliver bookmark parts as records instead of applying them
            op.modes['bookmarks'] = 'records'
            bundle2.processbundle(pullop.repo, bundle, op=op)
        except bundle2.AbortFromPart as exc:
            pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
            raise error.Abort(_('pull failed on remote'), hint=exc.hint)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records['bookmarks']:
            books[record['bookmark']] = record["node"]
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records['listkeys']:
            if namespace == 'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
Pierre-Yves David
pull: retrieve bookmarks through bundle2...
r22656
Pierre-Yves David
bundle2: allow extensions to extend the getbundle request...
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call

    Deliberately a no-op: extensions wrap it to mutate ``kwargs`` (the
    arguments of the upcoming ``getbundle`` wire command) before the
    request is sent.
    """
Pierre-Yves David
pull: move changeset pulling in its own function...
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        # already fetched (e.g. through bundle2)
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # Pick the best protocol command the remote supports, from newest
    # ('getbundle') to oldest ('changegroup'/'changegroupsubset').
    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroup', {
                'nodes': pullop.fetch,
                'source': 'pull',
            }).result()
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroupsubset', {
                'bases': pullop.fetch,
                'heads': pullop.heads,
                'source': 'pull',
            }).result()
    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
Pierre-Yves David
pull: move changeset pulling in its own function...
r20489
Pierre-Yves David
pull: move phases synchronisation in its own function...
def _pullphase(pullop):
    """Fetch phase data from the remote via listkeys and apply it locally."""
    if 'phases' in pullop.stepsdone:
        # phase information already handled (e.g. by bundle2)
        return
    _pullapplyphases(pullop, listkeys(pullop.remote, 'phases'))
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    # Bind frequently used lookups to locals for the filtering passes below.
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
Pierre-Yves David
pull: move phases synchronisation in its own function...
r20486
Pierre-Yves David
pull: move bookmark pulling into its own function...
def _pullbookmarks(pullop):
    """Update local bookmarks from the remote state gathered during pull."""
    if 'bookmarks' in pullop.stepsdone:
        # already processed (e.g. via a bundle2 part)
        return
    pullop.stepsdone.add('bookmarks')
    localrepo = pullop.repo
    bookmod.updatefromremote(localrepo.ui, localrepo,
                             pullop.remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
Pierre-Yves David
pull: move bookmark pulling into its own function...
r22654
Pierre-Yves David
push: feed pulloperation object to _pullobsolete function...
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, 'obsolete')
        if 'dump0' in remoteobs:
            # only open a transaction once we know there is data to apply
            tr = pullop.gettransaction()
            markers = []
            # markers are spread over multiple 'dumpN' listkeys entries
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            # single obsstore.add call for all markers (issue5006)
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr
Gregory Szorc
exchange: move narrow acl functionality into core...
def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.

    Returns a new kwargs dict; the input mapping is not mutated.  Raises
    error.Abort when the user has no include configuration or requests
    patterns outside the configured ACL.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION, username + '.includes',
        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
    user_excludes = ui.configlist(
        _NARROWACL_SECTION, username + '.excludes',
        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
    if not user_includes:
        raise error.Abort(_("{} configuration for user {} is empty")
                          .format(_NARROWACL_SECTION, username))

    # '*' means the whole repository; otherwise prefix with 'path:'
    user_includes = [
        'path:.' if p == '*' else 'path:' + p for p in user_includes]
    user_excludes = [
        'path:.' if p == '*' else 'path:' + p for p in user_excludes]

    req_includes = set(kwargs.get(r'includepats', []))
    req_excludes = set(kwargs.get(r'excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes)

    if invalid_includes:
        raise error.Abort(
            _("The following includes are not accessible for {}: {}")
            .format(username, invalid_includes))

    new_args = {}
    new_args.update(kwargs)
    new_args[r'narrow'] = True
    new_args[r'narrow_acl'] = True
    new_args[r'includepats'] = req_includes
    if req_excludes:
        new_args[r'excludepats'] = req_excludes

    return new_args
Gregory Szorc
exchange: move _computeellipsis() from narrow...
def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
              May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
             most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
                    need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
                        the narrowspec. The client needs these as non-ellipsis
                        nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
                       narrowchangegroup to produce ellipsis nodes with the
                       correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    clrev = cl.rev

    commonrevs = {clrev(n) for n in common} | {nullrev}
    headsrevs = {clrev(n) for n in heads}

    if depth:
        revdepth = {h: 0 for h in headsrevs}

    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child  # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        # move ``roots`` from ``head`` down onto the elided merge ``child``
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        # find an elided merge commit between two of the three roots that
        # can absorb those two roots; try each pair of roots in turn
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
                            nr1, head, nr2, head)
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
                            'roots: %d %d %d') % (head, r1, r2, r3))

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        clrev = cl.changelogrevision(rev)
        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                # a required-but-elided node becomes an ellipsis head itself
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                # propagate pending ellipsis heads to the parents
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots
Gregory Szorc
bundle2: specify what capabilities will be used for...
def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    # advertise bundle2 support plus our url-encoded capability blob
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    return {'HG20', 'bundle2=' + urlreq.quote(capsblob)}
Mike Hommey
bundle2: separate bundle10 and bundle2 cases in getbundle()...
r22542 # List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []
# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
Pierre-Yves David
bundle2: add an 'idx' argument to the 'getbundle2partsgenerator'...
def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The decorated function is recorded in the step -> function mapping and
    inserted into the ordered step list: appended by default, or at position
    ``idx`` when given.  Beware that decorated functions are registered in
    definition order (this may matter).

    Only use this decorator for new steps; to wrap a step from an extension,
    modify the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        # inserting at len() is equivalent to appending
        pos = len(getbundle2partsorder) if idx is None else idx
        getbundle2partsorder.insert(pos, stepname)
        return func
    return dec
Gregory Szorc
exchange: standalone function to determine if bundle2 is requested...
def bundle2requested(bundlecaps):
    """Return True when any advertised capability asks for an HG2* bundle."""
    caps = bundlecaps or ()
    return any(cap.startswith('HG2') for cap in caps)
Gregory Szorc
exchange: refactor APIs to obtain bundle data (API)...
def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    # normalize kwargs keys to bytes for py3
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        # bundle10 knows no extra arguments; reject anything unexpected
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        info['bundleversion'] = 1
        return info, changegroup.makestream(repo, outgoing, '01', source,
                                            bundlecaps=bundlecaps)

    # bundle20 case
    info['bundleversion'] = 2
    # decode the client's bundle2 capability blob from bundlecaps
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # let every registered part generator contribute to the bundle, in order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    info['prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()
Mike Hommey
bundle2: separate bundle10 and bundle2 cases in getbundle()...
r22542
Gregory Szorc
bundle2: move version of stream clone into part name...
r35806 @getbundle2partsgenerator('stream2')
Boris Feld
bundle: add the possibility to bundle a stream v2 part...
r37184 def _getbundlestream2(bundler, repo, *args, **kwargs):
return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
Boris Feld
bundle2: add support for a 'stream' parameter to 'getbundle'...
r35777
Mike Hommey
bundle2: separate bundle10 and bundle2 cases in getbundle()...
r22542 @getbundle2partsgenerator('changegroup')
Mike Hommey
bundle2: remove heads and common arguments to getbundle parts generators
r22543 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
b2caps=None, heads=None, common=None, **kwargs):
Mike Hommey
bundle2: separate bundle10 and bundle2 cases in getbundle()...
r22542 """add a changegroup part to the requested bundle"""
Gregory Szorc
exchange: refactor control flow of _getbundlechangegrouppart()...
r38828 if not kwargs.get(r'cg', True):
return
version = '01'
cgversions = b2caps.get('changegroup')
if cgversions: # 3.1 and 3.2 ship with an empty value
cgversions = [v for v in cgversions
if v in changegroup.supportedoutgoingversions(repo)]
if not cgversions:
Gregory Szorc
exchange: raise error.Abort instead of ValueError...
r41853 raise error.Abort(_('no common changegroup version'))
Gregory Szorc
exchange: refactor control flow of _getbundlechangegrouppart()...
r38828 version = max(cgversions)
Mike Hommey
bundle2: separate bundle10 and bundle2 cases in getbundle()...
r22542
Gregory Szorc
exchange: refactor control flow of _getbundlechangegrouppart()...
r38828 outgoing = _computeoutgoing(repo, heads, common)
if not outgoing.missing:
return
Gregory Szorc
exchange: move simple narrow changegroup generation from extension...
r38844 if kwargs.get(r'narrow', False):
include = sorted(filter(bool, kwargs.get(r'includepats', [])))
exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
Martin von Zweigbergk
narrow: when widening, don't include manifests the client already has...
r40380 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
Gregory Szorc
exchange: move simple narrow changegroup generation from extension...
r38844 else:
Martin von Zweigbergk
narrow: when widening, don't include manifests the client already has...
r40380 matcher = None
Gregory Szorc
exchange: move simple narrow changegroup generation from extension...
r38844
Gregory Szorc
exchange: refactor control flow of _getbundlechangegrouppart()...
r38828 cgstream = changegroup.makestream(repo, outgoing, version, source,
Martin von Zweigbergk
narrow: when widening, don't include manifests the client already has...
r40380 bundlecaps=bundlecaps, matcher=matcher)
Gregory Szorc
exchange: refactor control flow of _getbundlechangegrouppart()...
r38828
part = bundler.newpart('changegroup', data=cgstream)
if cgversions:
part.addparam('version', version)
part.addparam('nbchanges', '%d' % len(outgoing.missing),
mandatory=False)
if 'treemanifest' in repo.requirements:
part.addparam('treemanifest', '1')
Mike Hommey
bundle2: separate bundle10 and bundle2 cases in getbundle()...
r22542
Pulkit Goyal
py3: add a r'' prefix in mercurial/exchange.py...
r40388 if (kwargs.get(r'narrow', False) and kwargs.get(r'narrow_acl', False)
Pulkit Goyal
narrow: only send the narrowspecs back if ACL in play...
r40373 and (include or exclude)):
Gregory Szorc
exchange: move simple narrow changegroup generation from extension...
r38844 narrowspecpart = bundler.newpart('narrow:spec')
if include:
narrowspecpart.addparam(
'include', '\n'.join(include), mandatory=True)
if exclude:
narrowspecpart.addparam(
'exclude', '\n'.join(exclude), mandatory=True)
Boris Feld
getbundle: add support for 'bookmarks' boolean argument...
r35268 @getbundle2partsgenerator('bookmarks')
def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
b2caps=None, **kwargs):
"""add a bookmark part to the requested bundle"""
Pulkit Goyal
py3: handle keyword arguments correctly in exchange.py...
r35356 if not kwargs.get(r'bookmarks', False):
Boris Feld
getbundle: add support for 'bookmarks' boolean argument...
r35268 return
if 'bookmarks' not in b2caps:
Gregory Szorc
exchange: raise error.Abort instead of ValueError...
r41853 raise error.Abort(_('no common bookmarks exchange method'))
Boris Feld
getbundle: add support for 'bookmarks' boolean argument...
r35268 books = bookmod.listbinbookmarks(repo)
data = bookmod.binaryencode(books)
if data:
bundler.newpart('bookmarks', data=data)
Mike Hommey
bundle2: separate bundle10 and bundle2 cases in getbundle()...
r22542 @getbundle2partsgenerator('listkeys')
Mike Hommey
bundle2: remove heads and common arguments to getbundle parts generators
r22543 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
b2caps=None, **kwargs):
Mike Hommey
bundle2: separate bundle10 and bundle2 cases in getbundle()...
r22542 """add parts containing listkeys namespaces to the requested bundle"""
Pulkit Goyal
py3: handle keyword arguments correctly in exchange.py...
r35356 listkeys = kwargs.get(r'listkeys', ())
Pierre-Yves David
getbundle: support of listkeys argument when bundle2 is used...
r21657 for namespace in listkeys:
Pierre-Yves David
bundle2: rename format, parts and config to final names...
r24686 part = bundler.newpart('listkeys')
Pierre-Yves David
getbundle: support of listkeys argument when bundle2 is used...
r21657 part.addparam('namespace', namespace)
keys = repo.listkeys(namespace).items()
part.data = pushkey.encodekeys(keys)
Pierre-Yves David
unbundle: extract checkheads in its own function...
r20967
Mike Hommey
bundle2: separate bundle10 and bundle2 cases in getbundle()...
r22542 @getbundle2partsgenerator('obsmarkers')
Mike Hommey
bundle2: remove heads and common arguments to getbundle parts generators
r22543 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
b2caps=None, heads=None, **kwargs):
Mike Hommey
bundle2: pass b2caps down to functions adding bundle2 parts for getbundle
r22541 """add an obsolescence markers part to the requested bundle"""
Pulkit Goyal
py3: handle keyword arguments correctly in exchange.py...
r35356 if kwargs.get(r'obsmarkers', False):
Pierre-Yves David
getbundle: add `obsmarkers` argument to getbundle...
r22353 if heads is None:
heads = repo.heads()
subset = [c.node() for c in repo.set('::%ln', heads)]
markers = repo.obsstore.relevantmarkers(subset)
Pierre-Yves David
obsolete: sort obsmarkers during exchange...
r25118 markers = sorted(markers)
bundle2: move function building obsmarker-part in the bundle2 module...
r32515 bundle2.buildobsmarkerspart(bundler, markers)
Pierre-Yves David
getbundle: add `obsmarkers` argument to getbundle...
r22353
Boris Feld
pull: use 'phase-heads' to retrieve phase information...
r34323 @getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
b2caps=None, heads=None, **kwargs):
"""add phase heads part to the requested bundle"""
Pulkit Goyal
py3: handle keyword arguments correctly in exchange.py...
r35356 if kwargs.get(r'phases', False):
Boris Feld
pull: use 'phase-heads' to retrieve phase information...
r34323 if not 'heads' in b2caps.get('phases'):
Gregory Szorc
exchange: raise error.Abort instead of ValueError...
r41853 raise error.Abort(_('no common phases exchange method'))
Boris Feld
pull: use 'phase-heads' to retrieve phase information...
r34323 if heads is None:
heads = repo.heads()
headsbyphase = collections.defaultdict(set)
if repo.publishing():
headsbyphase[phases.public] = heads
else:
# find the appropriate heads to move
phase = repo._phasecache.phase
node = repo.changelog.node
rev = repo.changelog.rev
for h in heads:
headsbyphase[phase(repo, rev(h))].add(h)
seenphases = list(headsbyphase.keys())
# We do not handle anything but public and draft phase for now)
if seenphases:
assert max(seenphases) <= phases.draft
# if client is pulling non-public changesets, we need to find
# intermediate public heads.
draftheads = headsbyphase.get(phases.draft, set())
if draftheads:
publicheads = headsbyphase.get(phases.public, set())
revset = 'heads(only(%ln, %ln) and public())'
extraheads = repo.revs(revset, draftheads, publicheads)
for r in extraheads:
headsbyphase[phases.public].add(node(r))
# transform data in a format used by the encoding function
phasemapping = []
for phase in phases.allphases:
phasemapping.append(sorted(headsbyphase[phase]))
# generate the actual part
phasedata = phases.binaryencode(phasemapping)
bundler.newpart('phase-heads', data=phasedata)
Gregory Szorc
exchange: support transferring .hgtags fnodes mapping...
r25402 @getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
b2caps=None, heads=None, common=None,
**kwargs):
"""Transfer the .hgtags filenodes mapping.
Only values for heads in this bundle will be transferred.
The part data consists of pairs of 20 byte changeset node and .hgtags
filenodes raw values.
"""
# Don't send unless:
# - changeset are being exchanged,
# - the client supports it.
Pulkit Goyal
py3: handle keyword arguments correctly in exchange.py...
r35356 if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
Gregory Szorc
exchange: support transferring .hgtags fnodes mapping...
r25402 return
Pierre-Yves David
computeoutgoing: move the function from 'changegroup' to 'exchange'...
r29808 outgoing = _computeoutgoing(repo, heads, common)
bundle2: move tagsfnodecache generation in a generic function...
r32217 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
Gregory Szorc
exchange: support transferring .hgtags fnodes mapping...
r25402
Boris Feld
revbranchcache: add the necessary bit to send 'rbc' data over bundle2...
r36984 @getbundle2partsgenerator('cache:rev-branch-cache')
def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
b2caps=None, heads=None, common=None,
**kwargs):
"""Transfer the rev-branch-cache mapping
The payload is a series of data related to each branch
1) branch name length
2) number of open heads
3) number of closed heads
4) open heads nodes
5) closed heads nodes
"""
# Don't send unless:
# - changeset are being exchanged,
# - the client supports it.
Gregory Szorc
exchange: move disabling of rev-branch-cache bundle part out of narrow...
r38825 # - narrow bundle isn't in play (not currently compatible).
if (not kwargs.get(r'cg', True)
or 'rev-branch-cache' not in b2caps
or kwargs.get(r'narrow', False)
or repo.ui.has_section(_NARROWACL_SECTION)):
Boris Feld
revbranchcache: add the necessary bit to send 'rbc' data over bundle2...
r36984 return
Gregory Szorc
exchange: move disabling of rev-branch-cache bundle part out of narrow...
r38825
Boris Feld
revbranchcache: add the necessary bit to send 'rbc' data over bundle2...
r36984 outgoing = _computeoutgoing(repo, heads, common)
bundle2.addpartrevbranchcache(repo, bundler, outgoing)
Pierre-Yves David
unbundle: extract checkheads in its own function...
r20967 def check_heads(repo, their_heads, context):
"""check if the heads of a repo have been modified
Used by peer for unbundling.
"""
heads = repo.heads()
Augie Fackler
cleanup: replace uses of util.(md5|sha1|sha256|sha512) with hashlib.\1...
r29341 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
Pierre-Yves David
unbundle: extract checkheads in its own function...
r20967 if not (their_heads == ['force'] or their_heads == heads or
their_heads == ['hashed', heads_hash]):
# someone else committed/pushed/unbundled while we
# were transferring data
Pierre-Yves David
bundle2: fix raising errors during heads checking...
r21184 raise error.PushRaced('repository changed while %s - '
'please try again' % context)
Pierre-Yves David
unbundle: extract the core logic in another function...
r20968
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced a PushRaced exception is raised.

    ``cg`` is either a bundle2 stream (bundle2.unbundle20) or a legacy
    changegroup.  ``heads`` carries the client's head view for race
    detection (only meaningful for bundle1).  ``source`` and ``url`` feed
    transaction names and hook arguments.
    """
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    # Lazily take wlock/lock and open the transaction on
                    # first use; stored in 'lockandtr' so the enclosing
                    # 'finally' can release everything.
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput,
                                             source='push')
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        # Buffer subsequent ui output so it can be forwarded
                        # to the client as an 'output' part of the reply.
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    # Salvage output generated before the failure so it can
                    # still be delivered to the client with the error.
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
Gregory Szorc
clonebundles: support for seeding clones from pre-generated bundles...
r26623
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible.

    Pre-seeds an empty local repository from a server-advertised bundle
    before the regular pull.  Silently returns (falling back to a normal
    clone) when the feature is disabled, the repo is non-empty, specific
    heads were requested, or the remote lacks the capability.
    """
    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand('clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    # Only the most-preferred entry is attempted.
    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))
Gregory Szorc
clonebundles: support for seeding clones from pre-generated bundles...
r26623
Gregory Szorc
exchange: extract bundle specification components into own attributes...
r26647 def parseclonebundlesmanifest(repo, s):
Gregory Szorc
clonebundles: support for seeding clones from pre-generated bundles...
r26623 """Parses the raw text of a clone bundles manifest.
Returns a list of dicts. The dicts have a ``URL`` key corresponding
to the URL and other keys are the attributes for the entry.
"""
m = []
for line in s.splitlines():
fields = line.split()
if not fields:
continue
attrs = {'URL': fields[0]}
for rawattr in fields[1:]:
key, value = rawattr.split('=', 1)
timeless
pycompat: switch to util.urlreq/util.urlerr for py3 compat
r28883 key = urlreq.unquote(key)
value = urlreq.unquote(value)
Gregory Szorc
exchange: extract bundle specification components into own attributes...
r26647 attrs[key] = value
# Parse BUNDLESPEC into components. This makes client-side
# preferences easier to specify since you can prefer a single
# component of the BUNDLESPEC.
if key == 'BUNDLESPEC':
try:
Joerg Sonnenberger
bundlespec: drop externalnames flag...
r37786 bundlespec = parsebundlespec(repo, value)
Boris Feld
bundlespec: introduce an attr-based class for bundlespec...
r37181 attrs['COMPRESSION'] = bundlespec.compression
attrs['VERSION'] = bundlespec.version
Gregory Szorc
exchange: extract bundle specification components into own attributes...
r26647 except error.InvalidBundleSpecification:
pass
except error.UnsupportedBundleSpecification:
pass
Gregory Szorc
clonebundles: support for seeding clones from pre-generated bundles...
r26623
m.append(attrs)
return m
Boris Feld
streamclonebundle: make sure we accept new stream clone bundle spec...
r37187 def isstreamclonespec(bundlespec):
# Stream clone v1
Joerg Sonnenberger
bundlespec: drop externalnames flag...
r37786 if (bundlespec.wirecompression == 'UN' and bundlespec.wireversion == 's1'):
Boris Feld
streamclonebundle: make sure we accept new stream clone bundle spec...
r37187 return True
# Stream clone v2
Augie Fackler
cleanup: use () to wrap long lines instead of \...
r41925 if (bundlespec.wirecompression == 'UN' and
bundlespec.wireversion == '02' and
Boris Feld
streamclonebundle: make sure we accept new stream clone bundle spec...
r37187 bundlespec.contentopts.get('streamv2')):
return True
return False
Gregory Szorc
exchange: perform stream clone with clone bundle with --uncompressed...
r34360 def filterclonebundleentries(repo, entries, streamclonerequested=False):
Gregory Szorc
exchange: document filterclonebundleentries
r26687 """Remove incompatible clone bundle manifest entries.
Accepts a list of entries parsed with ``parseclonebundlesmanifest``
and returns a new list consisting of only the entries that this client
should be able to apply.
There is no guarantee we'll be able to apply all returned entries because
the metadata we use to filter on may be missing or wrong.
"""
Gregory Szorc
clonebundles: filter on bundle specification...
r26644 newentries = []
for entry in entries:
spec = entry.get('BUNDLESPEC')
if spec:
try:
Boris Feld
bundlespec: introduce an attr-based class for bundlespec...
r37181 bundlespec = parsebundlespec(repo, spec, strict=True)
Gregory Szorc
exchange: perform stream clone with clone bundle with --uncompressed...
r34360
# If a stream clone was requested, filter out non-streamclone
# entries.
Boris Feld
streamclonebundle: make sure we accept new stream clone bundle spec...
r37187 if streamclonerequested and not isstreamclonespec(bundlespec):
Gregory Szorc
exchange: perform stream clone with clone bundle with --uncompressed...
r34360 repo.ui.debug('filtering %s because not a stream clone\n' %
entry['URL'])
continue
Gregory Szorc
clonebundles: filter on bundle specification...
r26644 except error.InvalidBundleSpecification as e:
Pulkit Goyal
py3: use stringutil.forcebytestr() instead of str()...
r37681 repo.ui.debug(stringutil.forcebytestr(e) + '\n')
Gregory Szorc
clonebundles: filter on bundle specification...
r26644 continue
except error.UnsupportedBundleSpecification as e:
repo.ui.debug('filtering %s because unsupported bundle '
Augie Fackler
py3: hunt down str(exception) instances and use util.forcebytestr...
r36440 'spec: %s\n' % (
Yuya Nishihara
stringutil: bulk-replace call sites to point to new module...
r37102 entry['URL'], stringutil.forcebytestr(e)))
Gregory Szorc
clonebundles: filter on bundle specification...
r26644 continue
Gregory Szorc
exchange: perform stream clone with clone bundle with --uncompressed...
r34360 # If we don't have a spec and requested a stream clone, we don't know
# what the entry is so don't attempt to apply it.
elif streamclonerequested:
repo.ui.debug('filtering %s because cannot determine if a stream '
'clone bundle\n' % entry['URL'])
continue
Gregory Szorc
clonebundles: filter on bundle specification...
r26644
Gregory Szorc
clonebundles: filter on SNI requirement...
r26645 if 'REQUIRESNI' in entry and not sslutil.hassni:
repo.ui.debug('filtering %s because SNI not supported\n' %
entry['URL'])
continue
Gregory Szorc
clonebundles: filter on bundle specification...
r26644 newentries.append(entry)
return newentries
Gregory Szorc
exchange: use rich class for sorting clone bundle entries...
r30685 class clonebundleentry(object):
"""Represents an item in a clone bundles manifest.
This rich class is needed to support sorting since sorted() in Python 3
doesn't support ``cmp`` and our comparison is complex enough that ``key=``
won't work.
"""
Gregory Szorc
exchange: support sorting URLs by client-side preferences...
r26648
Gregory Szorc
exchange: use rich class for sorting clone bundle entries...
r30685 def __init__(self, value, prefers):
self.value = value
self.prefers = prefers
Gregory Szorc
exchange: support sorting URLs by client-side preferences...
r26648
Gregory Szorc
exchange: use rich class for sorting clone bundle entries...
r30685 def _cmp(self, other):
for prefkey, prefvalue in self.prefers:
avalue = self.value.get(prefkey)
bvalue = other.value.get(prefkey)
Gregory Szorc
exchange: support sorting URLs by client-side preferences...
r26648
# Special case for b missing attribute and a matches exactly.
if avalue is not None and bvalue is None and avalue == prefvalue:
return -1
# Special case for a missing attribute and b matches exactly.
if bvalue is not None and avalue is None and bvalue == prefvalue:
return 1
# We can't compare unless attribute present on both.
if avalue is None or bvalue is None:
continue
# Same values should fall back to next attribute.
if avalue == bvalue:
continue
# Exact matches come first.
if avalue == prefvalue:
return -1
if bvalue == prefvalue:
return 1
# Fall back to next attribute.
continue
# If we got here we couldn't sort by attributes and prefers. Fall
# back to index order.
return 0
Gregory Szorc
exchange: use rich class for sorting clone bundle entries...
r30685 def __lt__(self, other):
return self._cmp(other) < 0
def __gt__(self, other):
return self._cmp(other) > 0
def __eq__(self, other):
return self._cmp(other) == 0
def __le__(self, other):
return self._cmp(other) <= 0
def __ge__(self, other):
return self._cmp(other) >= 0
def __ne__(self, other):
return self._cmp(other) != 0
def sortclonebundleentries(ui, entries):
    """Sort manifest entries per the user's ``ui.clonebundleprefers`` config."""
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        # No preferences configured: keep the server-provided order.
        return list(entries)

    # Each preference is 'ATTR=value'; split once into (attr, value) pairs.
    pairs = [p.split('=', 1) for p in prefers]

    wrapped = sorted(clonebundleentry(v, pairs) for v in entries)
    return [w.value for w in wrapped]
Gregory Szorc
exchange: support sorting URLs by client-side preferences...
r26648
Gregory Szorc
clonebundles: support for seeding clones from pre-generated bundles...
r26623 def trypullbundlefromurl(ui, repo, url):
"""Attempt to apply a bundle from a URL."""
Martin von Zweigbergk
clonebundle: use context managers for lock and transaction
r32843 with repo.lock(), repo.transaction('bundleurl') as tr:
Gregory Szorc
clonebundles: support for seeding clones from pre-generated bundles...
r26623 try:
Martin von Zweigbergk
clonebundle: use context managers for lock and transaction
r32843 fh = urlmod.open(ui, url)
cg = readbundle(ui, fh, 'stream')
Gregory Szorc
clonebundle: support bundle2...
r26643
Martin von Zweigbergk
bundle: make applybundle() delegate v1 bundles to applybundle1()
r33043 if isinstance(cg, streamclone.streamcloneapplier):
Martin von Zweigbergk
clonebundle: use context managers for lock and transaction
r32843 cg.apply(repo)
else:
Martin von Zweigbergk
bundle: make applybundle() delegate v1 bundles to applybundle1()
r33043 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
Martin von Zweigbergk
clonebundle: use context managers for lock and transaction
r32843 return True
except urlerr.httperror as e:
Augie Fackler
py3: hunt down str(exception) instances and use util.forcebytestr...
r36440 ui.warn(_('HTTP error fetching bundle: %s\n') %
Yuya Nishihara
stringutil: bulk-replace call sites to point to new module...
r37102 stringutil.forcebytestr(e))
Martin von Zweigbergk
clonebundle: use context managers for lock and transaction
r32843 except urlerr.urlerror as e:
Pulkit Goyal
py3: use util.forcebytestr to convert str to bytes...
r36506 ui.warn(_('error fetching bundle: %s\n') %
Yuya Nishihara
stringutil: bulk-replace call sites to point to new module...
r37102 stringutil.forcebytestr(e.reason))
Gregory Szorc
clonebundles: support for seeding clones from pre-generated bundles...
r26623
Martin von Zweigbergk
clonebundle: use context managers for lock and transaction
r32843 return False