# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
from node import hex, nullid
import errno, urllib

import util, scmutil, changegroup, base85, error
import discovery, phases, obsolete, bookmarks, bundle2, pushkey

def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.unbundle10(fh, alg)
    elif version == '2X':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    else:
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))

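# Illustrative sketch only (not part of the module): a caller opens the bundle
# stream itself and dispatches on the type returned by readbundle().
#
#     f = open(fname, 'rb')
#     gen = readbundle(ui, f, fname)
#     if isinstance(gen, changegroup.unbundle10):
#         ...  # plain HG10 changegroup stream
#     else:
#         ...  # bundle2.unbundle20 stream, iterate over its parts
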
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker
    format."""
    if markers:
        remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
        version = obsolete.commonversion(remoteversions)
        if version is None:
            raise ValueError('bundler does not support common obsmarker format')
        stream = obsolete.encodemarkers(markers, True, version=version)
        return bundler.newpart('B2X:OBSMARKERS', data=stream)
    return None

class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new one should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.ret = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phase changes that must be pushed alongside the changesets
        self.outdatedphases = None
        # phase changes that must be pushed if the changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changesets filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.ret:
            return self.futureheads
        else:
            return self.fallbackheads

def push(repo, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch)
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
                                          False)
                and pushop.remote.capable('bundle2-exp')):
                _pushbundle2(pushop)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    return pushop.ret

# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for functions performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pushdiscoverymapping dictionary directly."""
    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec

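# Illustrative sketch only (hypothetical extension code, not part of this
# module): an extension can register an extra discovery step with the
# decorator above. The step name 'mystep' and its body are made up here.
#
#     @exchange.pushdiscovery('mystep')
#     def _pushdiscoverymystep(pushop):
#         pushop.ui.debug('running my discovery step\n')
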
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)

@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    unfi = pushop.repo.unfiltered()
    fci = discovery.findcommonincoming
    commoninc = fci(unfi, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that the revset breaks if droots is not strictly
    # XXX roots; we may want to ensure it is, but that is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback

@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if (obsolete._enabled
        and pushop.repo.obsstore
        and 'obsolete' in pushop.remote.listkeys('namespaces')):
        repo = pushop.repo
        # very naive computation, that can be quite expensive on big repos.
        # However: evolution is currently slow on them anyway.
        nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
        pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)

@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    comp = bookmarks.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid = comp
    for b, scid, dcid in advsrc:
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))

def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are here for the 80-char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for the i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If we are pushing at least one obsolete or unstable
            # changeset in missing, at least one of the missing heads
            # will be obsolete or unstable. So checking heads only is ok.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(_(mst)
                                     % (ctx.troubles()[0],
                                        ctx))
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True

# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname):
    """decorator for functions generating bundle2 parts

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        b2partsgenorder.append(stepname)
        return func
    return dec

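# Illustrative sketch only (hypothetical extension code, not part of this
# module): a part generator registered this way receives the push operation
# and the bundler, adds parts to the bundler, and may return a reply handler.
# The step name 'mypart' and the part type 'b2x:mypart' are made up here.
#
#     @exchange.b2partsgenerator('mypart')
#     def _pushb2mypart(pushop, bundler):
#         part = bundler.newpart('b2x:mypart', data='payload')
#         def handlereply(op):
#             pass  # inspect op.records.getreplies(part.id) if needed
#         return handlereply
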
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.ret`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
    cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.ret = cgreplies['changegroup'][0]['return']
    return handlereply

@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if not 'b2x:pushkey' in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply

@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, pushop.outobsmarkers)

@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'b2x:pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        part2book.append((part.id, book))
    def handlereply(op):
        for partid, book in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    pushop.ui.status(_("updating bookmark %s\n") % book)
                else:
                    pushop.ui.warn(_('updating bookmark %s failed!\n') % book)
    return handlereply

def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
    bundler.newpart('b2x:replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        op = bundle2.processbundle(pushop.repo, reply)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    for rephand in replyhandlers:
        rephand(op)

def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.bundle10(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
                                        bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                            pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.ret is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote supports phases
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.ret:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        b2caps = bundle2.bundle2caps(pushop.remote)
        if 'b2x:pushkey' in b2caps:
            # server supports bundle2, let's do a batched push through it
            #
            # This will eventually be unified with the changesets bundle2 push
            bundler = bundle2.bundle20(pushop.ui, b2caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
            bundler.newpart('b2x:replycaps', data=capsblob)
            part2node = []
            enc = pushkey.encode
            for newremotehead in outdated:
                part = bundler.newpart('b2x:pushkey')
                part.addparam('namespace', enc('phases'))
                part.addparam('key', enc(newremotehead.hex()))
                part.addparam('old', enc(str(phases.draft)))
                part.addparam('new', enc(str(phases.public)))
                part2node.append((part.id, newremotehead))
            stream = util.chunkbuffer(bundler.getchunks())
            try:
                reply = pushop.remote.unbundle(stream, ['force'], 'push')
                op = bundle2.processbundle(pushop.repo, reply)
            except error.BundleValueError, exc:
                raise util.Abort('missing support for %s' % exc)
            for partid, node in part2node:
                partrep = op.records.getreplies(partid)
                results = partrep['pushkey']
                assert len(results) <= 1
                msg = None
                if not results:
                    msg = _('server ignored update of %s to public!\n') % node
                elif not int(results[0]['return']):
                    msg = _('updating %s to public failed!\n') % node
                if msg is not None:
                    pushop.ui.warn(msg)
        else:
            # fallback to independent pushkey command
            for newremotehead in outdated:
                r = pushop.remote.pushkey('phases',
                                          newremotehead.hex(),
                                          str(phases.draft),
                                          str(phases.public))
                if not r:
                    pushop.ui.warn(_('updating %s to public failed!\n')
                                   % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.locallocked:
        tr = pushop.repo.transaction('push-phase-sync')
        try:
            phases.advanceboundary(pushop.repo, tr, phase, nodes)
            tr.close()
        finally:
            tr.release()
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        rslts = []
        remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.ret == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote
    for b, old, new in pushop.outbookmarks:
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(_("updating bookmark %s\n") % b)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % b)

class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force
        # the name of the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps remaining todo (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

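# Illustrative sketch only (not part of the module): the lazy transaction
# helpers above are meant to be used in a try/finally pattern, where the
# transaction is only created when a step actually needs to write something.
# This mirrors what pull() below does.
#
#     pullop = pulloperation(repo, remote)
#     try:
#         ...  # steps call pullop.gettransaction() on demand
#         pullop.closetransaction()
#     finally:
#         pullop.releasetransaction()
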
def pull(repo, remote, heads=None, force=False):
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
            and pullop.remote.capable('bundle2-exp')):
            _pullbundle2(pullop)
        if 'changegroup' in pullop.todosteps:
            _pullchangeset(pullop)
        if 'phases' in pullop.todosteps:
            _pullphase(pullop)
        if 'obsmarkers' in pullop.todosteps:
            _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        lock.release()

    return pullop.cgresult

def _pulldiscovery(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will be changed to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    pullop.common, pullop.fetch, pullop.rheads = tmp

def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data is the changegroup."""
    remotecaps = bundle2.bundle2caps(pullop.remote)
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}
    # pulling changegroup
    pullop.todosteps.remove('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    if 'b2x:listkeys' in remotecaps:
        kwargs['listkeys'] = ['phase']
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    _pullbundle2extraprepare(pullop, kwargs)
    if kwargs.keys() == ['format']:
        return # nothing to pull
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        assert len(op.records['changegroup']) == 1
        pullop.cgresult = op.records['changegroup'][0]['return']

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""
    pass

def _pullchangeset(pullop):
    """pull changesets from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so we don't open
    # a transaction for nothing and don't break a future useful rollback call.
    pullop.todosteps.remove('changegroup')

    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())

def _pullphase(pullop):
    # Get remote phases data from remote
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    pullop.todosteps.remove('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the calling
    code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes."""
    pullop.todosteps.remove('obsmarkers')
    tr = None
    if obsolete._enabled:
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr

def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = set(['HG2X'])
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    caps.add('bundle2=' + urllib.quote(capsblob))
    return caps

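# Illustrative sketch only (not part of the module): the returned set is later
# passed as `bundlecaps` to getbundle(). Its shape is roughly
#
#     set(['HG2X', 'bundle2=<urlquoted capabilities blob>'])
#
# where the blob encodes the local repo's bundle2 capabilities.
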
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kinds of parts)

    Could be a bundle HG10 or a bundle HG2X depending on the bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    change when more part types become available for bundle2.

    This is different from changegroup.getbundle that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we want to use to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        cg = changegroup.getbundle(repo, source, heads=heads,
                                   common=common, bundlecaps=bundlecaps)
    elif 'HG2X' not in bundlecaps:
        raise ValueError(_('request for bundle10 must include changegroup'))
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)
    if cg:
        bundler.newpart('b2x:changegroup', data=cg.getchunks())
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('b2x:listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)
    _getbundleextrapart(bundler, repo, source, heads=heads, common=common,
                        bundlecaps=bundlecaps, **kwargs)
    return util.chunkbuffer(bundler.getchunks())

def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
                        bundlecaps=None, **kwargs):
    """hook function to let extensions add parts to the requested bundle"""
    pass

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and has
    a mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            try:
                tr = repo.transaction('unbundle')
                tr.hookargs['bundle2-exp'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                cl = repo.unfiltered().changelog
                p = cl.writepending() and repo.root or ""
                repo.hook('b2x-pretransactionclose', throw=True, source=source,
                          url=url, pending=p, **tr.hookargs)
                tr.close()
                repo.hook('b2x-transactionclose', source=source, url=url,
                          **tr.hookargs)
            except Exception, exc:
                exc.duringunbundle2 = True
                raise
        else:
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            tr.release()
        lock.release()
    return r