# Copyright 2017 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
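"""Bundle2 part helpers for infinitepush scratch-branch pushes.

(Descriptive summary added for orientation, based on the code below:
this module builds the custom 'b2x:infinitepush' bundle2 part sent on
push, and provides copiedpart, an in-memory copy of an unbundled part
that can be re-read after the original stream has been consumed.)
"""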
from __future__ import absolute_import

from mercurial.i18n import _

from mercurial import (
    bundle2,
    changegroup,
    error,
    extensions,
    revsetlang,
    util,
)

from . import common

isremotebooksenabled = common.isremotebooksenabled

scratchbranchparttype = 'b2x:infinitepush'

def getscratchbranchparts(repo, peer, outgoing, ui, bookmark):
    if not outgoing.missing:
        raise error.Abort(_('no commits to push'))

    if scratchbranchparttype not in bundle2.bundle2caps(peer):
        raise error.Abort(_('no server support for %r') % scratchbranchparttype)

    _validaterevset(repo, revsetlang.formatspec('%ln', outgoing.missing),
                    bookmark)

    supportedversions = changegroup.supportedoutgoingversions(repo)
    # Explicitly avoid using '01' changegroup version in infinitepush to
    # support general delta
    supportedversions.discard('01')
    cgversion = min(supportedversions)
    _handlelfs(repo, outgoing.missing)
    cg = changegroup.makestream(repo, outgoing, cgversion, 'push')

    params = {}
    params['cgversion'] = cgversion
    if bookmark:
        params['bookmark'] = bookmark
        # 'prevbooknode' is necessary for pushkey reply part
        params['bookprevnode'] = ''
        bookmarks = repo._bookmarks
        if bookmark in bookmarks:
            params['bookprevnode'] = bookmarks.changectx(bookmark).hex()

    # Do not send pushback bundle2 part with bookmarks if remotenames extension
    # is enabled. It will be handled manually in `_push()`
    if not isremotebooksenabled(ui):
        params['pushbackbookmarks'] = '1'

    parts = []

    # .upper() marks this as a mandatory part: server will abort if there's no
    # handler
    parts.append(bundle2.bundlepart(
        scratchbranchparttype.upper(),
        advisoryparams=params.iteritems(),
        data=cg))

    return parts
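
# Rough caller sketch (hypothetical, not part of this module): the parts
# returned above are meant to be added to a bundle2 bundler before being
# pushed to the server, along the lines of:
#
#   bundler = bundle2.bundle20(repo.ui, bundle2.bundle2caps(peer))
#   for part in getscratchbranchparts(repo, peer, outgoing, repo.ui, bookmark):
#       bundler.addpart(part)
#   stream = util.chunkbuffer(bundler.getchunks())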

def _validaterevset(repo, revset, bookmark):
    """Abort if the revs to be pushed aren't valid for a scratch branch."""
    if not repo.revs(revset):
        raise error.Abort(_('nothing to push'))
    if bookmark:
        # Allow bundle with many heads only if no bookmark is specified
        heads = repo.revs('heads(%r)', revset)
        if len(heads) > 1:
            raise error.Abort(
                _('cannot push more than one head to a scratch branch'))

def _handlelfs(repo, missing):
    '''Special case if lfs is enabled

    If the lfs extension is enabled, call its prepush hook to make sure
    large files are uploaded to lfs before the changegroup is sent.
    '''
    try:
        lfsmod = extensions.find('lfs')
        lfsmod.wrapper.uploadblobsfromrevs(repo, missing)
    except KeyError:
        # Ignore if lfs extension is not enabled
        return

class copiedpart(object):
    """a copy of unbundlepart content that can be consumed later"""

    def __init__(self, part):
        # copy "public properties"
        self.type = part.type
        self.id = part.id
        self.mandatory = part.mandatory
        self.mandatoryparams = part.mandatoryparams
        self.advisoryparams = part.advisoryparams
        self.params = part.params
        self.mandatorykeys = part.mandatorykeys
        # copy the buffer
        self._io = util.stringio(part.read())

    def consume(self):
        return

    def read(self, size=None):
        if size is None:
            return self._io.read()
        else:
            return self._io.read(size)
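
# Usage sketch (hypothetical): a bundle2 unbundlepart is read once from its
# underlying stream, so code that wants to keep a part around for later
# processing can snapshot it first and re-read the buffered payload, e.g.:
#
#   saved = copiedpart(part)
#   ...                        # the original stream moves on
#   data = saved.read()        # the buffered payload is still available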