##// END OF EJS Templates
fix: lfs chunked uploads....
fix: lfs chunked uploads. When testing large file uploads, it was found that gunicorn raises NoMoreData instead of returning a value. This fixes the problem and avoids showing excessive exceptions for no reason. Previously, file uploads still worked but spawned errors in the logs.

File last commit:

r1206:cf300ad3 default
r1280:b2259b07 default
Show More
__init__.py
114 lines | 3.6 KiB | text/x-python | PythonLexer
# RhodeCode VCSServer provides access to different vcs backends via network.
# Copyright (C) 2014-2023 RhodeCode GmbH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import logging
import threading
from dogpile.cache import register_backend
from . import region_meta
from .utils import (
backend_key_generator,
clear_cache_namespace,
get_default_cache_settings,
get_or_create_region,
make_region,
str2bool,
)
module_name = 'vcsserver'

# Register the RhodeCode-specific dogpile backends under their
# "dogpile.cache.rc.*" names so regions can select them by string key.
_backends_path = f"{module_name}.lib.rc_cache.backends"
for _backend_key, _backend_cls in (
        ("dogpile.cache.rc.memory_lru", "LRUMemoryBackend"),
        ("dogpile.cache.rc.file_namespace", "FileNamespaceBackend"),
        ("dogpile.cache.rc.redis", "RedisPickleBackend"),
        ("dogpile.cache.rc.redis_msgpack", "RedisMsgPackBackend"),
):
    register_backend(_backend_key, _backends_path, _backend_cls)

log = logging.getLogger(__name__)

# Version tag for cached objects; bump to invalidate older entries.
CACHE_OBJ_CACHE_VER = 'v2'

# Cache clearing modes.
CLEAR_DELETE = 'delete'
CLEAR_INVALIDATE = 'invalidate'
def async_creation_runner(cache, cache_key, creator, mutex):
    """Dogpile async runner: compute ``creator()`` on a background thread.

    The created value is stored under *cache_key* in *cache*, and *mutex*
    (acquired by dogpile before scheduling this runner) is always released,
    even if the creator raises.
    """
    def _create_and_store():
        try:
            cache.set(cache_key, creator())
        finally:
            # Must release no matter what happened above, otherwise other
            # callers would block on the dogpile lock forever.
            mutex.release()

    worker = threading.Thread(target=_create_and_store)
    worker.start()
def configure_dogpile_cache(settings):
    """Create dogpile cache regions from ``rc_cache.*`` entries in *settings*.

    Each distinct ``rc_cache.<name>.*`` prefix produces one region, which is
    configured from settings, given a backend-aware key generator, optionally
    switched to the async creation runner, and finally registered in
    ``region_meta.dogpile_cache_regions``.
    """
    cache_dir = settings.get('cache_dir')
    if cache_dir:
        region_meta.dogpile_config_defaults['cache_dir'] = cache_dir

    rc_cache_data = get_default_cache_settings(settings, prefixes=['rc_cache.'])

    # One region per namespace prefix; skip prefixes we already handled.
    seen_namespaces = set()
    for setting_key in rc_cache_data:
        namespace_name = setting_key.split('.', 1)[0]
        if namespace_name in seen_namespaces:
            continue
        seen_namespaces.add(namespace_name)
        log.debug('dogpile: found following cache regions: %s', namespace_name)

        region = make_region(
            name=namespace_name,
            function_key_generator=None,
            async_creation_runner=None,
        )
        region.configure_from_config(settings, f'rc_cache.{namespace_name}.')
        # The key generator depends on the concrete backend chosen above.
        region.function_key_generator = backend_key_generator(region.actual_backend)

        # NOTE(review): pop (not get) consumes the flag from settings —
        # presumably to keep it out of later processing; side effect kept.
        use_async = str2bool(
            settings.pop(f'rc_cache.{namespace_name}.async_creator', 'false'))
        if use_async:
            log.debug('configuring region %s with async creator', region)
            region.async_creation_runner = async_creation_runner

        if log.isEnabledFor(logging.DEBUG):
            region_args = dict(
                backend=region.actual_backend,
                region_invalidator=region.region_invalidator.__class__)
            log.debug('dogpile: registering a new region key=`%s` args=%s',
                      namespace_name, region_args)

        region_meta.dogpile_cache_regions[namespace_name] = region
def includeme(config):
    """``config.include``-style hook: build cache regions from app settings."""
    app_settings = config.registry.settings
    configure_dogpile_cache(app_settings)