backends.py
342 lines
| 10.7 KiB
| text/x-python
|
PythonLexer
r5088 | # Copyright (C) 2015-2023 RhodeCode GmbH | |||
r2845 | # | |||
# This program is free software: you can redistribute it and/or modify | ||||
# it under the terms of the GNU Affero General Public License, version 3 | ||||
# (only), as published by the Free Software Foundation. | ||||
# | ||||
# This program is distributed in the hope that it will be useful, | ||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||||
# GNU General Public License for more details. | ||||
# | ||||
# You should have received a copy of the GNU Affero General Public License | ||||
# along with this program. If not, see <http://www.gnu.org/licenses/>. | ||||
# | ||||
# This program is dual-licensed. If you wish to learn more about the | ||||
# RhodeCode Enterprise Edition, including its added features, Support services, | ||||
# and proprietary license terms, please see https://rhodecode.com/licenses/ | ||||
r3848 | ||||
r2878 | import errno | |||
r5067 | import fcntl | |||
import functools | ||||
r2878 | import logging | |||
r5067 | import os | |||
import pickle | ||||
import time | ||||
r2878 | ||||
r5067 | import gevent | |||
r3851 | import msgpack | |||
r4916 | import redis | |||
r5067 | ||||
r4985 | flock_org = fcntl.flock | |||
from typing import Union | ||||
r2845 | ||||
r5067 | from dogpile.cache.api import Deserializer, Serializer | |||
from dogpile.cache.backends import file as file_backend | ||||
r2845 | from dogpile.cache.backends import memory as memory_backend | |||
from dogpile.cache.backends import redis as redis_backend | ||||
r4985 | from dogpile.cache.backends.file import FileLock | |||
r2878 | from dogpile.cache.util import memoized_property | |||
r4720 | ||||
r5497 | from ...lib.memory_lru_dict import LRUDict, LRUDictDebug | |||
from ...lib.str_utils import safe_bytes, safe_str | ||||
from ...lib.type_utils import str2bool | ||||
r2845 | ||||
_default_max_size = 1024 | ||||
r2878 | log = logging.getLogger(__name__) | |||
r2845 | ||||
class LRUMemoryBackend(memory_backend.MemoryBackend):
    """In-process memory backend whose cache dict evicts entries LRU-style.

    ``max_size`` bounds the number of cached keys; passing
    ``log_key_count`` in the arguments swaps in the debug LRU dict that
    logs its key count.
    """
    key_prefix = 'lru_mem_backend'
    pickle_values = False

    def __init__(self, arguments):
        self.max_size = arguments.pop('max_size', _default_max_size)

        # debug variant logs key counts; selected once at construction time
        lru_class = LRUDictDebug if arguments.pop('log_key_count', None) else LRUDict
        arguments['cache_dict'] = lru_class(self.max_size)
        super().__init__(arguments)

    def __repr__(self):
        return f'{self.__class__}(maxsize=`{self.max_size}`)'

    def __str__(self):
        return repr(self)

    def delete(self, key):
        # deleting a key that is absent (e.g. already LRU-evicted) is a no-op
        try:
            del self._cache[key]
        except KeyError:
            pass

    def list_keys(self, prefix):
        # NOTE: prefix is accepted for interface parity with the other
        # backends but not applied — every cached key is returned
        return list(self._cache.keys())

    def delete_multi(self, keys):
        for cache_key in keys:
            self.delete(cache_key)

    def delete_multi_by_prefix(self, prefix):
        affected_keys = self.list_keys(prefix=prefix)
        if affected_keys:
            self.delete_multi(affected_keys)
        return len(affected_keys)
r2845 | ||||
class PickleSerializer:
    """Mixin supplying pickle-based ``serializer``/``deserializer`` hooks
    for dogpile cache backends."""
    serializer: None | Serializer = staticmethod(  # type: ignore
        functools.partial(pickle.dumps, protocol=pickle.HIGHEST_PROTOCOL)
    )
    # pickle.loads needs no bound arguments, so it is used directly
    deserializer: None | Deserializer = staticmethod(  # type: ignore
        pickle.loads
    )
r2845 | ||||
class MsgPackSerializer:
    """Mixin supplying msgpack-based ``serializer``/``deserializer`` hooks
    for dogpile cache backends."""
    serializer: None | Serializer = staticmethod(  # type: ignore
        msgpack.packb
    )
    # use_list=False keeps msgpack arrays as tuples on the way back in
    deserializer: None | Deserializer = staticmethod(  # type: ignore
        lambda payload: msgpack.unpackb(payload, use_list=False)
    )
r3417 | ||||
class CustomLockFactory(FileLock):
    """FileLock variant whose flock cooperates with gevent.

    ``_module`` is memoized, so the ``fcntl.flock`` monkey-patch below runs
    only once per process; the original flock was saved at import time as
    ``flock_org``.
    """

    @memoized_property
    def _module(self):

        def gevent_flock(fd, operation):
            """
            Gevent compatible flock
            """
            # set non-blocking, this will cause an exception if we cannot acquire a lock
            operation |= fcntl.LOCK_NB
            start_lock_time = time.time()
            timeout = 60 * 15  # 15min
            while True:
                try:
                    flock_org(fd, operation)
                    # lock has been acquired
                    break
                except (OSError, IOError) as e:
                    # raise on other errors than Resource temporarily unavailable
                    if e.errno != errno.EAGAIN:
                        raise
                    elif (time.time() - start_lock_time) > timeout:
                        # waited too long for the lock; better to fail than loop forever
                        log.error('Failed to acquire lock on `%s` after waiting %ss',
                                  self.filename, timeout)
                        raise
                    # yield to other greenlets briefly before retrying
                    wait_timeout = 0.03
                    log.debug('Failed to acquire lock on `%s`, retry in %ss',
                              self.filename, wait_timeout)
                    gevent.sleep(wait_timeout)

        # hand dogpile's FileLock the patched module so it uses gevent_flock
        fcntl.flock = gevent_flock
        return fcntl
class FileNamespaceBackend(PickleSerializer, file_backend.DBMBackend):
    """DBM-file backed dogpile backend storing pickled values.

    Keys are namespaced as ``file_backend:<key>``; the directory holding the
    dbm file is created on demand, and locking goes through
    ``CustomLockFactory`` so it cooperates with gevent.
    """
    key_prefix = 'file_backend'

    def __init__(self, arguments):
        arguments['lock_factory'] = CustomLockFactory
        db_file = arguments.get('filename')

        log.debug('initialing cache-backend=%s db in %s', self.__class__.__name__, db_file)
        db_file_dir = os.path.dirname(db_file)
        # exist_ok avoids the isdir/makedirs race when several workers boot
        # at once; skip entirely when the filename has no directory component
        if db_file_dir:
            os.makedirs(db_file_dir, exist_ok=True)

        try:
            super().__init__(arguments)
        except Exception:
            log.exception('Failed to initialize db at: %s', db_file)
            raise

    def __repr__(self):
        return f'{self.__class__}(file=`{self.filename}`)'

    def __str__(self):
        return self.__repr__()

    def _get_keys_pattern(self, prefix: bytes = b''):
        """Return the namespaced key prefix bytes: ``<key_prefix>:<prefix>``."""
        return b'%b:%b' % (safe_bytes(self.key_prefix), safe_bytes(prefix))

    def list_keys(self, prefix: bytes = b''):
        """Return all dbm keys starting with the namespaced *prefix*.

        :raises Exception: re-raised after logging if the dbm read fails.
        """
        pattern = self._get_keys_pattern(prefix)

        with self._dbm_file(True) as dbm:
            try:
                # pattern is never empty (always contains key_prefix + b':'),
                # so a plain startswith filter is sufficient
                return [k for k in dbm.keys() if k.startswith(pattern)]
            except Exception:
                log.error('Failed to fetch DBM keys from DB: %s', self.get_store())
                raise

    def delete_multi_by_prefix(self, prefix):
        """Delete every key matching *prefix*; return how many were removed."""
        cache_keys = self.list_keys(prefix=prefix)
        num_affected_keys = len(cache_keys)
        if num_affected_keys:
            self.delete_multi(cache_keys)
        return num_affected_keys

    def get_store(self):
        """Path of the backing dbm file."""
        return self.filename

    def cleanup_store(self):
        """Remove any on-disk files a dbm implementation may have created."""
        for ext in ("db", "dat", "pag", "dir"):
            final_filename = self.filename + os.extsep + ext
            if os.path.exists(final_filename):
                os.remove(final_filename)
                log.warning('Removed dbm file %s', final_filename)
r2845 | ||||
class BaseRedisBackend(redis_backend.RedisBackend):
    """Shared redis dogpile backend; subclasses choose the serializer and
    set a non-empty ``key_prefix``."""
    key_prefix = ''

    def __init__(self, arguments):
        # human-readable connection description, used only in __repr__
        self.db_conn = arguments.get('host', '') or arguments.get('url', '') or 'redis-host'
        super().__init__(arguments)

        self._lock_timeout = self.lock_timeout
        self._lock_auto_renewal = str2bool(arguments.pop("lock_auto_renewal", True))

        if self._lock_auto_renewal and not self._lock_timeout:
            # set default timeout for auto_renewal
            self._lock_timeout = 30

    def __repr__(self):
        return f'{self.__class__}(conn=`{self.db_conn}`)'

    def __str__(self):
        return self.__repr__()

    def _create_client(self):
        # url takes precedence over discrete host/port/db/password settings
        args = {}
        if self.url is not None:
            args.update(url=self.url)
        else:
            args.update(
                host=self.host, password=self.password,
                port=self.port, db=self.db
            )
        connection_pool = redis.ConnectionPool(**args)
        # a single client serves both reads and writes
        self.writer_client = redis.StrictRedis(
            connection_pool=connection_pool
        )
        self.reader_client = self.writer_client

    def _get_keys_pattern(self, prefix: bytes = b''):
        # glob-style pattern for redis KEYS: `<key_prefix>:<prefix>*`
        return b'%b:%b*' % (safe_bytes(self.key_prefix), safe_bytes(prefix))

    def list_keys(self, prefix: bytes = b''):
        prefix = self._get_keys_pattern(prefix)
        return self.reader_client.keys(prefix)

    def delete_multi_by_prefix(self, prefix, use_lua=False):
        if use_lua:
            # high efficient LUA script to delete ALL keys by prefix...
            # NOTE(review): this path matches `<prefix>*` directly, while the
            # non-lua path matches `<key_prefix>:<prefix>*` — confirm callers
            # pass a fully-qualified prefix when use_lua=True.
            lua = """local keys = redis.call('keys', ARGV[1])
            for i=1,#keys,5000 do
            redis.call('del', unpack(keys, i, math.min(i+(5000-1), #keys)))
            end
            return #keys"""
            num_affected_keys = self.writer_client.eval(
                lua,
                0,
                f"{prefix}*")
        else:
            cache_keys = self.list_keys(prefix=prefix)
            num_affected_keys = len(cache_keys)
            if num_affected_keys:
                self.delete_multi(cache_keys)
        return num_affected_keys

    def get_store(self):
        return self.reader_client.connection_pool

    def get_mutex(self, key):
        # distributed, gevent-friendly lock; None lets dogpile fall back to
        # its default in-process mutex
        if self.distributed_lock:
            lock_key = f'_lock_{safe_str(key)}'
            return get_mutex_lock(
                self.writer_client, lock_key,
                self._lock_timeout,
                auto_renewal=self._lock_auto_renewal
            )
        else:
            return None
r3851 | ||||
class RedisPickleBackend(PickleSerializer, BaseRedisBackend):
    """Redis backend that persists values with pickle."""
    key_prefix = 'redis_pickle_backend'
class RedisMsgPackBackend(MsgPackSerializer, BaseRedisBackend):
    """Redis backend that persists values with msgpack."""
    key_prefix = 'redis_msgpack_backend'
r4714 | ||||
def get_mutex_lock(client, lock_key, lock_timeout, auto_renewal=False):
    """Build a dogpile-compatible mutex wrapping the vendored ``redis_lock``.

    :param client: redis client that holds the lock
    :param lock_key: redis key name of the lock
    :param lock_timeout: expiry passed to ``redis_lock.Lock``
    :param auto_renewal: keep extending the lock's expiry while it is held
    """
    from ...lib._vendor import redis_lock

    class _RedisLockWrapper:
        """LockWrapper for redis_lock"""

        @classmethod
        def get_lock(cls):
            return redis_lock.Lock(
                redis_client=client,
                name=lock_key,
                expire=lock_timeout,
                auto_renewal=auto_renewal,
                strict=True,
            )

        def __init__(self):
            self.lock = self.get_lock()
            self.lock_key = lock_key

        def __repr__(self):
            return f"{self.__class__.__name__}:{lock_key}"

        # __str__ renders the same text as __repr__
        __str__ = __repr__

        def acquire(self, wait=True):
            log.debug('Trying to acquire Redis lock for key %s', self.lock_key)
            try:
                acquired = self.lock.acquire(wait)
                log.debug('Got lock for key %s, %s', self.lock_key, acquired)
                return acquired
            except redis_lock.AlreadyAcquired:
                # this holder already owns the lock
                return False
            except redis_lock.AlreadyStarted:
                # refresh thread exists, but it also means we acquired the lock
                return True

        def release(self):
            # releasing a lock we no longer hold is silently ignored
            try:
                self.lock.release()
            except redis_lock.NotAcquired:
                pass

    return _RedisLockWrapper()