# HG changeset patch
# User RhodeCode Admin
# Date 2024-06-06 14:56:58
# Node ID ecae6663af80480e71246b6e6208260818bc6541
# Parent d32b737d06b0a122bc200d87e4e31ff63ba70255
chore(code-sync): synced code from ce for archive_cache

diff --git a/vcsserver/lib/rc_cache/archive_cache/fanout_cache.py b/vcsserver/lib/rc_cache/archive_cache/fanout_cache.py
--- a/vcsserver/lib/rc_cache/archive_cache/fanout_cache.py
+++ b/vcsserver/lib/rc_cache/archive_cache/fanout_cache.py
@@ -25,8 +25,9 @@
 import typing
 import zlib
 import sqlite3
 
-from vcsserver.lib.rc_json import json
+from ...ext_json import json
 from .lock import GenerationLock
+from .utils import format_size
 
 log = logging.getLogger(__name__)
@@ -313,6 +314,9 @@ class FanoutCache:
 
         select_policy = EVICTION_POLICY[policy]['evict']
 
+        log.debug('Running eviction policy \'%s\', and checking for size limit: %s',
+                  policy, format_size(size_limit))
+
         if select_policy is None:
             return 0
 
@@ -326,21 +330,25 @@ class FanoutCache:
             key_file_path = os.path.join(shard._directory, key_file)
             with open(key_file_path, 'rb') as f:
                 metadata = json.loads(f.read())
-            # in case we don't have size re-calc it...
-            if not metadata.get('size'):
-                fn = metadata.get('full_path')
-                size = os.stat(fn).st_size
+
+            size = metadata.get('size')
+            filename = metadata.get('filename')
+            full_path = metadata.get('full_path')
+
+            if not size:
+                # in case we don't have size re-calc it...
+                size = os.stat(full_path).st_size
 
             data.append([
                 cnt,
                 key_file,
                 key_file_path,
-                metadata.get('filename'),
-                metadata.get('full_path'),
+                filename,
+                full_path,
                 metadata.get('store_time', 0),
                 metadata.get('access_time', 0),
                 metadata.get('access_count', 0),
-                metadata.get('size', size),
+                size,
             ])
             cnt += 1
 
@@ -348,20 +356,27 @@ class FanoutCache:
         db.bulk_insert(data)
 
         ((total_size,),) = db.sql('SELECT COALESCE(SUM(size), 0) FROM archive_cache').fetchall()
-
+        log.debug('Analyzed %s keys, occupied: %s', len(data), format_size(total_size))
         select_policy_qry = select_policy.format(fields='key_file_path, full_path, size')
         sorted_keys = db.sql(select_policy_qry).fetchall()
 
+        removed_items = 0
+        removed_size = 0
         for key, cached_file, size in sorted_keys:
             # simulate removal impact BEFORE removal
             total_size -= size
+
             if total_size <= size_limit:
                 # we obtained what we wanted...
                 break
 
             os.remove(cached_file)
             os.remove(key)
-        return
+            removed_items += 1
+            removed_size += size
+
+        log.debug('Removed %s cache archives, and reduced size: %s', removed_items, format_size(removed_size))
+        return removed_items
 
 
 def get_archival_config(config):
diff --git a/vcsserver/lib/rc_cache/archive_cache/lock.py b/vcsserver/lib/rc_cache/archive_cache/lock.py
--- a/vcsserver/lib/rc_cache/archive_cache/lock.py
+++ b/vcsserver/lib/rc_cache/archive_cache/lock.py
@@ -16,8 +16,7 @@
 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 
 import redis
-from vcsserver.lib._vendor import redis_lock
-
+from ..._vendor import redis_lock
 from .utils import ArchiveCacheLock
 