feat(archive-cache): added option to define a configurable top-level bucket for all shards
super-admin
r5445:fdcdfe77 default
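
For context, a minimal sketch of the archive-cache settings this commit touches, written as a Python/configparser fragment since the changed code is Python. The option names and defaults come from the diff below; the endpoint, key, and secret values are placeholders, and the fragment is illustrative rather than a complete rhodecode.ini.

```python
# Illustrative rhodecode.ini fragment for the objectstore archive cache.
# Option names/defaults are taken from the diff; values marked as
# placeholders are assumptions, not working credentials.
import configparser

INI_FRAGMENT = """
[app:main]
archive_cache.backend.type = objectstore
archive_cache.objectstore.url = http://s3-endpoint:9000
archive_cache.objectstore.key = placeholder-key
archive_cache.objectstore.secret = placeholder-secret
; new in this commit: one configurable top-level bucket holding every shard
archive_cache.objectstore.bucket_root = rhodecode-archive-cache
archive_cache.objectstore.bucket_shards = 8
"""

parser = configparser.ConfigParser()
parser.read_string(INI_FRAGMENT)
print(parser['app:main']['archive_cache.objectstore.bucket_root'])
# -> rhodecode-archive-cache
```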
@@ -1,221 +1,222 b''
1 1 # Copyright (C) 2010-2023 RhodeCode GmbH
2 2 #
3 3 # This program is free software: you can redistribute it and/or modify
4 4 # it under the terms of the GNU Affero General Public License, version 3
5 5 # (only), as published by the Free Software Foundation.
6 6 #
7 7 # This program is distributed in the hope that it will be useful,
8 8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 10 # GNU General Public License for more details.
11 11 #
12 12 # You should have received a copy of the GNU Affero General Public License
13 13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 14 #
15 15 # This program is dual-licensed. If you wish to learn more about the
16 16 # RhodeCode Enterprise Edition, including its added features, Support services,
17 17 # and proprietary license terms, please see https://rhodecode.com/licenses/
18 18
19 19 import os
20 20 import tempfile
21 21 import logging
22 22
23 23 from pyramid.settings import asbool
24 24
25 25 from rhodecode.config.settings_maker import SettingsMaker
26 26 from rhodecode.config import utils as config_utils
27 27
28 28 log = logging.getLogger(__name__)
29 29
30 30
31 31 def sanitize_settings_and_apply_defaults(global_config, settings):
32 32 """
33 33 Applies settings defaults and does all type conversion.
34 34
35 35 We would move all settings parsing and preparation into this place, so that
36 36 we have only one place left which deals with this part. The remaining parts
37 37 of the application would start to rely fully on well-prepared settings.
38 38
39 39 This piece would later be split up per topic to avoid a big fat monster
40 40 function.
41 41 """
42 42 jn = os.path.join
43 43
44 44 global_settings_maker = SettingsMaker(global_config)
45 45 global_settings_maker.make_setting('debug', default=False, parser='bool')
46 46 debug_enabled = asbool(global_config.get('debug'))
47 47
48 48 settings_maker = SettingsMaker(settings)
49 49
50 50 settings_maker.make_setting(
51 51 'logging.autoconfigure',
52 52 default=False,
53 53 parser='bool')
54 54
55 55 logging_conf = jn(os.path.dirname(global_config.get('__file__')), 'logging.ini')
56 56 settings_maker.enable_logging(logging_conf, level='INFO' if debug_enabled else 'DEBUG')
57 57
58 58 # Default includes, possible to change as a user
59 59 pyramid_includes = settings_maker.make_setting('pyramid.includes', [], parser='list:newline')
60 60 log.debug(
61 61 "Using the following pyramid.includes: %s",
62 62 pyramid_includes)
63 63
64 64 settings_maker.make_setting('rhodecode.edition', 'Community Edition')
65 65 settings_maker.make_setting('rhodecode.edition_id', 'CE')
66 66
67 67 if 'mako.default_filters' not in settings:
68 68 # set custom default filters if we don't have it defined
69 69 settings['mako.imports'] = 'from rhodecode.lib.base import h_filter'
70 70 settings['mako.default_filters'] = 'h_filter'
71 71
72 72 if 'mako.directories' not in settings:
73 73 mako_directories = settings.setdefault('mako.directories', [
74 74 # Base templates of the original application
75 75 'rhodecode:templates',
76 76 ])
77 77 log.debug(
78 78 "Using the following Mako template directories: %s",
79 79 mako_directories)
80 80
81 81 # NOTE(marcink): fix redis requirement for schema of connection since 3.X
82 82 if 'beaker.session.type' in settings and settings['beaker.session.type'] == 'ext:redis':
83 83 raw_url = settings['beaker.session.url']
84 84 if not raw_url.startswith(('redis://', 'rediss://', 'unix://')):
85 85 settings['beaker.session.url'] = 'redis://' + raw_url
86 86
87 87 settings_maker.make_setting('__file__', global_config.get('__file__'))
88 88
89 89 # TODO: johbo: Re-think this, usually the call to config.include
90 90 # should allow to pass in a prefix.
91 91 settings_maker.make_setting('rhodecode.api.url', '/_admin/api')
92 92
93 93 # Sanitize generic settings.
94 94 settings_maker.make_setting('default_encoding', 'UTF-8', parser='list')
95 95 settings_maker.make_setting('gzip_responses', False, parser='bool')
96 96 settings_maker.make_setting('startup.import_repos', 'false', parser='bool')
97 97
98 98 # statsd
99 99 settings_maker.make_setting('statsd.enabled', False, parser='bool')
100 100 settings_maker.make_setting('statsd.statsd_host', 'statsd-exporter', parser='string')
101 101 settings_maker.make_setting('statsd.statsd_port', 9125, parser='int')
102 102 settings_maker.make_setting('statsd.statsd_prefix', '')
103 103 settings_maker.make_setting('statsd.statsd_ipv6', False, parser='bool')
104 104
105 105 settings_maker.make_setting('vcs.svn.compatible_version', '')
106 106 settings_maker.make_setting('vcs.svn.proxy.enabled', True, parser='bool')
107 107 settings_maker.make_setting('vcs.svn.proxy.host', 'http://svn:8090', parser='string')
108 108 settings_maker.make_setting('vcs.hooks.protocol', 'http')
109 109 settings_maker.make_setting('vcs.hooks.host', '*')
110 110 settings_maker.make_setting('vcs.scm_app_implementation', 'http')
111 111 settings_maker.make_setting('vcs.server', '')
112 112 settings_maker.make_setting('vcs.server.protocol', 'http')
113 113 settings_maker.make_setting('vcs.server.enable', 'true', parser='bool')
114 114 settings_maker.make_setting('vcs.hooks.direct_calls', 'false', parser='bool')
115 115 settings_maker.make_setting('vcs.start_server', 'false', parser='bool')
116 116 settings_maker.make_setting('vcs.backends', 'hg, git, svn', parser='list')
117 117 settings_maker.make_setting('vcs.connection_timeout', 3600, parser='int')
118 118
119 119 settings_maker.make_setting('vcs.methods.cache', True, parser='bool')
120 120
121 121 # repo_store path
122 122 settings_maker.make_setting('repo_store.path', '/var/opt/rhodecode_repo_store')
123 123 # Support legacy values of vcs.scm_app_implementation. Legacy
124 124 # configurations may use 'rhodecode.lib.middleware.utils.scm_app_http', or
125 125 # disabled since 4.13 'vcsserver.scm_app' which is now mapped to 'http'.
126 126 scm_app_impl = settings['vcs.scm_app_implementation']
127 127 if scm_app_impl in ['rhodecode.lib.middleware.utils.scm_app_http', 'vcsserver.scm_app']:
128 128 settings['vcs.scm_app_implementation'] = 'http'
129 129
130 130 settings_maker.make_setting('appenlight', False, parser='bool')
131 131
132 132 temp_store = tempfile.gettempdir()
133 133 tmp_cache_dir = jn(temp_store, 'rc_cache')
134 134
135 135 # save default, cache dir, and use it for all backends later.
136 136 default_cache_dir = settings_maker.make_setting(
137 137 'cache_dir',
138 138 default=tmp_cache_dir, default_when_empty=True,
139 139 parser='dir:ensured')
140 140
141 141 # exception store cache
142 142 settings_maker.make_setting(
143 143 'exception_tracker.store_path',
144 144 default=jn(default_cache_dir, 'exc_store'), default_when_empty=True,
145 145 parser='dir:ensured'
146 146 )
147 147
148 148 settings_maker.make_setting(
149 149 'celerybeat-schedule.path',
150 150 default=jn(default_cache_dir, 'celerybeat_schedule', 'celerybeat-schedule.db'), default_when_empty=True,
151 151 parser='file:ensured'
152 152 )
153 153
154 154 settings_maker.make_setting('exception_tracker.send_email', False, parser='bool')
155 155 settings_maker.make_setting('exception_tracker.email_prefix', '[RHODECODE ERROR]', default_when_empty=True)
156 156
157 157 # sessions, ensure file since no-value is memory
158 158 settings_maker.make_setting('beaker.session.type', 'file')
159 159 settings_maker.make_setting('beaker.session.data_dir', jn(default_cache_dir, 'session_data'))
160 160
161 161 # cache_general
162 162 settings_maker.make_setting('rc_cache.cache_general.backend', 'dogpile.cache.rc.file_namespace')
163 163 settings_maker.make_setting('rc_cache.cache_general.expiration_time', 60 * 60 * 12, parser='int')
164 164 settings_maker.make_setting('rc_cache.cache_general.arguments.filename', jn(default_cache_dir, 'rhodecode_cache_general.db'))
165 165
166 166 # cache_perms
167 167 settings_maker.make_setting('rc_cache.cache_perms.backend', 'dogpile.cache.rc.file_namespace')
168 168 settings_maker.make_setting('rc_cache.cache_perms.expiration_time', 60 * 60, parser='int')
169 169 settings_maker.make_setting('rc_cache.cache_perms.arguments.filename', jn(default_cache_dir, 'rhodecode_cache_perms_db'))
170 170
171 171 # cache_repo
172 172 settings_maker.make_setting('rc_cache.cache_repo.backend', 'dogpile.cache.rc.file_namespace')
173 173 settings_maker.make_setting('rc_cache.cache_repo.expiration_time', 60 * 60 * 24 * 30, parser='int')
174 174 settings_maker.make_setting('rc_cache.cache_repo.arguments.filename', jn(default_cache_dir, 'rhodecode_cache_repo_db'))
175 175
176 176 # cache_license
177 177 settings_maker.make_setting('rc_cache.cache_license.backend', 'dogpile.cache.rc.file_namespace')
178 178 settings_maker.make_setting('rc_cache.cache_license.expiration_time', 60 * 5, parser='int')
179 179 settings_maker.make_setting('rc_cache.cache_license.arguments.filename', jn(default_cache_dir, 'rhodecode_cache_license_db'))
180 180
181 181 # cache_repo_longterm memory, 96H
182 182 settings_maker.make_setting('rc_cache.cache_repo_longterm.backend', 'dogpile.cache.rc.memory_lru')
183 183 settings_maker.make_setting('rc_cache.cache_repo_longterm.expiration_time', 345600, parser='int')
184 184 settings_maker.make_setting('rc_cache.cache_repo_longterm.max_size', 10000, parser='int')
185 185
186 186 # sql_cache_short
187 187 settings_maker.make_setting('rc_cache.sql_cache_short.backend', 'dogpile.cache.rc.memory_lru')
188 188 settings_maker.make_setting('rc_cache.sql_cache_short.expiration_time', 30, parser='int')
189 189 settings_maker.make_setting('rc_cache.sql_cache_short.max_size', 10000, parser='int')
190 190
191 191 # archive_cache
192 192 settings_maker.make_setting('archive_cache.locking.url', 'redis://redis:6379/1')
193 193 settings_maker.make_setting('archive_cache.backend.type', 'filesystem')
194 194
195 195 settings_maker.make_setting('archive_cache.filesystem.store_dir', jn(default_cache_dir, 'archive_cache'), default_when_empty=True,)
196 196 settings_maker.make_setting('archive_cache.filesystem.cache_shards', 8, parser='int')
197 197 settings_maker.make_setting('archive_cache.filesystem.cache_size_gb', 10, parser='float')
198 198 settings_maker.make_setting('archive_cache.filesystem.eviction_policy', 'least-recently-stored')
199 199
200 200 settings_maker.make_setting('archive_cache.filesystem.retry', False, parser='bool')
201 201 settings_maker.make_setting('archive_cache.filesystem.retry_backoff', 1, parser='int')
202 202 settings_maker.make_setting('archive_cache.filesystem.retry_attempts', 10, parser='int')
203 203
204 204 settings_maker.make_setting('archive_cache.objectstore.url', jn(default_cache_dir, 'archive_cache'), default_when_empty=True,)
205 205 settings_maker.make_setting('archive_cache.objectstore.key', '')
206 206 settings_maker.make_setting('archive_cache.objectstore.secret', '')
207 settings_maker.make_setting('archive_cache.objectstore.bucket_root', 'rhodecode-archive-cache')
207 208 settings_maker.make_setting('archive_cache.objectstore.bucket_shards', 8, parser='int')
208 209
209 210 settings_maker.make_setting('archive_cache.objectstore.cache_size_gb', 10, parser='float')
210 211 settings_maker.make_setting('archive_cache.objectstore.eviction_policy', 'least-recently-stored')
211 212
212 213 settings_maker.make_setting('archive_cache.objectstore.retry', False, parser='bool')
213 214 settings_maker.make_setting('archive_cache.objectstore.retry_backoff', 1, parser='int')
214 215 settings_maker.make_setting('archive_cache.objectstore.retry_attempts', 10, parser='int')
215 216
216 217 settings_maker.env_expand()
217 218
218 219 # configure instance id
219 220 config_utils.set_instance_id(settings)
220 221
221 222 return settings
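
The hunk above wires the new `archive_cache.objectstore.bucket_root` key into settings sanitization, next to the existing objectstore options. A rough, hypothetical stand-in for `SettingsMaker.make_setting()` shows the default-when-missing behavior the new line relies on; the real class in `rhodecode.config.settings_maker` does considerably more (typed parsers, `dir:ensured`/`file:ensured` handling, env expansion), so this is a sketch of the semantics only:

```python
# Simplified, hypothetical stand-in for SettingsMaker.make_setting():
# apply a default when the key is missing (or empty, if requested),
# then return the effective value.
def make_setting(settings: dict, key: str, default, default_when_empty=False):
    current = settings.get(key)
    if key not in settings or (default_when_empty and not current):
        settings[key] = default
    return settings[key]

settings = {}  # e.g. parsed from the [app:main] section of rhodecode.ini
bucket_root = make_setting(
    settings, 'archive_cache.objectstore.bucket_root', 'rhodecode-archive-cache')
shards = make_setting(settings, 'archive_cache.objectstore.bucket_shards', 8)
print(bucket_root, shards)  # rhodecode-archive-cache 8
```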
@@ -1,166 +1,167 b''
1 1 # Copyright (C) 2015-2024 RhodeCode GmbH
2 2 #
3 3 # This program is free software: you can redistribute it and/or modify
4 4 # it under the terms of the GNU Affero General Public License, version 3
5 5 # (only), as published by the Free Software Foundation.
6 6 #
7 7 # This program is distributed in the hope that it will be useful,
8 8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 10 # GNU General Public License for more details.
11 11 #
12 12 # You should have received a copy of the GNU Affero General Public License
13 13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 14 #
15 15 # This program is dual-licensed. If you wish to learn more about the
16 16 # RhodeCode Enterprise Edition, including its added features, Support services,
17 17 # and proprietary license terms, please see https://rhodecode.com/licenses/
18 18
19 19 import codecs
20 20 import hashlib
21 21 import logging
22 22 import os
23 23
24 24 import fsspec
25 25
26 26 from .base import BaseCache, BaseShard
27 27 from ..utils import ShardFileReader, NOT_GIVEN
28 28 from ...type_utils import str2bool
29 29
30 30 log = logging.getLogger(__name__)
31 31
32 32
33 33 class FileSystemShard(BaseShard):
34 34
35 35 def __init__(self, index, directory, **settings):
36 36 self._index = index
37 37 self._directory = directory
38 38 self.storage_type = 'directory'
39 39 self.fs = fsspec.filesystem('file')
40 40
41 41 @property
42 42 def directory(self):
43 43 """Cache directory."""
44 44 return self._directory
45 45
46 46 def _get_keyfile(self, archive_key) -> tuple[str, str]:
47 47 key_file = f'{archive_key}.{self.key_suffix}'
48 48 return key_file, os.path.join(self.directory, key_file)
49 49
50 50 def _get_writer(self, path, mode):
51 51 for count in range(1, 11):
52 52 try:
53 53 # Another cache may have deleted the directory before
54 54 # the file could be opened.
55 55 return self.fs.open(path, mode)
56 56 except OSError:
57 57 if count == 10:
58 58 # Give up after 10 tries to open the file.
59 59 raise
60 60 continue
61 61
62 62 def _write_file(self, full_path, iterator, mode):
63 63 # ensure dir exists
64 64 destination, _ = os.path.split(full_path)
65 65 if not self.fs.exists(destination):
66 66 self.fs.makedirs(destination)
67 67
68 68 writer = self._get_writer(full_path, mode)
69 69
70 70 digest = hashlib.sha256()
71 71 with writer:
72 72 size = 0
73 73 for chunk in iterator:
74 74 size += len(chunk)
75 75 digest.update(chunk)
76 76 writer.write(chunk)
77 77 writer.flush()
78 78 # Get the file descriptor
79 79 fd = writer.fileno()
80 80
81 81 # Sync the file descriptor to disk, helps with NFS cases...
82 82 os.fsync(fd)
83 83 sha256 = digest.hexdigest()
84 84 log.debug('written new archive cache under %s, sha256: %s', full_path, sha256)
85 85 return size, sha256
86 86
87 87 def store(self, key, value_reader, metadata: dict | None = None):
88 88 return self._store(key, value_reader, metadata, mode='xb')
89 89
90 90 def fetch(self, key, retry=NOT_GIVEN, retry_attempts=NOT_GIVEN, retry_backoff=1) -> tuple[ShardFileReader, dict]:
91 91 return self._fetch(key, retry, retry_attempts, retry_backoff)
92 92
93 93 def remove(self, key):
94 94 return self._remove(key)
95 95
96 96 def random_filename(self):
97 97 """Return filename and full-path tuple for file storage.
98 98
99 99 Filename will be a randomly generated 28 character hexadecimal string
100 100 with ".archive_cache" suffixed. Two levels of sub-directories will be used to
101 101 reduce the size of directories. On older filesystems, lookups in
102 102 directories with many files may be slow.
103 103 """
104 104
105 105 hex_name = codecs.encode(os.urandom(16), 'hex').decode('utf-8')
106 106
107 107 archive_name = hex_name[4:] + '.archive_cache'
108 108 filename = f"{hex_name[:2]}/{hex_name[2:4]}/{archive_name}"
109 109
110 110 full_path = os.path.join(self.directory, filename)
111 111 return archive_name, full_path
112 112
113 113 def __repr__(self):
114 114 return f'{self.__class__.__name__}(index={self._index}, dir={self.directory})'
115 115
116 116
117 117 class FileSystemFanoutCache(BaseCache):
118 shard_name = 'shard_%03d'
118 119
119 120 def __init__(self, locking_url, **settings):
120 121 """
121 122 Initialize file system cache instance.
122 123
123 124 :param str locking_url: redis url for a lock
124 125 :param settings: settings dict
125 126
126 127 """
127 128 self._locking_url = locking_url
128 129 self._config = settings
129 130 cache_dir = self.get_conf('archive_cache.filesystem.store_dir')
130 131 directory = str(cache_dir)
131 132 directory = os.path.expanduser(directory)
132 133 directory = os.path.expandvars(directory)
133 134 self._directory = directory
134 135 self._storage_path = directory
135 136
136 137 # check if it's ok to write, and re-create the archive cache
137 138 if not os.path.isdir(self._directory):
138 139 os.makedirs(self._directory, exist_ok=True)
139 140
140 141 self._count = int(self.get_conf('archive_cache.filesystem.cache_shards', pop=True))
141 142
142 143 self._eviction_policy = self.get_conf('archive_cache.filesystem.eviction_policy', pop=True)
143 144 self._cache_size_limit = self.gb_to_bytes(int(self.get_conf('archive_cache.filesystem.cache_size_gb')))
144 145
145 146 self.retry = str2bool(self.get_conf('archive_cache.filesystem.retry', pop=True))
146 147 self.retry_attempts = int(self.get_conf('archive_cache.filesystem.retry_attempts', pop=True))
147 148 self.retry_backoff = int(self.get_conf('archive_cache.filesystem.retry_backoff', pop=True))
148 149
149 150 log.debug('Initializing archival cache instance under %s', self._directory)
150 151 self._shards = tuple(
151 152 FileSystemShard(
152 153 index=num,
153 directory=os.path.join(directory, 'shard_%03d' % num),
154 directory=os.path.join(directory, self.shard_name % num),
154 155 **settings,
155 156 )
156 157 for num in range(self._count)
157 158 )
158 159 self._hash = self._shards[0].hash
159 160
160 161 def _get_shard(self, key) -> FileSystemShard:
161 162 index = self._hash(key) % self._count
162 163 shard = self._shards[index]
163 164 return shard
164 165
165 166 def _get_size(self, shard, archive_path):
166 167 return os.stat(archive_path).st_size
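
This hunk hoists the shard directory template out of `__init__` into a class-level `shard_name` attribute, so each backend can declare its own naming while sharing the same fan-out: a stable hash of the key picks one of N shards, exactly as `_get_shard()` does. A self-contained sketch of that pattern follows; it assumes a SHA-256-based key hash for illustration, whereas the real `hash` is taken from the shard implementation in `.base`:

```python
# Sketch of the fan-out pattern both caches share: class-level shard_name
# template plus hash-modulo routing. The hash function here is an assumption
# standing in for the real shard.hash.
import hashlib
import os


class FanoutSketch:
    shard_name = 'shard_%03d'  # FileSystemFanoutCache naming

    def __init__(self, root: str, count: int):
        self.count = count
        self.shard_dirs = [
            os.path.join(root, self.shard_name % num) for num in range(count)
        ]

    def _hash(self, key: str) -> int:
        return int(hashlib.sha256(key.encode()).hexdigest(), 16)

    def shard_for(self, key: str) -> str:
        # same key always routes to the same shard
        return self.shard_dirs[self._hash(key) % self.count]


class ObjectStoreSketch(FanoutSketch):
    shard_name = 'shard-bucket-%03d'  # ObjectStoreCache naming


fs_cache = FanoutSketch('/var/opt/rc_cache/archive_cache', count=8)
print(fs_cache.shard_for('repo-1.tar.gz'))   # .../shard_00X

s3_cache = ObjectStoreSketch('rhodecode-archive-cache', count=8)
print(s3_cache.shard_for('repo-1.tar.gz'))   # rhodecode-archive-cache/shard-bucket-00X
```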
@@ -1,150 +1,158 b''
1 1 # Copyright (C) 2015-2024 RhodeCode GmbH
2 2 #
3 3 # This program is free software: you can redistribute it and/or modify
4 4 # it under the terms of the GNU Affero General Public License, version 3
5 5 # (only), as published by the Free Software Foundation.
6 6 #
7 7 # This program is distributed in the hope that it will be useful,
8 8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 10 # GNU General Public License for more details.
11 11 #
12 12 # You should have received a copy of the GNU Affero General Public License
13 13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 14 #
15 15 # This program is dual-licensed. If you wish to learn more about the
16 16 # RhodeCode Enterprise Edition, including its added features, Support services,
17 17 # and proprietary license terms, please see https://rhodecode.com/licenses/
18 18
19 19 import codecs
20 20 import hashlib
21 21 import logging
22 22 import os
23 23
24 24 import fsspec
25 25
26 26 from .base import BaseCache, BaseShard
27 27 from ..utils import ShardFileReader, NOT_GIVEN
28 28 from ...type_utils import str2bool
29 29
30 30 log = logging.getLogger(__name__)
31 31
32 32
33 33 class S3Shard(BaseShard):
34 34
35 35 def __init__(self, index, bucket, **settings):
36 36 self._index = index
37 37 self._bucket = bucket
38 38 self.storage_type = 'bucket'
39 39
40 40 endpoint_url = settings.pop('archive_cache.objectstore.url')
41 41 key = settings.pop('archive_cache.objectstore.key')
42 42 secret = settings.pop('archive_cache.objectstore.secret')
43 43
44 # TODO: Add it all over the place...
45 self._bucket_root = settings.pop('archive_cache.objectstore.bucket_root')
46
44 47 self.fs = fsspec.filesystem('s3', anon=False, endpoint_url=endpoint_url, key=key, secret=secret)
45 48
46 49 @property
47 50 def bucket(self):
48 51 """Cache bucket."""
49 return self._bucket
52 return os.path.join(self._bucket_root, self._bucket)
50 53
51 54 def _get_keyfile(self, archive_key) -> tuple[str, str]:
52 55 key_file = f'{archive_key}-{self.key_suffix}'
53 56 return key_file, os.path.join(self.bucket, key_file)
54 57
55 58 def _get_writer(self, path, mode):
56 59 return self.fs.open(path, 'wb')
57 60
58 61 def _write_file(self, full_path, iterator, mode):
62 if self._bucket_root:
63 if not self.fs.exists(self._bucket_root):
64 self.fs.mkdir(self._bucket_root)
65
59 66 # ensure bucket exists
60 67 destination = self.bucket
61 68 if not self.fs.exists(destination):
62 69 self.fs.mkdir(destination, s3_additional_kwargs={})
63 70
64 71 writer = self._get_writer(full_path, mode)
65 72
66 73 digest = hashlib.sha256()
67 74 with writer:
68 75 size = 0
69 76 for chunk in iterator:
70 77 size += len(chunk)
71 78 digest.update(chunk)
72 79 writer.write(chunk)
73 80
74 81 sha256 = digest.hexdigest()
75 82 log.debug('written new archive cache under %s, sha256: %s', full_path, sha256)
76 83 return size, sha256
77 84
78 85 def store(self, key, value_reader, metadata: dict | None = None):
79 86 return self._store(key, value_reader, metadata, mode='wb')
80 87
81 88 def fetch(self, key, retry=NOT_GIVEN, retry_attempts=NOT_GIVEN, retry_backoff=1) -> tuple[ShardFileReader, dict]:
82 89 return self._fetch(key, retry, retry_attempts, retry_backoff)
83 90
84 91 def remove(self, key):
85 92 return self._remove(key)
86 93
87 94 def random_filename(self):
88 95 """Return filename and full-path tuple for file storage.
89 96
90 97 Filename will be a randomly generated 28 character hexadecimal string
91 98 with ".archive_cache" suffixed. Two levels of sub-directories will be used to
92 99 reduce the size of directories. On older filesystems, lookups in
93 100 directories with many files may be slow.
94 101 """
95 102
96 103 hex_name = codecs.encode(os.urandom(16), 'hex').decode('utf-8')
97 104
98 105 archive_name = hex_name[4:] + '.archive_cache'
99 106 filename = f"{hex_name[:2]}-{hex_name[2:4]}-{archive_name}"
100 107
101 108 full_path = os.path.join(self.bucket, filename)
102 109 return archive_name, full_path
103 110
104 111 def __repr__(self):
105 112 return f'{self.__class__.__name__}(index={self._index}, bucket={self.bucket})'
106 113
107 114
108 115 class ObjectStoreCache(BaseCache):
116 shard_name = 'shard-bucket-%03d'
109 117
110 118 def __init__(self, locking_url, **settings):
111 119 """
112 120 Initialize objectstore cache instance.
113 121
114 122 :param str locking_url: redis url for a lock
115 123 :param settings: settings dict
116 124
117 125 """
118 126 self._locking_url = locking_url
119 127 self._config = settings
120 128
121 129 objectstore_url = self.get_conf('archive_cache.objectstore.url')
122 130 self._storage_path = objectstore_url
123 131
124 132 self._count = int(self.get_conf('archive_cache.objectstore.bucket_shards', pop=True))
125 133
126 134 self._eviction_policy = self.get_conf('archive_cache.objectstore.eviction_policy', pop=True)
127 135 self._cache_size_limit = self.gb_to_bytes(int(self.get_conf('archive_cache.objectstore.cache_size_gb')))
128 136
129 137 self.retry = str2bool(self.get_conf('archive_cache.objectstore.retry', pop=True))
130 138 self.retry_attempts = int(self.get_conf('archive_cache.objectstore.retry_attempts', pop=True))
131 139 self.retry_backoff = int(self.get_conf('archive_cache.objectstore.retry_backoff', pop=True))
132 140
133 141 log.debug('Initializing archival cache instance under %s', objectstore_url)
134 142 self._shards = tuple(
135 143 S3Shard(
136 144 index=num,
137 bucket='rhodecode-archivecache-%03d' % num,
145 bucket=self.shard_name % num,
138 146 **settings,
139 147 )
140 148 for num in range(self._count)
141 149 )
142 150 self._hash = self._shards[0].hash
143 151
144 152 def _get_shard(self, key) -> S3Shard:
145 153 index = self._hash(key) % self._count
146 154 shard = self._shards[index]
147 155 return shard
148 156
149 157 def _get_size(self, shard, archive_path):
150 158 return shard.fs.info(archive_path)['size']
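
The net effect on the object store is a layout change: shards previously lived in separate top-level buckets named `rhodecode-archivecache-%03d`, whereas they now nest under the single configurable `bucket_root`, because the new `bucket` property joins `bucket_root` with the shard name and `_write_file` creates the root bucket before the shard path. A path-only illustration, with no network I/O:

```python
# Before/after S3 path layout implied by this commit; composition only.
import os

bucket_root = 'rhodecode-archive-cache'  # new archive_cache.objectstore.bucket_root
shards = 8

old_layout = ['rhodecode-archivecache-%03d' % n for n in range(shards)]
new_layout = [os.path.join(bucket_root, 'shard-bucket-%03d' % n)
              for n in range(shards)]

print(old_layout[0])  # rhodecode-archivecache-000 (one bucket per shard)
print(new_layout[0])  # rhodecode-archive-cache/shard-bucket-000 (one shared bucket)
```

A single shared bucket means the deployment only needs create/list rights on one bucket, and quota or lifecycle rules can be applied in one place; that is a plausible motivation consistent with the commit message, though the commit itself does not spell it out.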