feat(redis): added redis prefix support for cache backend
super-admin
r1313:cbb3f19b default
@@ -1,191 +1,195 @@
1 1
2 2 ; #################################
3 3 ; RHODECODE VCSSERVER CONFIGURATION
4 4 ; #################################
5 5
6 6 [server:main]
7 7 ; COMMON HOST/IP CONFIG. This applies mostly to development setups;
8 8 ; host and port for gunicorn are controlled by gunicorn_conf.py
9 9 host = 0.0.0.0
10 10 port = 10010
11 11
12 12
13 13 ; ###########################
14 14 ; GUNICORN APPLICATION SERVER
15 15 ; ###########################
16 16
17 17 ; run with gunicorn --config gunicorn_conf.py --paste vcsserver.ini
18 18
19 19 ; Module to use, this setting shouldn't be changed
20 20 use = egg:gunicorn#main
21 21
22 22 [app:main]
23 23 ; The %(here)s variable will be replaced with the absolute path of the parent directory
24 24 ; of this file
25 25 ; Each option in app:main can be overridden by an environment variable
26 26 ;
27 27 ;To override an option:
28 28 ;
29 29 ;RC_<KeyName>
30 30 ;Everything should be uppercase, . and - should be replaced by _.
31 31 ;For example, if you have these configuration settings:
32 32 ;rc_cache.repo_object.backend = foo
33 33 ;can be overridden by
34 34 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
35 35
36 36 use = egg:rhodecode-vcsserver
37 37
38 38
39 39 ; #############
40 40 ; DEBUG OPTIONS
41 41 ; #############
42 42
43 43 # During development we want to have the debug toolbar enabled
44 44 pyramid.includes =
45 45 pyramid_debugtoolbar
46 46
47 47 debugtoolbar.hosts = 0.0.0.0/0
48 48 debugtoolbar.exclude_prefixes =
49 49 /css
50 50 /fonts
51 51 /images
52 52 /js
53 53
54 54 ; #################
55 55 ; END DEBUG OPTIONS
56 56 ; #################
57 57
58 58 ; Pyramid default locales, we need this to be set
59 59 #pyramid.default_locale_name = en
60 60
61 61 ; default locale used by VCS systems
62 62 #locale = en_US.UTF-8
63 63
64 64 ; path to binaries (hg,git,svn) for vcsserver, it should be set by the installer
65 65 ; at installation time, e.g. /home/user/.rccontrol/vcsserver-1/profile/bin
66 66 ; or /usr/local/bin/rhodecode_bin/vcs_bin
67 67 core.binary_dir =
68 68
69 69 ; Redis connection settings for svn integration logic
70 70 ; This connection string needs to be the same on ce and vcsserver
71 71 vcs.svn.redis_conn = redis://redis:6379/0
72 72
73 73 ; Custom exception store path, defaults to TMPDIR
74 74 ; This is used to store exceptions from RhodeCode in a shared directory
75 75 #exception_tracker.store_path =
76 76
77 77 ; #############
78 78 ; DOGPILE CACHE
79 79 ; #############
80 80
81 81 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
82 82 ; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space
83 83 #cache_dir = %(here)s/data
84 84
85 85 ; ***************************************
86 86 ; `repo_object` cache, default file based
87 87 ; ***************************************
88 88
89 89 ; `repo_object` cache settings for vcs methods for repositories
90 90 #rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
91 91
92 92 ; cache auto-expires after N seconds
93 93 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
94 94 #rc_cache.repo_object.expiration_time = 2592000
95 95
96 96 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
97 97 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache_repo_object.db
98 98
99 99 ; ***********************************************************
100 100 ; `repo_object` cache with redis backend
101 101 ; recommended for larger instances and for better performance
102 102 ; ***********************************************************
103 103
104 104 ; `repo_object` cache settings for vcs methods for repositories
105 105 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
106 106
107 107 ; cache auto-expires after N seconds
108 108 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
109 109 #rc_cache.repo_object.expiration_time = 2592000
110 110
111 111 ; redis_expiration_time needs to be greater than expiration_time
112 112 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000
113 113
114 114 #rc_cache.repo_object.arguments.host = localhost
115 115 #rc_cache.repo_object.arguments.port = 6379
116 116 #rc_cache.repo_object.arguments.db = 5
117 117 #rc_cache.repo_object.arguments.socket_timeout = 30
118 118 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
119 119 #rc_cache.repo_object.arguments.distributed_lock = true
120 120
121 121 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
122 122 #rc_cache.repo_object.arguments.lock_auto_renewal = true
123 123
124 ; prefix for redis keys used by this cache backend; the final key is constructed as {custom-prefix}{key}
125 #rc_cache.repo_object.arguments.key_prefix = custom-prefix-
126
127
124 128 ; Statsd client config, this is used to send metrics to statsd
125 129 ; We recommend setting statsd_exporter and scraping metrics using Prometheus
126 130 #statsd.enabled = false
127 131 #statsd.statsd_host = 0.0.0.0
128 132 #statsd.statsd_port = 8125
129 133 #statsd.statsd_prefix =
130 134 #statsd.statsd_ipv6 = false
131 135
132 136 ; configure logging automatically at server startup; set to false
133 137 ; to use the custom logging config below.
134 138 ; RC_LOGGING_FORMATTER
135 139 ; RC_LOGGING_LEVEL
136 140 ; env variables can control the settings for logging in case of autoconfigure
137 141
138 142 #logging.autoconfigure = true
139 143
140 144 ; specify your own custom logging config file to configure logging
141 145 #logging.logging_conf_file = /path/to/custom_logging.ini
142 146
143 147 ; #####################
144 148 ; LOGGING CONFIGURATION
145 149 ; #####################
146 150
147 151 [loggers]
148 152 keys = root, vcsserver
149 153
150 154 [handlers]
151 155 keys = console
152 156
153 157 [formatters]
154 158 keys = generic, json
155 159
156 160 ; #######
157 161 ; LOGGERS
158 162 ; #######
159 163 [logger_root]
160 164 level = NOTSET
161 165 handlers = console
162 166
163 167 [logger_vcsserver]
164 168 level = DEBUG
165 169 handlers =
166 170 qualname = vcsserver
167 171 propagate = 1
168 172
169 173 ; ########
170 174 ; HANDLERS
171 175 ; ########
172 176
173 177 [handler_console]
174 178 class = StreamHandler
175 179 args = (sys.stderr, )
176 180 level = DEBUG
177 181 ; To enable JSON formatted logs replace 'generic' with 'json'
178 182 ; This allows sending properly formatted logs to grafana loki or elasticsearch
179 183 formatter = generic
180 184
181 185 ; ##########
182 186 ; FORMATTERS
183 187 ; ##########
184 188
185 189 [formatter_generic]
186 190 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
187 191 datefmt = %Y-%m-%d %H:%M:%S
188 192
189 193 [formatter_json]
190 194 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
191 195 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
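For illustration, here is a minimal sketch of what the new key_prefix option does to keys stored in Redis, per the {custom-prefix}{key} rule documented above. The helper function and sample values are hypothetical, not part of the shipped code:

    # Hypothetical sketch of the {custom-prefix}{key} composition described above.
    def final_redis_key(store_prefix: str, cache_key: str) -> str:
        # with rc_cache.repo_object.arguments.key_prefix = custom-prefix-
        return f"{store_prefix}{cache_key}"

    print(final_redis_key("custom-prefix-", "redis_msgpack_backend:repo_object_abc"))
    # -> custom-prefix-redis_msgpack_backend:repo_object_abc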
@@ -1,171 +1,175 @@
1 1
2 2 ; #################################
3 3 ; RHODECODE VCSSERVER CONFIGURATION
4 4 ; #################################
5 5
6 6 [server:main]
7 7 ; COMMON HOST/IP CONFIG. This applies mostly to development setups;
8 8 ; host and port for gunicorn are controlled by gunicorn_conf.py
9 9 host = 0.0.0.0
10 10 port = 10010
11 11
12 12
13 13 ; ###########################
14 14 ; GUNICORN APPLICATION SERVER
15 15 ; ###########################
16 16
17 17 ; run with gunicorn --config gunicorn_conf.py --paste vcsserver.ini
18 18
19 19 ; Module to use, this setting shouldn't be changed
20 20 use = egg:gunicorn#main
21 21
22 22 [app:main]
23 23 ; The %(here)s variable will be replaced with the absolute path of the parent directory
24 24 ; of this file
25 25 ; Each option in app:main can be overridden by an environment variable
26 26 ;
27 27 ;To override an option:
28 28 ;
29 29 ;RC_<KeyName>
30 30 ;Everything should be uppercase, . and - should be replaced by _.
31 31 ;For example, if you have these configuration settings:
32 32 ;rc_cache.repo_object.backend = foo
33 33 ;can be overridden by
34 34 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
35 35
36 36 use = egg:rhodecode-vcsserver
37 37
38 38 ; Pyramid default locales, we need this to be set
39 39 #pyramid.default_locale_name = en
40 40
41 41 ; default locale used by VCS systems
42 42 #locale = en_US.UTF-8
43 43
44 44 ; path to binaries (hg,git,svn) for vcsserver, it should be set by the installer
45 45 ; at installation time, e.g. /home/user/.rccontrol/vcsserver-1/profile/bin
46 46 ; or /usr/local/bin/rhodecode_bin/vcs_bin
47 47 core.binary_dir =
48 48
49 49 ; Redis connection settings for svn integration logic
50 50 ; This connection string needs to be the same on ce and vcsserver
51 51 vcs.svn.redis_conn = redis://redis:6379/0
52 52
53 53 ; Custom exception store path, defaults to TMPDIR
54 54 ; This is used to store exceptions from RhodeCode in a shared directory
55 55 #exception_tracker.store_path =
56 56
57 57 ; #############
58 58 ; DOGPILE CACHE
59 59 ; #############
60 60
61 61 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
62 62 ; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space
63 63 #cache_dir = %(here)s/data
64 64
65 65 ; ***************************************
66 66 ; `repo_object` cache, default file based
67 67 ; ***************************************
68 68
69 69 ; `repo_object` cache settings for vcs methods for repositories
70 70 #rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
71 71
72 72 ; cache auto-expires after N seconds
73 73 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
74 74 #rc_cache.repo_object.expiration_time = 2592000
75 75
76 76 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
77 77 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache_repo_object.db
78 78
79 79 ; ***********************************************************
80 80 ; `repo_object` cache with redis backend
81 81 ; recommended for larger instances and for better performance
82 82 ; ***********************************************************
83 83
84 84 ; `repo_object` cache settings for vcs methods for repositories
85 85 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
86 86
87 87 ; cache auto-expires after N seconds
88 88 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
89 89 #rc_cache.repo_object.expiration_time = 2592000
90 90
91 91 ; redis_expiration_time needs to be greater than expiration_time
92 92 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000
93 93
94 94 #rc_cache.repo_object.arguments.host = localhost
95 95 #rc_cache.repo_object.arguments.port = 6379
96 96 #rc_cache.repo_object.arguments.db = 5
97 97 #rc_cache.repo_object.arguments.socket_timeout = 30
98 98 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
99 99 #rc_cache.repo_object.arguments.distributed_lock = true
100 100
101 101 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
102 102 #rc_cache.repo_object.arguments.lock_auto_renewal = true
103 103
104 ; prefix for redis keys used by this cache backend; the final key is constructed as {custom-prefix}{key}
105 #rc_cache.repo_object.arguments.key_prefix = custom-prefix-
106
107
104 108 ; Statsd client config, this is used to send metrics to statsd
105 109 ; We recommend setting statsd_exporter and scraping metrics using Prometheus
106 110 #statsd.enabled = false
107 111 #statsd.statsd_host = 0.0.0.0
108 112 #statsd.statsd_port = 8125
109 113 #statsd.statsd_prefix =
110 114 #statsd.statsd_ipv6 = false
111 115
112 116 ; configure logging automatically at server startup; set to false
113 117 ; to use the custom logging config below.
114 118 ; RC_LOGGING_FORMATTER
115 119 ; RC_LOGGING_LEVEL
116 120 ; env variables can control the settings for logging in case of autoconfigure
117 121
118 122 #logging.autoconfigure = true
119 123
120 124 ; specify your own custom logging config file to configure logging
121 125 #logging.logging_conf_file = /path/to/custom_logging.ini
122 126
123 127 ; #####################
124 128 ; LOGGING CONFIGURATION
125 129 ; #####################
126 130
127 131 [loggers]
128 132 keys = root, vcsserver
129 133
130 134 [handlers]
131 135 keys = console
132 136
133 137 [formatters]
134 138 keys = generic, json
135 139
136 140 ; #######
137 141 ; LOGGERS
138 142 ; #######
139 143 [logger_root]
140 144 level = NOTSET
141 145 handlers = console
142 146
143 147 [logger_vcsserver]
144 148 level = INFO
145 149 handlers =
146 150 qualname = vcsserver
147 151 propagate = 1
148 152
149 153 ; ########
150 154 ; HANDLERS
151 155 ; ########
152 156
153 157 [handler_console]
154 158 class = StreamHandler
155 159 args = (sys.stderr, )
156 160 level = INFO
157 161 ; To enable JSON formatted logs replace 'generic' with 'json'
158 162 ; This allows sending properly formatted logs to grafana loki or elasticsearch
159 163 formatter = generic
160 164
161 165 ; ##########
162 166 ; FORMATTERS
163 167 ; ##########
164 168
165 169 [formatter_generic]
166 170 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
167 171 datefmt = %Y-%m-%d %H:%M:%S
168 172
169 173 [formatter_json]
170 174 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
171 175 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
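Per the RC_<KeyName> override convention documented at the top of this file, the new setting can also be supplied through the environment. Below is a small sketch of the documented name mangling (uppercase, with . and - replaced by _); the option name is the real one, the helper function is illustrative:

    # Sketch of the documented env-override rule: uppercase, '.' and '-' -> '_'
    def rc_env_name(option: str) -> str:
        return option.upper().replace(".", "_").replace("-", "_")

    print(rc_env_name("rc_cache.repo_object.arguments.key_prefix"))
    # -> RC_CACHE_REPO_OBJECT_ARGUMENTS_KEY_PREFIX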
@@ -1,310 +1,313 @@
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 #import errno
19 19 import fcntl
20 20 import functools
21 21 import logging
22 22 import os
23 23 import pickle
24 24 #import time
25 25
26 26 #import gevent
27 27 import msgpack
28 28 import redis
29 29
30 30 flock_org = fcntl.flock
31 31 from typing import Union
32 32
33 33 from dogpile.cache.api import Deserializer, Serializer
34 34 from dogpile.cache.backends import file as file_backend
35 35 from dogpile.cache.backends import memory as memory_backend
36 36 from dogpile.cache.backends import redis as redis_backend
37 37 from dogpile.cache.backends.file import FileLock
38 38 from dogpile.cache.util import memoized_property
39 39
40 40 from ...lib.memory_lru_dict import LRUDict, LRUDictDebug
41 41 from ...lib.str_utils import safe_bytes, safe_str
42 42 from ...lib.type_utils import str2bool
43 43
44 44 _default_max_size = 1024
45 45
46 46 log = logging.getLogger(__name__)
47 47
48 48
49 49 class LRUMemoryBackend(memory_backend.MemoryBackend):
50 50 key_prefix = 'lru_mem_backend'
51 51 pickle_values = False
52 52
53 53 def __init__(self, arguments):
54 54 self.max_size = arguments.pop('max_size', _default_max_size)
55 55
56 56 LRUDictClass = LRUDict
57 57 if arguments.pop('log_key_count', None):
58 58 LRUDictClass = LRUDictDebug
59 59
60 60 arguments['cache_dict'] = LRUDictClass(self.max_size)
61 61 super().__init__(arguments)
62 62
63 63 def __repr__(self):
64 64 return f'{self.__class__}(maxsize=`{self.max_size}`)'
65 65
66 66 def __str__(self):
67 67 return self.__repr__()
68 68
69 69 def delete(self, key):
70 70 try:
71 71 del self._cache[key]
72 72 except KeyError:
73 73 # we don't care if key isn't there at deletion
74 74 pass
75 75
76 76 def list_keys(self, prefix):
77 77 return list(self._cache.keys())
78 78
79 79 def delete_multi(self, keys):
80 80 for key in keys:
81 81 self.delete(key)
82 82
83 83 def delete_multi_by_prefix(self, prefix):
84 84 cache_keys = self.list_keys(prefix=prefix)
85 85 num_affected_keys = len(cache_keys)
86 86 if num_affected_keys:
87 87 self.delete_multi(cache_keys)
88 88 return num_affected_keys
89 89
90 90
91 91 class PickleSerializer:
92 92 serializer: None | Serializer = staticmethod( # type: ignore
93 93 functools.partial(pickle.dumps, protocol=pickle.HIGHEST_PROTOCOL)
94 94 )
95 95 deserializer: None | Deserializer = staticmethod( # type: ignore
96 96 functools.partial(pickle.loads)
97 97 )
98 98
99 99
100 100 class MsgPackSerializer:
101 101 serializer: None | Serializer = staticmethod( # type: ignore
102 102 msgpack.packb
103 103 )
104 104 deserializer: None | Deserializer = staticmethod( # type: ignore
105 105 functools.partial(msgpack.unpackb, use_list=False)
106 106 )
107 107
108 108
109 109 class CustomLockFactory(FileLock):
110 110
111 111 pass
112 112
113 113
114 114 class FileNamespaceBackend(PickleSerializer, file_backend.DBMBackend):
115 115 key_prefix = 'file_backend'
116 116
117 117 def __init__(self, arguments):
118 118 arguments['lock_factory'] = CustomLockFactory
119 119 db_file = arguments.get('filename')
120 120
121 121 log.debug('initializing cache-backend=%s db in %s', self.__class__.__name__, db_file)
122 122 db_file_dir = os.path.dirname(db_file)
123 123 if not os.path.isdir(db_file_dir):
124 124 os.makedirs(db_file_dir)
125 125
126 126 try:
127 127 super().__init__(arguments)
128 128 except Exception:
129 129 log.exception('Failed to initialize db at: %s', db_file)
130 130 raise
131 131
132 132 def __repr__(self):
133 133 return f'{self.__class__}(file=`{self.filename}`)'
134 134
135 135 def __str__(self):
136 136 return self.__repr__()
137 137
138 138 def _get_keys_pattern(self, prefix: bytes = b''):
139 139 return b'%b:%b' % (safe_bytes(self.key_prefix), safe_bytes(prefix))
140 140
141 141 def list_keys(self, prefix: bytes = b''):
142 142 prefix = self._get_keys_pattern(prefix)
143 143
144 144 def cond(dbm_key: bytes):
145 145 if not prefix:
146 146 return True
147 147
148 148 if dbm_key.startswith(prefix):
149 149 return True
150 150 return False
151 151
152 152 with self._dbm_file(True) as dbm:
153 153 try:
154 154 return list(filter(cond, dbm.keys()))
155 155 except Exception:
156 156 log.error('Failed to fetch DBM keys from DB: %s', self.get_store())
157 157 raise
158 158
159 159 def delete_multi_by_prefix(self, prefix):
160 160 cache_keys = self.list_keys(prefix=prefix)
161 161 num_affected_keys = len(cache_keys)
162 162 if num_affected_keys:
163 163 self.delete_multi(cache_keys)
164 164 return num_affected_keys
165 165
166 166 def get_store(self):
167 167 return self.filename
168 168
169 169 def cleanup_store(self):
170 170 for ext in ("db", "dat", "pag", "dir"):
171 171 final_filename = self.filename + os.extsep + ext
172 172 if os.path.exists(final_filename):
173 173 os.remove(final_filename)
174 174 log.warning('Removed dbm file %s', final_filename)
175 175
176 176
177 177 class BaseRedisBackend(redis_backend.RedisBackend):
178 178 key_prefix = ''
179 179
180 180 def __init__(self, arguments):
181 181 self.db_conn = arguments.get('host', '') or arguments.get('url', '') or 'redis-host'
182 182 super().__init__(arguments)
183 183
184 184 self._lock_timeout = self.lock_timeout
185 185 self._lock_auto_renewal = str2bool(arguments.pop("lock_auto_renewal", True))
186 186
187 self._store_key_prefix = arguments.pop('key_prefix', '')
188 self.key_prefix = f'{self._store_key_prefix}{self.key_prefix}'
189
187 190 if self._lock_auto_renewal and not self._lock_timeout:
188 191 # set default timeout for auto_renewal
189 192 self._lock_timeout = 30
190 193
191 194 def __repr__(self):
192 195 return f'{self.__class__}(conn=`{self.db_conn}`)'
193 196
194 197 def __str__(self):
195 198 return self.__repr__()
196 199
197 200 def _create_client(self):
198 201 args = {}
199 202
200 203 if self.url is not None:
201 204 args.update(url=self.url)
202 205
203 206 else:
204 207 args.update(
205 208 host=self.host, password=self.password,
206 209 port=self.port, db=self.db
207 210 )
208 211
209 212 connection_pool = redis.ConnectionPool(**args)
210 213 self.writer_client = redis.StrictRedis(
211 214 connection_pool=connection_pool
212 215 )
213 216 self.reader_client = self.writer_client
214 217
215 218 def _get_keys_pattern(self, prefix: bytes = b''):
216 219 return b'%b:%b*' % (safe_bytes(self.key_prefix), safe_bytes(prefix))
217 220
218 221 def list_keys(self, prefix: bytes = b''):
219 222 prefix = self._get_keys_pattern(prefix)
220 223 return self.reader_client.keys(prefix)
221 224
222 225 def delete_multi_by_prefix(self, prefix, use_lua=False):
223 226 if use_lua:
224 227 # highly efficient Lua script to delete ALL keys by prefix...
225 228 lua = """local keys = redis.call('keys', ARGV[1])
226 229 for i=1,#keys,5000 do
227 230 redis.call('del', unpack(keys, i, math.min(i+(5000-1), #keys)))
228 231 end
229 232 return #keys"""
230 233 num_affected_keys = self.writer_client.eval(
231 234 lua,
232 235 0,
233 236 f"{prefix}*")
234 237 else:
235 238 cache_keys = self.list_keys(prefix=prefix)
236 239 num_affected_keys = len(cache_keys)
237 240 if num_affected_keys:
238 241 self.delete_multi(cache_keys)
239 242 return num_affected_keys
240 243
241 244 def get_store(self):
242 245 return self.reader_client.connection_pool
243 246
244 247 def get_mutex(self, key):
245 248 if self.distributed_lock:
246 lock_key = f'_lock_{safe_str(key)}'
249 lock_key = f'{self._store_key_prefix}_lock_{safe_str(key)}'
247 250 return get_mutex_lock(
248 251 self.writer_client, lock_key,
249 252 self._lock_timeout,
250 253 auto_renewal=self._lock_auto_renewal
251 254 )
252 255 else:
253 256 return None
254 257
255 258
256 259 class RedisPickleBackend(PickleSerializer, BaseRedisBackend):
257 260 key_prefix = 'redis_pickle_backend'
258 261 pass
259 262
260 263
261 264 class RedisMsgPackBackend(MsgPackSerializer, BaseRedisBackend):
262 265 key_prefix = 'redis_msgpack_backend'
263 266 pass
264 267
265 268
266 269 def get_mutex_lock(client, lock_key, lock_timeout, auto_renewal=False):
267 270 from ...lib._vendor import redis_lock
268 271
269 272 class _RedisLockWrapper:
270 273 """LockWrapper for redis_lock"""
271 274
272 275 @classmethod
273 276 def get_lock(cls):
274 277 return redis_lock.Lock(
275 278 redis_client=client,
276 279 name=lock_key,
277 280 expire=lock_timeout,
278 281 auto_renewal=auto_renewal,
279 282 strict=True,
280 283 )
281 284
282 285 def __repr__(self):
283 286 return f"{self.__class__.__name__}:{lock_key}"
284 287
285 288 def __str__(self):
286 289 return f"{self.__class__.__name__}:{lock_key}"
287 290
288 291 def __init__(self):
289 292 self.lock = self.get_lock()
290 293 self.lock_key = lock_key
291 294
292 295 def acquire(self, wait=True):
293 296 log.debug('Trying to acquire Redis lock for key %s', self.lock_key)
294 297 try:
295 298 acquired = self.lock.acquire(wait)
296 299 log.debug('Got lock for key %s, %s', self.lock_key, acquired)
297 300 return acquired
298 301 except redis_lock.AlreadyAcquired:
299 302 return False
300 303 except redis_lock.AlreadyStarted:
301 304 # refresh thread exists, but it also means we acquired the lock
302 305 return True
303 306
304 307 def release(self):
305 308 try:
306 309 self.lock.release()
307 310 except redis_lock.NotAcquired:
308 311 pass
309 312
310 313 return _RedisLockWrapper()
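A standalone sketch of the prefix handling this commit adds to BaseRedisBackend: the configured key_prefix is popped from the backend arguments and prepended both to the class-level key_prefix and to the distributed-lock key. The values below are illustrative only:

    # Standalone reimplementation of the two changed spots above (illustrative only).
    arguments = {'key_prefix': 'custom-prefix-'}   # as set in the ini file

    store_key_prefix = arguments.pop('key_prefix', '')
    backend_key_prefix = 'redis_msgpack_backend'   # RedisMsgPackBackend.key_prefix
    key_prefix = f'{store_key_prefix}{backend_key_prefix}'

    cache_key = 'repo_object_abc'
    lock_key = f'{store_key_prefix}_lock_{cache_key}'

    print(key_prefix)  # custom-prefix-redis_msgpack_backend
    print(lock_key)    # custom-prefix-_lock_repo_object_abc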
@@ -1,245 +1,243 @@
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import functools
19 19 import logging
20 20 import os
21 21 import threading
22 22 import time
23 23
24 24 import decorator
25 25 from dogpile.cache import CacheRegion
26 26
27 27
28 28 from ...lib.hash_utils import sha1
29 29 from ...lib.str_utils import safe_bytes
30 30 from ...lib.type_utils import str2bool # noqa :required by imports from .utils
31 31
32 32 from . import region_meta
33 33
34 34 log = logging.getLogger(__name__)
35 35
36 36
37 37 class RhodeCodeCacheRegion(CacheRegion):
38 38
39 39 def __repr__(self):
40 40 return f'`{self.__class__.__name__}(name={self.name}, backend={self.backend.__class__})`'
41 41
42 42 def conditional_cache_on_arguments(
43 43 self, namespace=None,
44 44 expiration_time=None,
45 45 should_cache_fn=None,
46 46 to_str=str,
47 47 function_key_generator=None,
48 48 condition=True):
49 49 """
50 50 Custom conditional decorator, that will not touch any dogpile internals if
51 51 condition isn't meet. This works a bit different from should_cache_fn
52 52 And it's faster in cases we don't ever want to compute cached values
53 53 """
54 54 expiration_time_is_callable = callable(expiration_time)
55 55 if not namespace:
56 56 namespace = getattr(self, '_default_namespace', None)
57 57
58 58 if function_key_generator is None:
59 59 function_key_generator = self.function_key_generator
60 60
61 61 def get_or_create_for_user_func(func_key_generator, user_func, *arg, **kw):
62 62
63 63 if not condition:
64 64 log.debug('Calling un-cached method:%s', user_func.__name__)
65 65 start = time.time()
66 66 result = user_func(*arg, **kw)
67 67 total = time.time() - start
68 68 log.debug('un-cached method:%s took %.4fs', user_func.__name__, total)
69 69 return result
70 70
71 71 key = func_key_generator(*arg, **kw)
72 timeout = expiration_time() if expiration_time_is_callable else expiration_time
73 log.debug('Calling cached (timeout=%s) method:`%s`', timeout, user_func.__name__)
72 74
73 timeout = expiration_time() if expiration_time_is_callable \
74 else expiration_time
75
76 log.debug('Calling cached method:`%s`', user_func.__name__)
77 75 return self.get_or_create(key, user_func, timeout, should_cache_fn, (arg, kw))
78 76
79 77 def cache_decorator(user_func):
80 78 if to_str is str:
81 79 # backwards compatible
82 80 key_generator = function_key_generator(namespace, user_func)
83 81 else:
84 82 key_generator = function_key_generator(namespace, user_func, to_str=to_str)
85 83
86 84 def refresh(*arg, **kw):
87 85 """
88 86 Like invalidate, but regenerates the value instead
89 87 """
90 88 key = key_generator(*arg, **kw)
91 89 value = user_func(*arg, **kw)
92 90 self.set(key, value)
93 91 return value
94 92
95 93 def invalidate(*arg, **kw):
96 94 key = key_generator(*arg, **kw)
97 95 self.delete(key)
98 96
99 97 def set_(value, *arg, **kw):
100 98 key = key_generator(*arg, **kw)
101 99 self.set(key, value)
102 100
103 101 def get(*arg, **kw):
104 102 key = key_generator(*arg, **kw)
105 103 return self.get(key)
106 104
107 105 user_func.set = set_
108 106 user_func.invalidate = invalidate
109 107 user_func.get = get
110 108 user_func.refresh = refresh
111 109 user_func.key_generator = key_generator
112 110 user_func.original = user_func
113 111
114 112 # Use `decorate` to preserve the signature of :param:`user_func`.
115 113 return decorator.decorate(user_func, functools.partial(
116 114 get_or_create_for_user_func, key_generator))
117 115
118 116 return cache_decorator
119 117
120 118
121 119 def make_region(*arg, **kw):
122 120 return RhodeCodeCacheRegion(*arg, **kw)
123 121
124 122
125 123 def get_default_cache_settings(settings, prefixes=None):
126 124 prefixes = prefixes or []
127 125 cache_settings = {}
128 126 for key in settings.keys():
129 127 for prefix in prefixes:
130 128 if key.startswith(prefix):
131 129 name = key.split(prefix)[1].strip()
132 130 val = settings[key]
133 131 if isinstance(val, str):
134 132 val = val.strip()
135 133 cache_settings[name] = val
136 134 return cache_settings
137 135
138 136
139 137 def compute_key_from_params(*args):
140 138 """
141 139 Helper to compute key from given params to be used in cache manager
142 140 """
143 141 return sha1(safe_bytes("_".join(map(str, args))))
144 142
145 143
146 144 def custom_key_generator(backend, namespace, fn):
147 145 func_name = fn.__name__
148 146
149 147 def generate_key(*args):
150 148 backend_pref = getattr(backend, 'key_prefix', None) or 'backend_prefix'
151 149 namespace_pref = namespace or 'default_namespace'
152 150 arg_key = compute_key_from_params(*args)
153 151 final_key = f"{backend_pref}:{namespace_pref}:{func_name}_{arg_key}"
154 152
155 153 return final_key
156 154
157 155 return generate_key
158 156
159 157
160 158 def backend_key_generator(backend):
161 159 """
162 160 Special wrapper that also sends over the backend to the key generator
163 161 """
164 162 def wrapper(namespace, fn):
165 163 return custom_key_generator(backend, namespace, fn)
166 164 return wrapper
167 165
168 166
169 167 def get_or_create_region(region_name, region_namespace: str = None, use_async_runner=False, force=False):
170 168 from .backends import FileNamespaceBackend
171 169 from . import async_creation_runner
172 170
173 171 region_obj = region_meta.dogpile_cache_regions.get(region_name)
174 172 if not region_obj:
175 173 reg_keys = list(region_meta.dogpile_cache_regions.keys())
176 174 raise OSError(f'Region `{region_name}` not found in configured regions: {reg_keys}.')
177 175
178 176 region_uid_name = f'{region_name}:{region_namespace}'
179 177
180 178 # Special case for ONLY the FileNamespaceBackend backend. We register one-file-per-region
181 179 if isinstance(region_obj.actual_backend, FileNamespaceBackend):
182 180 if not region_namespace:
183 181 raise ValueError(f'{FileNamespaceBackend} used requires to specify region_namespace param')
184 182
185 183 region_exist = region_meta.dogpile_cache_regions.get(region_namespace)
186 184 if region_exist and not force:
187 185 log.debug('Using already configured region: %s', region_namespace)
188 186 return region_exist
189 187
190 188 expiration_time = region_obj.expiration_time
191 189
192 190 cache_dir = region_meta.dogpile_config_defaults['cache_dir']
193 191 namespace_cache_dir = cache_dir
194 192
195 193 # we default the namespace_cache_dir to our default cache dir.
196 194 # however, if this backend is configured with filename= param, we prioritize that
197 195 # so all caches within that particular region, even those namespaced end up in the same path
198 196 if region_obj.actual_backend.filename:
199 197 namespace_cache_dir = os.path.dirname(region_obj.actual_backend.filename)
200 198
201 199 if not os.path.isdir(namespace_cache_dir):
202 200 os.makedirs(namespace_cache_dir)
203 201 new_region = make_region(
204 202 name=region_uid_name,
205 203 function_key_generator=backend_key_generator(region_obj.actual_backend)
206 204 )
207 205
208 206 namespace_filename = os.path.join(
209 207 namespace_cache_dir, f"{region_name}_{region_namespace}.cache_db")
210 208 # special type that allows 1db per namespace
211 209 new_region.configure(
212 210 backend='dogpile.cache.rc.file_namespace',
213 211 expiration_time=expiration_time,
214 212 arguments={"filename": namespace_filename}
215 213 )
216 214
217 215 # create and save in region caches
218 216 log.debug('configuring new region: %s', region_uid_name)
219 217 region_obj = region_meta.dogpile_cache_regions[region_namespace] = new_region
220 218
221 219 region_obj._default_namespace = region_namespace
222 220 if use_async_runner:
223 221 region_obj.async_creation_runner = async_creation_runner
224 222 return region_obj
225 223
226 224
227 225 def clear_cache_namespace(cache_region: str | RhodeCodeCacheRegion, cache_namespace_uid: str, method: str) -> int:
228 226 from . import CLEAR_DELETE, CLEAR_INVALIDATE
229 227
230 228 if not isinstance(cache_region, RhodeCodeCacheRegion):
231 229 cache_region = get_or_create_region(cache_region, cache_namespace_uid)
232 230 log.debug('clearing cache region: %s [prefix:%s] with method=%s',
233 231 cache_region, cache_namespace_uid, method)
234 232
235 233 num_affected_keys = 0
236 234
237 235 if method == CLEAR_INVALIDATE:
238 236 # NOTE: The CacheRegion.invalidate() method’s default mode of
239 237 # operation is to set a timestamp local to this CacheRegion in this Python process only.
240 238 # It does not impact other Python processes or regions as the timestamp is only stored locally in memory.
241 239 cache_region.invalidate(hard=True)
242 240
243 241 if method == CLEAR_DELETE:
244 242 num_affected_keys = cache_region.backend.delete_multi_by_prefix(prefix=cache_namespace_uid)
245 243 return num_affected_keys
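To tie the pieces together, here is a hedged usage sketch of the conditional decorator defined above; the function name, arguments, and namespace value are illustrative only:

    # Hypothetical usage of RhodeCodeCacheRegion.conditional_cache_on_arguments.
    # Assumes a 'repo_object' region is configured, as in the ini files above.
    region = get_or_create_region('repo_object', region_namespace='repo_object.some-repo')

    @region.conditional_cache_on_arguments(namespace='repo_object.some-repo', condition=True)
    def heavy_vcs_call(repo_id, commit_id):
        ...  # expensive VCS call; result is cached because condition=True

    heavy_vcs_call(42, 'abc123')             # computed once, then served from cache
    heavy_vcs_call.invalidate(42, 'abc123')  # helper attached by the decorator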