caches: fixed issue with exception on handling non-ascii cache keys.
super-admin - r963:8b44d37f default
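The change imports `safe_str` from `vcsserver.utils` and applies it to the distributed-lock key in `BaseRedisBackend.get_mutex`. Below is a minimal sketch of the kind of failure being fixed, assuming Python 2 string semantics; the exact key type that triggered the reported exception is not shown in the diff, so treat this as illustrative only:

    # -*- coding: utf-8 -*-
    # On Python 2, interpolating a non-ascii byte str into a unicode
    # template forces an implicit ascii decode of the bytes and raises.
    key = 'repo_caf\xc3\xa9'  # UTF-8 encoded byte str, non-ascii

    try:
        lock_key = u'_lock_{0}'.format(key)
    except UnicodeDecodeError:
        pass  # plausibly the exception the commit message refers to

The one-line change in the hunk below normalizes the key with `safe_str` before it is interpolated into the lock-key template.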
@@ -1,328 +1,329 @@
 # RhodeCode VCSServer provides access to different vcs backends via network.
 # Copyright (C) 2014-2020 RhodeCode GmbH
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation; either version 3 of the License, or
 # (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software Foundation,
 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 
 import time
 import errno
 import logging
 
 import msgpack
 import redis
 
 from dogpile.cache.api import CachedValue
 from dogpile.cache.backends import memory as memory_backend
 from dogpile.cache.backends import file as file_backend
 from dogpile.cache.backends import redis as redis_backend
 from dogpile.cache.backends.file import NO_VALUE, compat, FileLock
 from dogpile.cache.util import memoized_property
 
 from pyramid.settings import asbool
 
 from vcsserver.lib.memory_lru_dict import LRUDict, LRUDictDebug
+from vcsserver.utils import safe_str
 
 
 _default_max_size = 1024
 
 log = logging.getLogger(__name__)
 
 
 class LRUMemoryBackend(memory_backend.MemoryBackend):
     key_prefix = 'lru_mem_backend'
     pickle_values = False
 
     def __init__(self, arguments):
         max_size = arguments.pop('max_size', _default_max_size)
 
         LRUDictClass = LRUDict
         if arguments.pop('log_key_count', None):
             LRUDictClass = LRUDictDebug
 
         arguments['cache_dict'] = LRUDictClass(max_size)
         super(LRUMemoryBackend, self).__init__(arguments)
 
     def delete(self, key):
         try:
             del self._cache[key]
         except KeyError:
             # we don't care if key isn't there at deletion
             pass
 
     def delete_multi(self, keys):
         for key in keys:
             self.delete(key)
 
 
 class PickleSerializer(object):
 
     def _dumps(self, value, safe=False):
         try:
             return compat.pickle.dumps(value)
         except Exception:
             if safe:
                 return NO_VALUE
             else:
                 raise
 
     def _loads(self, value, safe=True):
         try:
             return compat.pickle.loads(value)
         except Exception:
             if safe:
                 return NO_VALUE
             else:
                 raise
 
 
 class MsgPackSerializer(object):
 
     def _dumps(self, value, safe=False):
         try:
             return msgpack.packb(value)
         except Exception:
             if safe:
                 return NO_VALUE
             else:
                 raise
 
     def _loads(self, value, safe=True):
         """
         pickle maintains the `CachedValue` wrapper of the tuple;
         msgpack does not, so it must be added back in.
104 105 """
105 106 try:
106 107 value = msgpack.unpackb(value, use_list=False)
107 108 return CachedValue(*value)
108 109 except Exception:
109 110 if safe:
110 111 return NO_VALUE
111 112 else:
112 113 raise
113 114
114 115
115 116 import fcntl
116 117 flock_org = fcntl.flock
117 118
118 119
119 120 class CustomLockFactory(FileLock):
120 121
121 122 pass
122 123
123 124
124 125 class FileNamespaceBackend(PickleSerializer, file_backend.DBMBackend):
125 126 key_prefix = 'file_backend'
126 127
127 128 def __init__(self, arguments):
128 129 arguments['lock_factory'] = CustomLockFactory
129 130 db_file = arguments.get('filename')
130 131
         log.debug('initializing %s DB in %s', self.__class__.__name__, db_file)
         try:
             super(FileNamespaceBackend, self).__init__(arguments)
         except Exception:
             log.error('Failed to initialize db at: %s', db_file)
             raise
 
     def __repr__(self):
         return '{} `{}`'.format(self.__class__, self.filename)
 
     def list_keys(self, prefix=''):
         prefix = '{}:{}'.format(self.key_prefix, prefix)
 
         def cond(v):
             if not prefix:
                 return True
 
             if v.startswith(prefix):
                 return True
             return False
 
         with self._dbm_file(True) as dbm:
             try:
                 return filter(cond, dbm.keys())
             except Exception:
                 log.error('Failed to fetch DBM keys from DB: %s', self.get_store())
                 raise
 
     def get_store(self):
         return self.filename
 
     def _dbm_get(self, key):
         with self._dbm_file(False) as dbm:
             if hasattr(dbm, 'get'):
                 value = dbm.get(key, NO_VALUE)
             else:
                 # gdbm objects lack a .get method
                 try:
                     value = dbm[key]
                 except KeyError:
                     value = NO_VALUE
             if value is not NO_VALUE:
                 value = self._loads(value)
             return value
 
     def get(self, key):
         try:
             return self._dbm_get(key)
         except Exception:
             log.error('Failed to fetch DBM key %s from DB: %s', key, self.get_store())
             raise
 
     def set(self, key, value):
         with self._dbm_file(True) as dbm:
             dbm[key] = self._dumps(value)
 
     def set_multi(self, mapping):
         with self._dbm_file(True) as dbm:
             for key, value in mapping.items():
                 dbm[key] = self._dumps(value)
 
 
 class BaseRedisBackend(redis_backend.RedisBackend):
     key_prefix = ''
 
     def __init__(self, arguments):
         super(BaseRedisBackend, self).__init__(arguments)
         self._lock_timeout = self.lock_timeout
         self._lock_auto_renewal = asbool(arguments.pop("lock_auto_renewal", True))
 
         if self._lock_auto_renewal and not self._lock_timeout:
             # set default timeout for auto_renewal
             self._lock_timeout = 30
 
     def _create_client(self):
         args = {}
 
         if self.url is not None:
             args.update(url=self.url)
 
         else:
             args.update(
                 host=self.host, password=self.password,
                 port=self.port, db=self.db
             )
 
         connection_pool = redis.ConnectionPool(**args)
 
         return redis.StrictRedis(connection_pool=connection_pool)
 
     def list_keys(self, prefix=''):
         prefix = '{}:{}*'.format(self.key_prefix, prefix)
         return self.client.keys(prefix)
 
     def get_store(self):
         return self.client.connection_pool
 
     def get(self, key):
         value = self.client.get(key)
         if value is None:
             return NO_VALUE
         return self._loads(value)
 
     def get_multi(self, keys):
         if not keys:
             return []
         values = self.client.mget(keys)
         loads = self._loads
         return [
             loads(v) if v is not None else NO_VALUE
             for v in values]
 
     def set(self, key, value):
         if self.redis_expiration_time:
             self.client.setex(key, self.redis_expiration_time,
                               self._dumps(value))
         else:
             self.client.set(key, self._dumps(value))
 
     def set_multi(self, mapping):
         dumps = self._dumps
         mapping = dict(
             (k, dumps(v))
             for k, v in mapping.items()
         )
 
         if not self.redis_expiration_time:
             self.client.mset(mapping)
         else:
             pipe = self.client.pipeline()
             for key, value in mapping.items():
                 pipe.setex(key, self.redis_expiration_time, value)
             pipe.execute()
 
     def get_mutex(self, key):
         if self.distributed_lock:
-            lock_key = redis_backend.u('_lock_{0}').format(key)
+            lock_key = redis_backend.u('_lock_{0}').format(safe_str(key))
             return get_mutex_lock(self.client, lock_key, self._lock_timeout,
                                   auto_renewal=self._lock_auto_renewal)
         else:
             return None
 
 
 class RedisPickleBackend(PickleSerializer, BaseRedisBackend):
     key_prefix = 'redis_pickle_backend'
     pass
 
 
 class RedisMsgPackBackend(MsgPackSerializer, BaseRedisBackend):
     key_prefix = 'redis_msgpack_backend'
     pass
 
 
 def get_mutex_lock(client, lock_key, lock_timeout, auto_renewal=False):
     import redis_lock
 
     class _RedisLockWrapper(object):
         """LockWrapper for redis_lock"""
 
         @classmethod
         def get_lock(cls):
             return redis_lock.Lock(
                 redis_client=client,
                 name=lock_key,
                 expire=lock_timeout,
                 auto_renewal=auto_renewal,
                 strict=True,
             )
 
         def __repr__(self):
             return "{}:{}".format(self.__class__.__name__, lock_key)
 
         def __str__(self):
             return "{}:{}".format(self.__class__.__name__, lock_key)
 
         def __init__(self):
             self.lock = self.get_lock()
             self.lock_key = lock_key
 
         def acquire(self, wait=True):
             log.debug('Trying to acquire Redis lock for key %s', self.lock_key)
             try:
                 acquired = self.lock.acquire(wait)
                 log.debug('Got lock for key %s, %s', self.lock_key, acquired)
                 return acquired
             except redis_lock.AlreadyAcquired:
                 return False
             except redis_lock.AlreadyStarted:
                 # refresh thread exists, but it also means we acquired the lock
                 return True
 
         def release(self):
             try:
                 self.lock.release()
             except redis_lock.NotAcquired:
                 pass
 
     return _RedisLockWrapper()
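
Three hedged usage sketches follow; none are part of the changeset. First, direct instantiation of `LRUMemoryBackend`, assuming vcsserver's `LRUDict` evicts the least-recently-used entry once `max_size` is exceeded (in production these backends are wired up through dogpile.cache region configuration, not built by hand):

    from dogpile.cache.backends.file import NO_VALUE

    # LRUMemoryBackend as defined in the module above; log_key_count
    # swaps in the LRUDictDebug variant that logs key counts.
    backend = LRUMemoryBackend({'max_size': 2, 'log_key_count': True})

    backend.set('a', 1)
    backend.set('b', 2)
    backend.set('c', 3)   # assumed to evict the least-recently-used key

    assert backend.get('b') == 2
    assert backend.get('a') is NO_VALUE   # evicted keys return the sentinel
    backend.delete_multi(['b', 'c'])      # delete ignores missing keys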
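Second, a round-trip illustrating the `MsgPackSerializer._loads` docstring: msgpack flattens the `CachedValue` wrapper to a plain tuple, so it is re-wrapped on load. The metadata dict shape here ('ct' creation time, 'v' version) follows dogpile's convention but is only illustrative:

    import time
    import msgpack
    from dogpile.cache.api import CachedValue

    # CachedValue is a tuple subclass of (payload, metadata); msgpack can
    # pack it directly but unpacks it as a bare tuple.
    value = CachedValue('payload', {'v': 1, 'ct': time.time()})
    packed = msgpack.packb(value)
    restored = CachedValue(*msgpack.unpackb(packed, use_list=False))
    assert restored.payload == 'payload'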
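Finally, a sketch of the lock wrapper in use, assuming a reachable Redis server on localhost and the python-redis-lock package that `get_mutex_lock` imports:

    import redis

    client = redis.StrictRedis(host='localhost', port=6379, db=0)

    # auto_renewal keeps extending the lock while it is held, which is why
    # BaseRedisBackend forces a 30 second default timeout when renewal is
    # enabled without an explicit lock_timeout.
    mutex = get_mutex_lock(client, '_lock_example', lock_timeout=30,
                           auto_renewal=True)

    if mutex.acquire(wait=True):
        try:
            pass  # recompute and store the cached value here
        finally:
            mutex.release()  # NotAcquired is swallowed by the wrapper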