caches: fixed unicode error on non-ascii cache key
super-admin
r1015:5e5c2773 default
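The bug in one sketch: the distributed-lock key was built by formatting a raw byte-string cache key into a unicode template, which raises UnicodeDecodeError under Python 2 as soon as the key contains non-ASCII bytes. The following is a hypothetical reproduction, not part of the commit; Python 2's implicit ascii decode is simulated explicitly, and safe_unicode is approximated with a plain UTF-8 decode.

# -*- coding: utf-8 -*-
# Hypothetical reproduction of the bug, not part of the commit.

key = b'caches-caf\xc3\xa9'  # non-ascii bytes, e.g. a utf8-encoded repo name

# Before the fix: formatting the byte-string key into a unicode template
# forces an ascii decode (implicit under Python 2) and fails.
try:
    lock_key = u'_lock_{0}'.format(key.decode('ascii'))
except UnicodeDecodeError as exc:
    print('lock key failed: %s' % exc)

# After the fix: the key is decoded explicitly first (safe_unicode tries a
# list of encodings; plain utf8 stands in for it here), then formatted.
lock_key = u'_lock_{0}'.format(key.decode('utf8'))
print(lock_key)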
@@ -1,329 +1,329 @@
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import time
19 19 import errno
20 20 import logging
21 21
22 22 import msgpack
23 23 import redis
24 24
25 25 from dogpile.cache.api import CachedValue
26 26 from dogpile.cache.backends import memory as memory_backend
27 27 from dogpile.cache.backends import file as file_backend
28 28 from dogpile.cache.backends import redis as redis_backend
29 29 from dogpile.cache.backends.file import NO_VALUE, compat, FileLock
30 30 from dogpile.cache.util import memoized_property
31 31
32 32 from pyramid.settings import asbool
33 33
34 34 from vcsserver.lib.memory_lru_dict import LRUDict, LRUDictDebug
35 - from vcsserver.utils import safe_str
35 + from vcsserver.utils import safe_str, safe_unicode
36 36
37 37
38 38 _default_max_size = 1024
39 39
40 40 log = logging.getLogger(__name__)
41 41
42 42
43 43 class LRUMemoryBackend(memory_backend.MemoryBackend):
44 44 key_prefix = 'lru_mem_backend'
45 45 pickle_values = False
46 46
47 47 def __init__(self, arguments):
48 48 max_size = arguments.pop('max_size', _default_max_size)
49 49
50 50 LRUDictClass = LRUDict
51 51 if arguments.pop('log_key_count', None):
52 52 LRUDictClass = LRUDictDebug
53 53
54 54 arguments['cache_dict'] = LRUDictClass(max_size)
55 55 super(LRUMemoryBackend, self).__init__(arguments)
56 56
57 57 def delete(self, key):
58 58 try:
59 59 del self._cache[key]
60 60 except KeyError:
61 61 # we don't care if key isn't there at deletion
62 62 pass
63 63
64 64 def delete_multi(self, keys):
65 65 for key in keys:
66 66 self.delete(key)
67 67
68 68
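The delete() override above swallows KeyError because entries can vanish on their own: the backing LRUDict evicts least-recently-used keys once max_size is exceeded, so an explicit delete may race an eviction. A rough stand-in for that behaviour (illustrative only, not vcsserver's actual LRUDict):

from collections import OrderedDict

class TinyLRU(OrderedDict):
    """Illustrative stand-in for vcsserver's LRUDict, not the real class."""

    def __init__(self, max_size):
        super(TinyLRU, self).__init__()
        self.max_size = max_size

    def __setitem__(self, key, value):
        super(TinyLRU, self).__setitem__(key, value)
        while len(self) > self.max_size:
            self.popitem(last=False)  # evict the oldest entry

cache = TinyLRU(max_size=2)
cache['a'] = 1
cache['b'] = 2
cache['c'] = 3  # 'a' is silently evicted here
assert 'a' not in cache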
69 69 class PickleSerializer(object):
70 70
71 71 def _dumps(self, value, safe=False):
72 72 try:
73 73 return compat.pickle.dumps(value)
74 74 except Exception:
75 75 if safe:
76 76 return NO_VALUE
77 77 else:
78 78 raise
79 79
80 80 def _loads(self, value, safe=True):
81 81 try:
82 82 return compat.pickle.loads(value)
83 83 except Exception:
84 84 if safe:
85 85 return NO_VALUE
86 86 else:
87 87 raise
88 88
89 89
90 90 class MsgPackSerializer(object):
91 91
92 92 def _dumps(self, value, safe=False):
93 93 try:
94 94 return msgpack.packb(value)
95 95 except Exception:
96 96 if safe:
97 97 return NO_VALUE
98 98 else:
99 99 raise
100 100
101 101 def _loads(self, value, safe=True):
102 102 """
103 103 pickle maintained the `CachedValue` wrapper of the tuple
104 104 msgpack does not, so it must be added back in.
105 105 """
106 106 try:
107 107 value = msgpack.unpackb(value, use_list=False)
108 108 return CachedValue(*value)
109 109 except Exception:
110 110 if safe:
111 111 return NO_VALUE
112 112 else:
113 113 raise
114 114
115 115
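The docstring above is the crux of MsgPackSerializer: CachedValue is a tuple subclass of (payload, metadata), and while pickle restores the subclass, msgpack flattens it to a plain array, so the wrapper is rebuilt by hand on load. A round-trip sketch; the metadata shape ('ct' creation time, 'v' version) follows dogpile's convention, and the values are illustrative:

import msgpack
from dogpile.cache.api import CachedValue

# msgpack serializes the tuple subclass as a plain array, so the
# CachedValue wrapper must be restored after unpacking.
original = CachedValue('payload', {'ct': 1577836800.0, 'v': 1})
packed = msgpack.packb(original)
restored = CachedValue(*msgpack.unpackb(packed, use_list=False))
assert restored == original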
116 116 import fcntl
117 117 flock_org = fcntl.flock
118 118
119 119
120 120 class CustomLockFactory(FileLock):
121 121
122 122 pass
123 123
124 124
125 125 class FileNamespaceBackend(PickleSerializer, file_backend.DBMBackend):
126 126 key_prefix = 'file_backend'
127 127
128 128 def __init__(self, arguments):
129 129 arguments['lock_factory'] = CustomLockFactory
130 130 db_file = arguments.get('filename')
131 131
132 132 log.debug('initializing %s DB in %s', self.__class__.__name__, db_file)
133 133 try:
134 134 super(FileNamespaceBackend, self).__init__(arguments)
135 135 except Exception:
136 - log.error('Failed to initialize db at: %s', db_file)
136 + log.exception('Failed to initialize db at: %s', db_file)
137 137 raise
138 138
139 139 def __repr__(self):
140 140 return '{} `{}`'.format(self.__class__, self.filename)
141 141
142 142 def list_keys(self, prefix=''):
143 143 prefix = '{}:{}'.format(self.key_prefix, prefix)
144 144
145 145 def cond(v):
146 146 if not prefix:
147 147 return True
148 148
149 149 if v.startswith(prefix):
150 150 return True
151 151 return False
152 152
153 153 with self._dbm_file(True) as dbm:
154 154 try:
155 155 return filter(cond, dbm.keys())
156 156 except Exception:
157 157 log.error('Failed to fetch DBM keys from DB: %s', self.get_store())
158 158 raise
159 159
160 160 def get_store(self):
161 161 return self.filename
162 162
163 163 def _dbm_get(self, key):
164 164 with self._dbm_file(False) as dbm:
165 165 if hasattr(dbm, 'get'):
166 166 value = dbm.get(key, NO_VALUE)
167 167 else:
168 168 # gdbm objects lack a .get method
169 169 try:
170 170 value = dbm[key]
171 171 except KeyError:
172 172 value = NO_VALUE
173 173 if value is not NO_VALUE:
174 174 value = self._loads(value)
175 175 return value
176 176
177 177 def get(self, key):
178 178 try:
179 179 return self._dbm_get(key)
180 180 except Exception:
181 181 log.error('Failed to fetch DBM key %s from DB: %s', key, self.get_store())
182 182 raise
183 183
184 184 def set(self, key, value):
185 185 with self._dbm_file(True) as dbm:
186 186 dbm[key] = self._dumps(value)
187 187
188 188 def set_multi(self, mapping):
189 189 with self._dbm_file(True) as dbm:
190 190 for key, value in mapping.items():
191 191 dbm[key] = self._dumps(value)
192 192
193 193
194 194 class BaseRedisBackend(redis_backend.RedisBackend):
195 195 key_prefix = ''
196 196
197 197 def __init__(self, arguments):
198 198 super(BaseRedisBackend, self).__init__(arguments)
199 199 self._lock_timeout = self.lock_timeout
200 200 self._lock_auto_renewal = asbool(arguments.pop("lock_auto_renewal", True))
201 201
202 202 if self._lock_auto_renewal and not self._lock_timeout:
203 203 # set default timeout for auto_renewal
204 204 self._lock_timeout = 30
205 205
206 206 def _create_client(self):
207 207 args = {}
208 208
209 209 if self.url is not None:
210 210 args.update(url=self.url)
211 211
212 212 else:
213 213 args.update(
214 214 host=self.host, password=self.password,
215 215 port=self.port, db=self.db
216 216 )
217 217
218 218 connection_pool = redis.ConnectionPool(**args)
219 219
220 220 return redis.StrictRedis(connection_pool=connection_pool)
221 221
222 222 def list_keys(self, prefix=''):
223 223 prefix = '{}:{}*'.format(self.key_prefix, prefix)
224 224 return self.client.keys(prefix)
225 225
226 226 def get_store(self):
227 227 return self.client.connection_pool
228 228
229 229 def get(self, key):
230 230 value = self.client.get(key)
231 231 if value is None:
232 232 return NO_VALUE
233 233 return self._loads(value)
234 234
235 235 def get_multi(self, keys):
236 236 if not keys:
237 237 return []
238 238 values = self.client.mget(keys)
239 239 loads = self._loads
240 240 return [
241 241 loads(v) if v is not None else NO_VALUE
242 242 for v in values]
243 243
244 244 def set(self, key, value):
245 245 if self.redis_expiration_time:
246 246 self.client.setex(key, self.redis_expiration_time,
247 247 self._dumps(value))
248 248 else:
249 249 self.client.set(key, self._dumps(value))
250 250
251 251 def set_multi(self, mapping):
252 252 dumps = self._dumps
253 253 mapping = dict(
254 254 (k, dumps(v))
255 255 for k, v in mapping.items()
256 256 )
257 257
258 258 if not self.redis_expiration_time:
259 259 self.client.mset(mapping)
260 260 else:
261 261 pipe = self.client.pipeline()
262 262 for key, value in mapping.items():
263 263 pipe.setex(key, self.redis_expiration_time, value)
264 264 pipe.execute()
265 265
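The branch above exists because Redis MSET cannot attach a TTL: when an expiration time is configured, each key needs its own SETEX, and the pipeline batches those commands into a single network round-trip instead of one per key. A usage sketch, assuming a reachable local Redis and illustrative key names:

import redis

client = redis.StrictRedis()  # assumes Redis on localhost:6379
mapping = {'k1': b'v1', 'k2': b'v2'}
expiration = 300  # seconds

# MSET has no TTL parameter, so per-key SETEX calls are queued on a
# pipeline and sent in one round-trip.
pipe = client.pipeline()
for key, value in mapping.items():
    pipe.setex(key, expiration, value)
pipe.execute()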
266 266 def get_mutex(self, key):
267 267 if self.distributed_lock:
268 - lock_key = redis_backend.u('_lock_{0}').format(safe_str(key))
268 + lock_key = redis_backend.u(u'_lock_{0}'.format(safe_unicode(key)))
269 269 return get_mutex_lock(self.client, lock_key, self._lock_timeout,
270 270 auto_renewal=self._lock_auto_renewal)
271 271 else:
272 272 return None
273 273
274 274
275 275 class RedisPickleBackend(PickleSerializer, BaseRedisBackend):
276 276 key_prefix = 'redis_pickle_backend'
277 277 pass
278 278
279 279
280 280 class RedisMsgPackBackend(MsgPackSerializer, BaseRedisBackend):
281 281 key_prefix = 'redis_msgpack_backend'
282 282 pass
283 283
284 284
285 285 def get_mutex_lock(client, lock_key, lock_timeout, auto_renewal=False):
286 286 import redis_lock
287 287
288 288 class _RedisLockWrapper(object):
289 289 """LockWrapper for redis_lock"""
290 290
291 291 @classmethod
292 292 def get_lock(cls):
293 293 return redis_lock.Lock(
294 294 redis_client=client,
295 295 name=lock_key,
296 296 expire=lock_timeout,
297 297 auto_renewal=auto_renewal,
298 298 strict=True,
299 299 )
300 300
301 301 def __repr__(self):
302 302 return "{}:{}".format(self.__class__.__name__, lock_key)
303 303
304 304 def __str__(self):
305 305 return "{}:{}".format(self.__class__.__name__, lock_key)
306 306
307 307 def __init__(self):
308 308 self.lock = self.get_lock()
309 309 self.lock_key = lock_key
310 310
311 311 def acquire(self, wait=True):
312 312 log.debug('Trying to acquire Redis lock for key %s', self.lock_key)
313 313 try:
314 314 acquired = self.lock.acquire(wait)
315 315 log.debug('Got lock for key %s, %s', self.lock_key, acquired)
316 316 return acquired
317 317 except redis_lock.AlreadyAcquired:
318 318 return False
319 319 except redis_lock.AlreadyStarted:
320 320 # a refresh thread already exists, which means we already hold the lock
321 321 return True
322 322
323 323 def release(self):
324 324 try:
325 325 self.lock.release()
326 326 except redis_lock.NotAcquired:
327 327 pass
328 328
329 329 return _RedisLockWrapper()
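How the wrapper above would be used, as a hypothetical sketch; it assumes a reachable Redis and the redis_lock package installed. Note the two special cases in acquire(): AlreadyAcquired means another caller holds the lock, so it reports failure, while AlreadyStarted means a refresh thread is already running for this lock, so this process already holds it and acquire reports success. When auto-renewal is enabled, BaseRedisBackend defaults the lock timeout to 30 seconds so the renewal thread has an expiry to keep extending.

import redis

client = redis.StrictRedis()  # assumes Redis on localhost:6379
mutex = get_mutex_lock(client, u'_lock_example', lock_timeout=30,
                       auto_renewal=True)

if mutex.acquire(wait=True):
    try:
        pass  # compute and store the cached value under the lock
    finally:
        mutex.release()  # NotAcquired is swallowed, so this is always safe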