@@ -1,311 +1,313 @@
 # RhodeCode VCSServer provides access to different vcs backends via network.
 # Copyright (C) 2014-2020 RhodeCode GmbH
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation; either version 3 of the License, or
 # (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software Foundation,
 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

 import time
 import errno
 import logging

 import msgpack
 import redis

 from dogpile.cache.api import CachedValue
 from dogpile.cache.backends import memory as memory_backend
 from dogpile.cache.backends import file as file_backend
 from dogpile.cache.backends import redis as redis_backend
 from dogpile.cache.backends.file import NO_VALUE, compat, FileLock
 from dogpile.cache.util import memoized_property

+from pyramid.settings import asbool
+
 from vcsserver.lib.memory_lru_dict import LRUDict, LRUDictDebug


 _default_max_size = 1024

 log = logging.getLogger(__name__)


 class LRUMemoryBackend(memory_backend.MemoryBackend):
     key_prefix = 'lru_mem_backend'
     pickle_values = False

     def __init__(self, arguments):
         max_size = arguments.pop('max_size', _default_max_size)

         LRUDictClass = LRUDict
         if arguments.pop('log_key_count', None):
             LRUDictClass = LRUDictDebug

         arguments['cache_dict'] = LRUDictClass(max_size)
         super(LRUMemoryBackend, self).__init__(arguments)

     def delete(self, key):
         try:
             del self._cache[key]
         except KeyError:
             # we don't care if key isn't there at deletion
             pass

     def delete_multi(self, keys):
         for key in keys:
             self.delete(key)


 class PickleSerializer(object):

     def _dumps(self, value, safe=False):
         try:
             return compat.pickle.dumps(value)
         except Exception:
             if safe:
                 return NO_VALUE
             else:
                 raise

     def _loads(self, value, safe=True):
         try:
             return compat.pickle.loads(value)
         except Exception:
             if safe:
                 return NO_VALUE
             else:
                 raise


 class MsgPackSerializer(object):

     def _dumps(self, value, safe=False):
         try:
             return msgpack.packb(value)
         except Exception:
             if safe:
                 return NO_VALUE
             else:
                 raise

     def _loads(self, value, safe=True):
         """
         pickle maintained the `CachedValue` wrapper of the tuple
         msgpack does not, so it must be added back in.
         """
         try:
             value = msgpack.unpackb(value, use_list=False)
             return CachedValue(*value)
         except Exception:
             if safe:
                 return NO_VALUE
             else:
                 raise


 import fcntl
 flock_org = fcntl.flock


 class CustomLockFactory(FileLock):

     pass


 class FileNamespaceBackend(PickleSerializer, file_backend.DBMBackend):
     key_prefix = 'file_backend'

     def __init__(self, arguments):
         arguments['lock_factory'] = CustomLockFactory
         db_file = arguments.get('filename')

         log.debug('initialing %s DB in %s', self.__class__.__name__, db_file)
         try:
             super(FileNamespaceBackend, self).__init__(arguments)
         except Exception:
             log.error('Failed to initialize db at: %s', db_file)
             raise

     def __repr__(self):
         return '{} `{}`'.format(self.__class__, self.filename)

     def list_keys(self, prefix=''):
         prefix = '{}:{}'.format(self.key_prefix, prefix)

         def cond(v):
             if not prefix:
                 return True

             if v.startswith(prefix):
                 return True
             return False

         with self._dbm_file(True) as dbm:
             try:
                 return filter(cond, dbm.keys())
             except Exception:
                 log.error('Failed to fetch DBM keys from DB: %s', self.get_store())
                 raise

     def get_store(self):
         return self.filename

     def _dbm_get(self, key):
         with self._dbm_file(False) as dbm:
             if hasattr(dbm, 'get'):
                 value = dbm.get(key, NO_VALUE)
             else:
                 # gdbm objects lack a .get method
                 try:
                     value = dbm[key]
                 except KeyError:
                     value = NO_VALUE
             if value is not NO_VALUE:
                 value = self._loads(value)
             return value

     def get(self, key):
         try:
             return self._dbm_get(key)
         except Exception:
             log.error('Failed to fetch DBM key %s from DB: %s', key, self.get_store())
             raise

     def set(self, key, value):
         with self._dbm_file(True) as dbm:
             dbm[key] = self._dumps(value)

     def set_multi(self, mapping):
         with self._dbm_file(True) as dbm:
             for key, value in mapping.items():
                 dbm[key] = self._dumps(value)


 class BaseRedisBackend(redis_backend.RedisBackend):
     key_prefix = ''

     def __init__(self, arguments):
         super(BaseRedisBackend, self).__init__(arguments)
         self._lock_timeout = self.lock_timeout
-        self._lock_auto_renewal = arguments.pop("lock_auto_renewal",
+        self._lock_auto_renewal = asbool(arguments.pop("lock_auto_renewal", True))

         if self._lock_auto_renewal and not self._lock_timeout:
             # set default timeout for auto_renewal
-            self._lock_timeout =
+            self._lock_timeout = 30

     def _create_client(self):
         args = {}

         if self.url is not None:
             args.update(url=self.url)

         else:
             args.update(
                 host=self.host, password=self.password,
                 port=self.port, db=self.db
             )

         connection_pool = redis.ConnectionPool(**args)

         return redis.StrictRedis(connection_pool=connection_pool)

     def list_keys(self, prefix=''):
         prefix = '{}:{}*'.format(self.key_prefix, prefix)
         return self.client.keys(prefix)

     def get_store(self):
         return self.client.connection_pool

     def get(self, key):
         value = self.client.get(key)
         if value is None:
             return NO_VALUE
         return self._loads(value)

     def get_multi(self, keys):
         if not keys:
             return []
         values = self.client.mget(keys)
         loads = self._loads
         return [
             loads(v) if v is not None else NO_VALUE
             for v in values]

     def set(self, key, value):
         if self.redis_expiration_time:
             self.client.setex(key, self.redis_expiration_time,
                               self._dumps(value))
         else:
             self.client.set(key, self._dumps(value))

     def set_multi(self, mapping):
         dumps = self._dumps
         mapping = dict(
             (k, dumps(v))
             for k, v in mapping.items()
         )

         if not self.redis_expiration_time:
             self.client.mset(mapping)
         else:
             pipe = self.client.pipeline()
             for key, value in mapping.items():
                 pipe.setex(key, self.redis_expiration_time, value)
             pipe.execute()

     def get_mutex(self, key):
         if self.distributed_lock:
             lock_key = redis_backend.u('_lock_{0}').format(key)
             log.debug('Trying to acquire Redis lock for key %s', lock_key)
             return get_mutex_lock(self.client, lock_key, self._lock_timeout,
                                   auto_renewal=self._lock_auto_renewal)
         else:
             return None


 class RedisPickleBackend(PickleSerializer, BaseRedisBackend):
     key_prefix = 'redis_pickle_backend'
     pass


 class RedisMsgPackBackend(MsgPackSerializer, BaseRedisBackend):
     key_prefix = 'redis_msgpack_backend'
     pass


 def get_mutex_lock(client, lock_key, lock_timeout, auto_renewal=False):
     import redis_lock

     class _RedisLockWrapper(object):
         """LockWrapper for redis_lock"""

         def __init__(self):
             pass

         @property
         def lock(self):
             return redis_lock.Lock(
                 redis_client=client,
                 name=lock_key,
                 expire=lock_timeout,
                 auto_renewal=auto_renewal,
                 strict=True,
             )

         def acquire(self, wait=True):
             return self.lock.acquire(wait)

         def release(self):
             try:
                 self.lock.release()
             except redis_lock.NotAcquired:
                 pass

     return _RedisLockWrapper()
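
Two functional changes land in this diff: `lock_auto_renewal` is now passed
through `asbool()`, and a default 30-second lock timeout is applied whenever
auto-renewal is enabled without an explicit timeout. The `asbool()` wrapper
matters because backend arguments coming from an .ini file arrive as strings,
and any non-empty string, including 'false', is truthy in plain Python. A
minimal standalone illustration:

    from pyramid.settings import asbool

    # .ini values reach the backend as strings; bool() gets them wrong
    assert bool('false') is True
    assert asbool('false') is False
    assert asbool('true') is True
    # already-boolean values pass through unchanged
    assert asbool(True) is True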
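
For context on how these backends get used, here is a minimal sketch of
registering and configuring the msgpack Redis backend with dogpile.cache.
The backend name, module path, and argument values below are illustrative
assumptions, not taken from this diff:

    from dogpile.cache import make_region
    from dogpile.cache.region import register_backend

    # hypothetical registration; adjust the module path to wherever this
    # backends module actually lives
    register_backend(
        'dogpile.cache.rc.redis_msgpack',
        'vcsserver.lib.rc_cache.backends',
        'RedisMsgPackBackend')

    region = make_region().configure(
        'dogpile.cache.rc.redis_msgpack',
        expiration_time=60,
        arguments={
            'host': 'localhost',
            'port': 6379,
            'db': 0,
            'redis_expiration_time': 300,  # per-key TTL, applied via SETEX
            'distributed_lock': True,      # dogpile locking goes through get_mutex()
            'lock_auto_renewal': 'true',   # .ini-style string; asbool() normalizes it
        })

    @region.cache_on_arguments()
    def expensive(x):
        return x * 2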
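
The docstring on `MsgPackSerializer._loads` is the one real subtlety in the
serializers: dogpile stores values wrapped in `CachedValue`, a tuple subclass
carrying (payload, metadata). Pickle round-trips the subclass; msgpack
flattens it to a plain tuple, so the wrapper must be rebuilt by hand. A
standalone sketch of that behavior:

    import time

    import msgpack
    from dogpile.cache.api import CachedValue

    value = CachedValue('payload', {'ct': time.time(), 'v': 1})
    packed = msgpack.packb(value)                      # serialized as a plain 2-element array
    unpacked = msgpack.unpackb(packed, use_list=False)

    assert not isinstance(unpacked, CachedValue)       # wrapper type is lost
    restored = CachedValue(*unpacked)                  # what _loads() does
    assert isinstance(restored, CachedValue)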
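
Finally, the object returned by `get_mutex_lock` only needs to satisfy the
acquire/release contract dogpile expects of a mutex. A usage sketch, assuming
a local Redis, the `python-redis-lock` package this code imports, and that
the module is importable under the path shown:

    import redis

    from vcsserver.lib.rc_cache.backends import get_mutex_lock  # assumed path

    client = redis.StrictRedis(host='localhost', port=6379, db=0)
    mutex = get_mutex_lock(client, '_lock_some_key', lock_timeout=30,
                           auto_renewal=True)

    if mutex.acquire(wait=True):
        try:
            pass  # recompute and store the fresh cache value under the lock
        finally:
            # the wrapper swallows redis_lock.NotAcquired on release
            mutex.release()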