caches: added debug and timings

r4733:3c68f6b7 stable
@@ -1,354 +1,363 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2015-2020 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

import time
import errno
import logging

import msgpack
import gevent
import redis

from dogpile.cache.api import CachedValue
from dogpile.cache.backends import memory as memory_backend
from dogpile.cache.backends import file as file_backend
from dogpile.cache.backends import redis as redis_backend
from dogpile.cache.backends.file import NO_VALUE, compat, FileLock
from dogpile.cache.util import memoized_property

from pyramid.settings import asbool

from rhodecode.lib.memory_lru_dict import LRUDict, LRUDictDebug


_default_max_size = 1024

log = logging.getLogger(__name__)

class LRUMemoryBackend(memory_backend.MemoryBackend):
    key_prefix = 'lru_mem_backend'
    pickle_values = False

    def __init__(self, arguments):
        max_size = arguments.pop('max_size', _default_max_size)

        LRUDictClass = LRUDict
        if arguments.pop('log_key_count', None):
            LRUDictClass = LRUDictDebug

        arguments['cache_dict'] = LRUDictClass(max_size)
        super(LRUMemoryBackend, self).__init__(arguments)

    def delete(self, key):
        try:
            del self._cache[key]
        except KeyError:
            # we don't care if key isn't there at deletion
            pass

    def delete_multi(self, keys):
        for key in keys:
            self.delete(key)

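For illustration (not part of this changeset), a minimal sketch of exercising this backend directly, outside a configured dogpile region; `max_size` and `log_key_count` are the arguments consumed above, while the key and value are made up:

# hypothetical direct use of LRUMemoryBackend
backend = LRUMemoryBackend({'max_size': 2048, 'log_key_count': True})

backend.set('lru_mem_backend:answer', 42)
assert backend.get('lru_mem_backend:answer') == 42

# delete() deliberately swallows KeyError, so double-deletes are safe
backend.delete('lru_mem_backend:answer')
backend.delete('lru_mem_backend:answer')
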
class PickleSerializer(object):

    def _dumps(self, value, safe=False):
        try:
            return compat.pickle.dumps(value)
        except Exception:
            if safe:
                return NO_VALUE
            else:
                raise

    def _loads(self, value, safe=True):
        try:
            return compat.pickle.loads(value)
        except Exception:
            if safe:
                return NO_VALUE
            else:
                raise


class MsgPackSerializer(object):

    def _dumps(self, value, safe=False):
        try:
            return msgpack.packb(value)
        except Exception:
            if safe:
                return NO_VALUE
            else:
                raise

    def _loads(self, value, safe=True):
        """
        pickle maintains the `CachedValue` wrapper of the tuple;
        msgpack does not, so it must be added back in.
        """
        try:
            value = msgpack.unpackb(value, use_list=False)
            return CachedValue(*value)
        except Exception:
            if safe:
                return NO_VALUE
            else:
                raise

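As a hedged sketch of what the docstring above describes: msgpack flattens the `CachedValue` (payload, metadata) pair into a plain tuple on write, so `_loads` has to re-wrap it on read. The payload and metadata below are invented:

# hypothetical round trip through MsgPackSerializer
serializer = MsgPackSerializer()
cached = CachedValue('payload', {'ct': time.time(), 'v': 1})

raw = serializer._dumps(tuple(cached))   # stored as a plain 2-tuple
restored = serializer._loads(raw)        # re-wrapped on the way out

assert isinstance(restored, CachedValue)
assert restored.payload == 'payload'
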
import fcntl
flock_org = fcntl.flock


class CustomLockFactory(FileLock):

    @memoized_property
    def _module(self):

        def gevent_flock(fd, operation):
            """
            Gevent compatible flock
            """
            # set non-blocking, this will cause an exception if we cannot acquire a lock
            operation |= fcntl.LOCK_NB
            start_lock_time = time.time()
            timeout = 60 * 15  # 15min
            while True:
                try:
                    flock_org(fd, operation)
                    # lock has been acquired
                    break
                except (OSError, IOError) as e:
                    # raise on errors other than "Resource temporarily unavailable"
                    if e.errno != errno.EAGAIN:
                        raise
                    elif (time.time() - start_lock_time) > timeout:
                        # waited too long for a lock, better to fail than loop forever
                        log.error('Failed to acquire lock on `%s` after waiting %ss',
                                  self.filename, timeout)
                        raise
                    wait_timeout = 0.03
                    log.debug('Failed to acquire lock on `%s`, retry in %ss',
                              self.filename, wait_timeout)
                    gevent.sleep(wait_timeout)

        fcntl.flock = gevent_flock
        return fcntl

class FileNamespaceBackend(PickleSerializer, file_backend.DBMBackend):
    key_prefix = 'file_backend'

    def __init__(self, arguments):
        arguments['lock_factory'] = CustomLockFactory
        db_file = arguments.get('filename')

        log.debug('initializing %s DB in %s', self.__class__.__name__, db_file)
        try:
            super(FileNamespaceBackend, self).__init__(arguments)
        except Exception:
            log.error('Failed to initialize db at: %s', db_file)
            raise

    def __repr__(self):
        return '{} `{}`'.format(self.__class__, self.filename)

    def list_keys(self, prefix=''):
        prefix = '{}:{}'.format(self.key_prefix, prefix)

        def cond(v):
            if not prefix:
                return True

            if v.startswith(prefix):
                return True
            return False

        with self._dbm_file(True) as dbm:
            try:
                return filter(cond, dbm.keys())
            except Exception:
                log.error('Failed to fetch DBM keys from DB: %s', self.get_store())
                raise

    def get_store(self):
        return self.filename

    def _dbm_get(self, key):
        with self._dbm_file(False) as dbm:
            if hasattr(dbm, 'get'):
                value = dbm.get(key, NO_VALUE)
            else:
                # gdbm objects lack a .get method
                try:
                    value = dbm[key]
                except KeyError:
                    value = NO_VALUE
            if value is not NO_VALUE:
                value = self._loads(value)
            return value

    def get(self, key):
        try:
            return self._dbm_get(key)
        except Exception:
            log.error('Failed to fetch DBM key %s from DB: %s', key, self.get_store())
            raise

    def set(self, key, value):
        with self._dbm_file(True) as dbm:
            dbm[key] = self._dumps(value)

    def set_multi(self, mapping):
        with self._dbm_file(True) as dbm:
            for key, value in mapping.items():
                dbm[key] = self._dumps(value)

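A quick usage sketch, assuming a writable temp path; `filename` is the only required argument here, and the key carries the `file_backend` prefix by hand to mimic what the key generator normally adds:

# hypothetical direct use of FileNamespaceBackend
fs_backend = FileNamespaceBackend({'filename': '/tmp/demo.cache.dbm'})

fs_backend.set('file_backend:demo', {'some': 'value'})  # pickled via PickleSerializer
print(fs_backend.get('file_backend:demo'))
print(list(fs_backend.list_keys(prefix='demo')))        # filters on 'file_backend:demo'
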
class BaseRedisBackend(redis_backend.RedisBackend):
    key_prefix = ''

    def __init__(self, arguments):
        super(BaseRedisBackend, self).__init__(arguments)
        self._lock_timeout = self.lock_timeout
        self._lock_auto_renewal = asbool(arguments.pop("lock_auto_renewal", True))

        if self._lock_auto_renewal and not self._lock_timeout:
            # set default timeout for auto_renewal
            self._lock_timeout = 30

    def _create_client(self):
        args = {}

        if self.url is not None:
            args.update(url=self.url)

        else:
            args.update(
                host=self.host, password=self.password,
                port=self.port, db=self.db
            )

        connection_pool = redis.ConnectionPool(**args)

        return redis.StrictRedis(connection_pool=connection_pool)

    def list_keys(self, prefix=''):
        prefix = '{}:{}*'.format(self.key_prefix, prefix)
        return self.client.keys(prefix)

    def get_store(self):
        return self.client.connection_pool

    def get(self, key):
        value = self.client.get(key)
        if value is None:
            return NO_VALUE
        return self._loads(value)

    def get_multi(self, keys):
        if not keys:
            return []
        values = self.client.mget(keys)
        loads = self._loads
        return [
            loads(v) if v is not None else NO_VALUE
            for v in values]

    def set(self, key, value):
        if self.redis_expiration_time:
            self.client.setex(key, self.redis_expiration_time,
                              self._dumps(value))
        else:
            self.client.set(key, self._dumps(value))

    def set_multi(self, mapping):
        dumps = self._dumps
        mapping = dict(
            (k, dumps(v))
            for k, v in mapping.items()
        )

        if not self.redis_expiration_time:
            self.client.mset(mapping)
        else:
            pipe = self.client.pipeline()
            for key, value in mapping.items():
                pipe.setex(key, self.redis_expiration_time, value)
            pipe.execute()

    def get_mutex(self, key):
        if self.distributed_lock:
            lock_key = redis_backend.u('_lock_{0}').format(key)
            return get_mutex_lock(self.client, lock_key, self._lock_timeout,
                                  auto_renewal=self._lock_auto_renewal)
        else:
            return None

class RedisPickleBackend(PickleSerializer, BaseRedisBackend):
    key_prefix = 'redis_pickle_backend'
    pass


class RedisMsgPackBackend(MsgPackSerializer, BaseRedisBackend):
    key_prefix = 'redis_msgpack_backend'
    pass


def get_mutex_lock(client, lock_key, lock_timeout, auto_renewal=False):
    import redis_lock

    class _RedisLockWrapper(object):
        """LockWrapper for redis_lock"""

        @classmethod
        def get_lock(cls):
            return redis_lock.Lock(
                redis_client=client,
                name=lock_key,
                expire=lock_timeout,
                auto_renewal=auto_renewal,
                strict=True,
            )

        def __repr__(self):
            return "{}:{}".format(self.__class__.__name__, lock_key)

        def __str__(self):
            return "{}:{}".format(self.__class__.__name__, lock_key)

        def __init__(self):
            self.lock = self.get_lock()
            self.lock_key = lock_key

        def acquire(self, wait=True):
            log.debug('Trying to acquire Redis lock for key %s', self.lock_key)
            try:
                acquired = self.lock.acquire(wait)
                log.debug('Got lock for key %s, %s', self.lock_key, acquired)
                return acquired
            except redis_lock.AlreadyAcquired:
                return False
            except redis_lock.AlreadyStarted:
                # a refresh thread exists, which also means we already hold the lock
                return True

        def release(self):
            try:
                self.lock.release()
            except redis_lock.NotAcquired:
                pass

    return _RedisLockWrapper()

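For illustration, a sketch of how the returned wrapper is typically driven (dogpile calls acquire/release around value creation); the client and key names here are assumptions:

# hypothetical manual use of the mutex wrapper
client = redis.StrictRedis()
mutex = get_mutex_lock(client, '_lock_repo_1', lock_timeout=30, auto_renewal=True)

if mutex.acquire(wait=False):
    try:
        pass  # critical section: compute and store the cached value
    finally:
        mutex.release()  # NotAcquired is swallowed, so this is always safe
else:
    log.debug('Lock %s is held by another worker, skipping', mutex)
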
@@ -1,418 +1,422 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2015-2020 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/
import os
import time
import logging
import functools
import threading

from dogpile.cache import CacheRegion
from dogpile.cache.util import compat

import rhodecode
from rhodecode.lib.utils import safe_str, sha1
from rhodecode.lib.utils2 import safe_unicode, str2bool
from rhodecode.model.db import Session, CacheKey, IntegrityError

from rhodecode.lib.rc_cache import cache_key_meta
from rhodecode.lib.rc_cache import region_meta

log = logging.getLogger(__name__)


def isCython(func):
    """
    Private helper that checks if a function is a cython function.
    """
    return func.__class__.__name__ == 'cython_function_or_method'

class RhodeCodeCacheRegion(CacheRegion):

    def conditional_cache_on_arguments(
            self, namespace=None,
            expiration_time=None,
            should_cache_fn=None,
            to_str=compat.string_type,
            function_key_generator=None,
            condition=True):
        """
        Custom conditional decorator that will not touch any dogpile internals
        if the condition isn't met. This works a bit differently than
        should_cache_fn, and it's faster in cases where we never want to
        compute cached values.
        """
        expiration_time_is_callable = compat.callable(expiration_time)

        if function_key_generator is None:
            function_key_generator = self.function_key_generator

        # workaround for py2 and cython problems, this block should be removed
        # once we've migrated to py3
        if 'cython' == 'cython':
            def decorator(fn):
                if to_str is compat.string_type:
                    # backwards compatible
                    key_generator = function_key_generator(namespace, fn)
                else:
                    key_generator = function_key_generator(namespace, fn, to_str=to_str)

                @functools.wraps(fn)
                def decorate(*arg, **kw):
                    key = key_generator(*arg, **kw)

                    @functools.wraps(fn)
                    def creator():
                        return fn(*arg, **kw)

                    if not condition:
                        return creator()

                    timeout = expiration_time() if expiration_time_is_callable \
                        else expiration_time

                    return self.get_or_create(key, creator, timeout, should_cache_fn)

                def invalidate(*arg, **kw):
                    key = key_generator(*arg, **kw)
                    self.delete(key)

                def set_(value, *arg, **kw):
                    key = key_generator(*arg, **kw)
                    self.set(key, value)

                def get(*arg, **kw):
                    key = key_generator(*arg, **kw)
                    return self.get(key)

                def refresh(*arg, **kw):
                    key = key_generator(*arg, **kw)
                    value = fn(*arg, **kw)
                    self.set(key, value)
                    return value

                decorate.set = set_
                decorate.invalidate = invalidate
                decorate.refresh = refresh
                decorate.get = get
                decorate.original = fn
                decorate.key_generator = key_generator
                decorate.__wrapped__ = fn

                return decorate
            return decorator

        def get_or_create_for_user_func(key_generator, user_func, *arg, **kw):

            if not condition:
                log.debug('Calling un-cached func:%s', user_func.func_name)
                start = time.time()
                result = user_func(*arg, **kw)
                total = time.time() - start
                log.debug('un-cached func:%s took %.4fs', user_func.func_name, total)
                return result

            key = key_generator(*arg, **kw)

            timeout = expiration_time() if expiration_time_is_callable \
                else expiration_time

            log.debug('Calling cached fn:%s', user_func.func_name)
            return self.get_or_create(key, user_func, timeout, should_cache_fn, (arg, kw))

        def cache_decorator(user_func):
            if to_str is compat.string_type:
                # backwards compatible
                key_generator = function_key_generator(namespace, user_func)
            else:
                key_generator = function_key_generator(namespace, user_func, to_str=to_str)

            def refresh(*arg, **kw):
                """
                Like invalidate, but regenerates the value instead
                """
                key = key_generator(*arg, **kw)
                value = user_func(*arg, **kw)
                self.set(key, value)
                return value

            def invalidate(*arg, **kw):
                key = key_generator(*arg, **kw)
                self.delete(key)

            def set_(value, *arg, **kw):
                key = key_generator(*arg, **kw)
                self.set(key, value)

            def get(*arg, **kw):
                key = key_generator(*arg, **kw)
                return self.get(key)

            user_func.set = set_
            user_func.invalidate = invalidate
            user_func.get = get
            user_func.refresh = refresh
            user_func.key_generator = key_generator
            user_func.original = user_func

            # Use `decorate` to preserve the signature of :param:`user_func`.
            return decorator.decorate(user_func, functools.partial(
                get_or_create_for_user_func, key_generator))

        return cache_decorator

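A hedged usage sketch of the decorator above; the region wiring is simplified (a plain memory backend and dogpile's default key generator) and all names are invented, but the `condition` switch is the behaviour implemented above:

# hypothetical usage of conditional_cache_on_arguments
region = RhodeCodeCacheRegion()
region.configure('dogpile.cache.memory', expiration_time=60)

caching_enabled = True  # e.g. derived from request flags or .ini settings

@region.conditional_cache_on_arguments(namespace='demo', condition=caching_enabled)
def heavy_compute(param):
    return param * 2

heavy_compute(21)  # cached via get_or_create; with condition=False the
                   # decorator calls the function directly and never touches dogpile
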
def make_region(*arg, **kw):
    return RhodeCodeCacheRegion(*arg, **kw)

def get_default_cache_settings(settings, prefixes=None):
    prefixes = prefixes or []
    cache_settings = {}
    for key in settings.keys():
        for prefix in prefixes:
            if key.startswith(prefix):
                name = key.split(prefix)[1].strip()
                val = settings[key]
                if isinstance(val, compat.string_types):
                    val = val.strip()
                cache_settings[name] = val
    return cache_settings

def compute_key_from_params(*args):
    """
    Helper to compute key from given params to be used in cache manager
    """
    return sha1("_".join(map(safe_str, args)))

def backend_key_generator(backend):
    """
    Special wrapper that also sends over the backend to the key generator
    """
    def wrapper(namespace, fn):
        return key_generator(backend, namespace, fn)
    return wrapper


def key_generator(backend, namespace, fn):
    fname = fn.__name__

    def generate_key(*args):
        backend_prefix = getattr(backend, 'key_prefix', None) or 'backend_prefix'
        namespace_pref = namespace or 'default_namespace'
        arg_key = compute_key_from_params(*args)
        final_key = "{}:{}:{}_{}".format(backend_prefix, namespace_pref, fname, arg_key)

        return final_key

    return generate_key

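To make the key layout concrete, a hypothetical walk-through; the backend and function below are stand-ins, and the sha1 comes from compute_key_from_params:

# hypothetical key_generator walk-through
class _StubBackend(object):
    key_prefix = 'stub_backend'

def _stub_func():
    pass

gen = key_generator(_StubBackend(), 'repo_1_namespace', _stub_func)
print(gen('one', 'two'))
# -> 'stub_backend:repo_1_namespace:_stub_func_<sha1 of "one_two">'
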
def get_or_create_region(region_name, region_namespace=None):
    from rhodecode.lib.rc_cache.backends import FileNamespaceBackend
    region_obj = region_meta.dogpile_cache_regions.get(region_name)
    if not region_obj:
        raise EnvironmentError(
            'Region `{}` not in configured regions: {}.'.format(
                region_name, region_meta.dogpile_cache_regions.keys()))

    region_uid_name = '{}:{}'.format(region_name, region_namespace)
    if isinstance(region_obj.actual_backend, FileNamespaceBackend):
        region_exist = region_meta.dogpile_cache_regions.get(region_namespace)
        if region_exist:
            log.debug('Using already configured region: %s', region_namespace)
            return region_exist
        cache_dir = region_meta.dogpile_config_defaults['cache_dir']
        expiration_time = region_obj.expiration_time

        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        new_region = make_region(
            name=region_uid_name,
            function_key_generator=backend_key_generator(region_obj.actual_backend)
        )
        namespace_filename = os.path.join(
            cache_dir, "{}.cache.dbm".format(region_namespace))
        # special type that allows 1 db per namespace
        new_region.configure(
            backend='dogpile.cache.rc.file_namespace',
            expiration_time=expiration_time,
            arguments={"filename": namespace_filename}
        )

        # create and save in region caches
        log.debug('configuring new region: %s', region_uid_name)
        region_obj = region_meta.dogpile_cache_regions[region_namespace] = new_region

    return region_obj

def clear_cache_namespace(cache_region, cache_namespace_uid, invalidate=False):
    region = get_or_create_region(cache_region, cache_namespace_uid)
    cache_keys = region.backend.list_keys(prefix=cache_namespace_uid)
    num_delete_keys = len(cache_keys)
    if invalidate:
        region.invalidate(hard=False)
    else:
        if num_delete_keys:
            region.delete_multi(cache_keys)
    return num_delete_keys

class ActiveRegionCache(object):
    def __init__(self, context, cache_data):
        self.context = context
        self.cache_data = cache_data

    def should_invalidate(self):
        return False


class FreshRegionCache(object):
    def __init__(self, context, cache_data):
        self.context = context
        self.cache_data = cache_data

    def should_invalidate(self):
        return True

class InvalidationContext(object):
    """
    usage::

        from rhodecode.lib import rc_cache

        cache_namespace_uid = CacheKey.SOME_NAMESPACE.format(1)
        region = rc_cache.get_or_create_region('cache_perms', cache_namespace_uid)

        @region.conditional_cache_on_arguments(namespace=cache_namespace_uid, condition=True)
        def heavy_compute(cache_name, param1, param2):
            print('COMPUTE {}, {}, {}'.format(cache_name, param1, param2))

        # invalidation namespace is shared namespace key for all process caches
        # we use it to send a global signal
        invalidation_namespace = 'repo_cache:1'

        inv_context_manager = rc_cache.InvalidationContext(
            uid=cache_namespace_uid, invalidation_namespace=invalidation_namespace)
        with inv_context_manager as invalidation_context:
            args = ('one', 'two')
            # re-compute and store cache if we get invalidate signal
            if invalidation_context.should_invalidate():
                result = heavy_compute.refresh(*args)
            else:
                result = heavy_compute(*args)

            compute_time = inv_context_manager.compute_time
            log.debug('result computed in %.4fs', compute_time)

        # To send global invalidation signal, simply run
        CacheKey.set_invalidate(invalidation_namespace)

    """

    def __repr__(self):
        return '<InvalidationContext:{}[{}]>'.format(
            safe_str(self.cache_key), safe_str(self.uid))

    def __init__(self, uid, invalidation_namespace='',
                 raise_exception=False, thread_scoped=None):
        self.uid = uid
        self.invalidation_namespace = invalidation_namespace
        self.raise_exception = raise_exception
        self.proc_id = safe_unicode(rhodecode.CONFIG.get('instance_id') or 'DEFAULT')
        self.thread_id = 'global'

        if thread_scoped is None:
            # if we set "default" we can override this via .ini settings
            thread_scoped = str2bool(rhodecode.CONFIG.get('cache_thread_scoped'))

        # Append the thread id to the cache key if this invalidation context
        # should be scoped to the current thread.
        if thread_scoped is True:
            self.thread_id = threading.current_thread().ident

        self.cache_key = compute_key_from_params(uid)
        self.cache_key = 'proc:{}|thread:{}|params:{}'.format(
            self.proc_id, self.thread_id, self.cache_key)
        self.compute_time = 0

    def get_or_create_cache_obj(self, cache_type, invalidation_namespace=''):
        invalidation_namespace = invalidation_namespace or self.invalidation_namespace
        # fetch all cache keys for this namespace and convert them to a map to find if we
        # have a specific cache_key object registered. We do this because we want to have
        # a consistent cache_state_uid for all newly registered objects
        cache_obj_map = CacheKey.get_namespace_map(invalidation_namespace)
        cache_obj = cache_obj_map.get(self.cache_key)
        log.debug('Fetched cache obj %s using %s cache key.', cache_obj, self.cache_key)
        if not cache_obj:
            new_cache_args = invalidation_namespace
            first_cache_obj = next(cache_obj_map.itervalues()) if cache_obj_map else None
            cache_state_uid = None
            if first_cache_obj:
                cache_state_uid = first_cache_obj.cache_state_uid
            cache_obj = CacheKey(self.cache_key, cache_args=new_cache_args,
                                 cache_state_uid=cache_state_uid)
            cache_key_meta.cache_keys_by_pid.append(self.cache_key)

        return cache_obj

    def __enter__(self):
        """
        Test if current object is valid, and return CacheRegion function
        that does invalidation and calculation
        """
        log.debug('Entering cache invalidation check context: %s', self.invalidation_namespace)
        # register or get a new key based on uid
        self.cache_obj = self.get_or_create_cache_obj(cache_type=self.uid)
        cache_data = self.cache_obj.get_dict()
        self._start_time = time.time()
        if self.cache_obj.cache_active:
            # means our cache obj exists and is marked as active, i.e. its
            # cache is not outdated, so we return ActiveRegionCache
            self.skip_cache_active_change = True

            return ActiveRegionCache(context=self, cache_data=cache_data)

        # the key either does not exist or is set to False; we return
        # the real invalidator, which re-computes the value. We additionally set
        # the flag to actually update the Database objects
        self.skip_cache_active_change = False
        return FreshRegionCache(context=self, cache_data=cache_data)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # save compute time
        self.compute_time = time.time() - self._start_time

        if self.skip_cache_active_change:
            return

        try:
            self.cache_obj.cache_active = True
            Session().add(self.cache_obj)
            Session().commit()
        except IntegrityError:
            # if we catch an integrity error, it means this object was already
            # inserted; the assumption is that this is a rare edge
            # race-condition case and it's safe to skip it
            Session().rollback()
        except Exception:
            log.exception('Failed to commit on cache key update')
            Session().rollback()
            if self.raise_exception:
                raise