caches: allow debug of max_size reached by the MemoryLRU cache.
marcink
r2887:2a9499df default
@@ -1,174 +1,181 b''
# -*- coding: utf-8 -*-

# Copyright (C) 2015-2018 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/
import time
import errno
import logging

import gevent

from dogpile.cache.backends import memory as memory_backend
from dogpile.cache.backends import file as file_backend
from dogpile.cache.backends import redis as redis_backend
from dogpile.cache.backends.file import NO_VALUE, compat, FileLock
from dogpile.cache.util import memoized_property
from lru import LRU as LRUDict


_default_max_size = 1024

log = logging.getLogger(__name__)


class LRUMemoryBackend(memory_backend.MemoryBackend):
    pickle_values = False

    def __init__(self, arguments):
        max_size = arguments.pop('max_size', _default_max_size)
-        arguments['cache_dict'] = LRUDict(max_size)
+        callback = None
+        if arguments.pop('log_max_size_reached', None):
+            def evicted(key, value):
+                log.debug(
+                    'LRU: evicting key `%s` due to max size %s reached', key, max_size)
+            callback = evicted
+
+        arguments['cache_dict'] = LRUDict(max_size, callback=callback)
        super(LRUMemoryBackend, self).__init__(arguments)

    def delete(self, key):
        if key in self._cache:
            del self._cache[key]

    def delete_multi(self, keys):
        for key in keys:
            if key in self._cache:
                del self._cache[key]


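To exercise the new `log_max_size_reached` switch, the backend can be registered and wired into a dogpile region. A minimal sketch, assuming the module lives at `rhodecode.lib.rc_cache.backends` and using an illustrative alias (the real registration name may differ):

    import logging
    logging.basicConfig(level=logging.DEBUG)

    from dogpile.cache import make_region, register_backend

    # 'rc_cache.memory_lru' is an assumed alias for this sketch
    register_backend(
        'rc_cache.memory_lru', 'rhodecode.lib.rc_cache.backends',
        'LRUMemoryBackend')

    region = make_region().configure(
        'rc_cache.memory_lru',
        arguments={
            'max_size': 2,                 # tiny cache to force evictions
            'log_max_size_reached': True,  # the new debug switch
        },
    )

    for i in range(4):
        region.set('key-%d' % i, i)  # the two oldest keys get evicted, each logged
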
class Serializer(object):
    def _dumps(self, value):
        return compat.pickle.dumps(value)

    def _loads(self, value):
        return compat.pickle.loads(value)


class CustomLockFactory(FileLock):

    @memoized_property
    def _module(self):
        import fcntl
        flock_org = fcntl.flock

        def gevent_flock(fd, operation):
            """
            Gevent-compatible flock
            """
            # set non-blocking, this will cause an exception if we cannot acquire the lock
            operation |= fcntl.LOCK_NB
            start_lock_time = time.time()
            timeout = 60 * 5  # 5min
            while True:
                try:
                    flock_org(fd, operation)
                    # lock has been acquired
                    break
                except (OSError, IOError) as e:
                    # raise on errors other than "Resource temporarily unavailable"
                    if e.errno != errno.EAGAIN:
                        raise
                    elif (time.time() - start_lock_time) > timeout:
                        # waited too long on the lock, better to fail than loop forever
                        raise

                    log.debug('Failed to acquire lock, retrying in 0.1s')
                    gevent.sleep(0.1)

        fcntl.flock = gevent_flock
        return fcntl


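The retry loop above hinges on flock failing fast with EAGAIN once LOCK_NB is set, instead of parking the whole gevent hub in a blocking syscall. A standalone demonstration of that behaviour (assumes a POSIX system; the path is arbitrary):

    import errno
    import fcntl

    # two independent opens of the same file create two open file
    # descriptions, so their flocks conflict even within one process
    with open('/tmp/flock-demo', 'w') as fd1, open('/tmp/flock-demo', 'w') as fd2:
        fcntl.flock(fd1, fcntl.LOCK_EX | fcntl.LOCK_NB)
        try:
            fcntl.flock(fd2, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except (OSError, IOError) as e:
            # this is the branch gevent_flock handles by sleeping and retrying
            assert e.errno == errno.EAGAIN
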
class FileNamespaceBackend(Serializer, file_backend.DBMBackend):

    def __init__(self, arguments):
        arguments['lock_factory'] = CustomLockFactory
        super(FileNamespaceBackend, self).__init__(arguments)

    def list_keys(self, prefix=''):
        def cond(v):
            if not prefix:
                return True

            if v.startswith(prefix):
                return True
            return False

        with self._dbm_file(True) as dbm:
            return filter(cond, dbm.keys())

    def get_store(self):
        return self.filename

    def get(self, key):
        with self._dbm_file(False) as dbm:
            if hasattr(dbm, 'get'):
                value = dbm.get(key, NO_VALUE)
            else:
                # gdbm objects lack a .get method
                try:
                    value = dbm[key]
                except KeyError:
                    value = NO_VALUE
            if value is not NO_VALUE:
                value = self._loads(value)
            return value

    def set(self, key, value):
        with self._dbm_file(True) as dbm:
            dbm[key] = self._dumps(value)

    def set_multi(self, mapping):
        with self._dbm_file(True) as dbm:
            for key, value in mapping.items():
                dbm[key] = self._dumps(value)


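For a quick smoke test the backend can also be instantiated directly, bypassing the region machinery; dogpile's DBMBackend only needs a `filename` argument (the path below is arbitrary):

    # a minimal sketch; values round-trip through the Serializer pickle mixin
    backend = FileNamespaceBackend({'filename': '/tmp/rc_cache_demo.dbm'})
    backend.set('rc:answer', {'value': 42})
    assert backend.get('rc:answer') == {'value': 42}
    assert 'rc:answer' in backend.list_keys(prefix='rc:')
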
class RedisPickleBackend(Serializer, redis_backend.RedisBackend):
    def list_keys(self, prefix=''):
        if prefix:
            prefix = prefix + '*'
        return self.client.keys(prefix)

    def get_store(self):
        return self.client.connection_pool

    def set(self, key, value):
        if self.redis_expiration_time:
            self.client.setex(key, self.redis_expiration_time,
                              self._dumps(value))
        else:
            self.client.set(key, self._dumps(value))

    def set_multi(self, mapping):
        mapping = dict(
            (k, self._dumps(v))
            for k, v in mapping.items()
        )

        if not self.redis_expiration_time:
            self.client.mset(mapping)
        else:
            pipe = self.client.pipeline()
            for key, value in mapping.items():
                pipe.setex(key, self.redis_expiration_time, value)
            pipe.execute()
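Configuration follows the stock dogpile redis backend: connection settings plus `redis_expiration_time`, which routes writes through the SETEX / pipelined SETEX paths above. A sketch, assuming a local Redis and the same illustrative registration pattern as earlier:

    from dogpile.cache import make_region, register_backend

    # assumed alias and module path, for illustration only
    register_backend(
        'rc_cache.redis_pickle', 'rhodecode.lib.rc_cache.backends',
        'RedisPickleBackend')

    region = make_region().configure(
        'rc_cache.redis_pickle',
        arguments={
            'host': 'localhost',
            'port': 6379,
            'db': 0,
            'redis_expiration_time': 300,  # seconds; enables the setex path
        },
    )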