##// END OF EJS Templates
caches: don't try to delete keys if there aren't any to delete.
marcink -
r2969:c92b412f default
parent child Browse files
Show More
@@ -1,320 +1,322 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2015-2018 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20 import os
21 21 import time
22 22 import logging
23 23 import functools
24 24 import threading
25 25
26 26 from dogpile.cache import CacheRegion
27 27 from dogpile.cache.util import compat
28 28
29 29 import rhodecode
30 30 from rhodecode.lib.utils import safe_str, sha1
31 31 from rhodecode.lib.utils2 import safe_unicode, str2bool
32 32 from rhodecode.model.db import Session, CacheKey, IntegrityError
33 33
34 34 from . import region_meta
35 35
36 36 log = logging.getLogger(__name__)
37 37
38 38
class RhodeCodeCacheRegion(CacheRegion):
    """
    CacheRegion subclass that adds a *conditional* flavour of
    ``cache_on_arguments``: when ``condition`` is falsy the decorated
    function is executed directly, without touching dogpile at all.
    """

    def conditional_cache_on_arguments(
            self, namespace=None,
            expiration_time=None,
            should_cache_fn=None,
            to_str=compat.string_type,
            function_key_generator=None,
            condition=True):
        """
        Custom conditional decorator, that will not touch any dogpile internals if
        condition isn't meet. This works a bit different than should_cache_fn
        And it's faster in cases we don't ever want to compute cached values
        """
        timeout_is_callable = compat.callable(expiration_time)

        if function_key_generator is None:
            function_key_generator = self.function_key_generator

        def decorator(fn):
            # backwards compatible: only pass to_str when a custom one is given
            if to_str is compat.string_type:
                make_key = function_key_generator(namespace, fn)
            else:
                make_key = function_key_generator(namespace, fn, to_str=to_str)

            @functools.wraps(fn)
            def decorate(*arg, **kw):
                cache_key = make_key(*arg, **kw)

                @functools.wraps(fn)
                def creator():
                    return fn(*arg, **kw)

                if not condition:
                    # caching disabled for this call: compute directly,
                    # bypassing dogpile entirely
                    return creator()

                if timeout_is_callable:
                    timeout = expiration_time()
                else:
                    timeout = expiration_time

                return self.get_or_create(cache_key, creator, timeout, should_cache_fn)

            def invalidate(*arg, **kw):
                # drop the cached value for these exact arguments
                self.delete(make_key(*arg, **kw))

            def set_(value, *arg, **kw):
                # store an explicit value under the key for these arguments
                self.set(make_key(*arg, **kw), value)

            def get(*arg, **kw):
                # fetch whatever is currently cached for these arguments
                return self.get(make_key(*arg, **kw))

            def refresh(*arg, **kw):
                # force re-computation and store the fresh value
                cache_key = make_key(*arg, **kw)
                value = fn(*arg, **kw)
                self.set(cache_key, value)
                return value

            # expose the same helper API as dogpile's cache_on_arguments
            decorate.set = set_
            decorate.invalidate = invalidate
            decorate.refresh = refresh
            decorate.get = get
            decorate.original = fn
            decorate.key_generator = make_key

            return decorate

        return decorator
109 109
110 110
def make_region(*arg, **kw):
    """Factory: build a :class:`RhodeCodeCacheRegion` (dogpile drop-in)."""
    region = RhodeCodeCacheRegion(*arg, **kw)
    return region
113 113
114 114
def get_default_cache_settings(settings, prefixes=None):
    """
    Extract cache-related entries from a flat settings mapping.

    :param settings: mapping of config keys to values (e.g. parsed .ini data)
    :param prefixes: list of key prefixes to match; each matching key is
        stored under its name with the prefix stripped. ``None``/empty means
        nothing matches and an empty dict is returned.
    :return: dict mapping the prefix-stripped (and whitespace-stripped) name
        to the value; string values are also whitespace-stripped.
    """
    prefixes = prefixes or []
    cache_settings = {}

    # Python 2/3 compatible string-type check (no-op change on Python 2)
    try:
        string_types = basestring  # noqa: F821
    except NameError:
        string_types = str

    for key in settings.keys():
        for prefix in prefixes:
            if key.startswith(prefix):
                # slice instead of key.split(prefix)[1]: split returns the
                # wrong segment when the prefix text occurs again later in
                # the key (e.g. 'cache.cache.foo' with prefix 'cache.')
                name = key[len(prefix):].strip()
                val = settings[key]
                if isinstance(val, string_types):
                    val = val.strip()
                cache_settings[name] = val
    return cache_settings
127 127
128 128
def compute_key_from_params(*args):
    """
    Helper to compute key from given params to be used in cache manager
    """
    key_material = "_".join(safe_str(a) for a in args)
    return sha1(key_material)
134 134
135 135
def key_generator(namespace, fn):
    """
    Return a dogpile-compatible key generator bound to *namespace* and *fn*:
    produced keys look like ``<namespace>:<func_name>_<sha1-of-args>``.
    """
    func_name = fn.__name__

    def generate_key(*args):
        prefix = namespace or 'default'
        params_key = compute_key_from_params(*args)
        return "{}:{}_{}".format(prefix, func_name, params_key)

    return generate_key
147 147
148 148
def get_or_create_region(region_name, region_namespace=None):
    """
    Return the configured dogpile cache region for *region_name*.

    For regions backed by :class:`FileNamespaceBackend` a dedicated
    per-namespace region is lazily created, so each namespace gets its own
    dbm file on disk; the new region is registered under *region_namespace*
    for reuse on subsequent calls.

    :param region_name: name of a region configured in ``region_meta``
    :param region_namespace: optional namespace used both as the registry
        key for file-backed regions and as the dbm filename stem
    :raises EnvironmentError: when *region_name* was never configured
    """
    from rhodecode.lib.rc_cache.backends import FileNamespaceBackend
    region_obj = region_meta.dogpile_cache_regions.get(region_name)
    if not region_obj:
        raise EnvironmentError(
            'Region `{}` not in configured regions: {}.'.format(
                region_name, region_meta.dogpile_cache_regions.keys()))

    region_uid_name = '{}:{}'.format(region_name, region_namespace)
    if isinstance(region_obj.actual_backend, FileNamespaceBackend):
        # NOTE(review): per-namespace regions are registered/looked up under
        # region_namespace (not region_uid_name) — confirm this is intended
        region_exist = region_meta.dogpile_cache_regions.get(region_namespace)
        if region_exist:
            log.debug('Using already configured region: %s', region_namespace)
            return region_exist
        cache_dir = region_meta.dogpile_config_defaults['cache_dir']
        expiration_time = region_obj.expiration_time

        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        new_region = make_region(
            name=region_uid_name, function_key_generator=key_generator
        )
        namespace_filename = os.path.join(
            cache_dir, "{}.cache.dbm".format(region_namespace))
        # special type that allows 1db per namespace
        new_region.configure(
            backend='dogpile.cache.rc.file_namespace',
            expiration_time=expiration_time,
            arguments={"filename": namespace_filename}
        )

        # create and save in region caches
        log.debug('configuring new region: %s', region_uid_name)
        region_obj = region_meta.dogpile_cache_regions[region_namespace] = new_region

    return region_obj
185 185
186 186
def clear_cache_namespace(cache_region, cache_namespace_uid):
    """
    Remove every cached key under *cache_namespace_uid* from the region.

    :param cache_region: region name to resolve via get_or_create_region
    :param cache_namespace_uid: key prefix identifying the namespace
    :return: number of keys that were deleted (0 when nothing matched)
    """
    region = get_or_create_region(cache_region, cache_namespace_uid)
    cache_keys = region.backend.list_keys(prefix=cache_namespace_uid)
    num_delete_keys = len(cache_keys)
    # don't call delete_multi with an empty key list — nothing to delete
    if num_delete_keys:
        region.delete_multi(cache_keys)
    return num_delete_keys
192 194
193 195
class ActiveRegionCache(object):
    """Returned by InvalidationContext when the cached value is still valid."""

    def __init__(self, context):
        # the InvalidationContext that produced this result
        self.context = context

    def should_invalidate(self):
        """An active cache never requires re-computation."""
        return False
200 202
201 203
class FreshRegionCache(object):
    """Returned by InvalidationContext when the value must be re-computed."""

    def __init__(self, context):
        # the InvalidationContext that produced this result
        self.context = context

    def should_invalidate(self):
        """A fresh (missing/invalidated) cache always requires re-computation."""
        return True
208 210
209 211
class InvalidationContext(object):
    """
    Context manager coordinating cache validity with the CacheKey DB table.

    On enter it looks up (or creates) a CacheKey row for this context's
    computed key and returns either :class:`ActiveRegionCache` (cache still
    valid) or :class:`FreshRegionCache` (caller should recompute). On exit
    it marks the key active again, unless the cache was already active.

    usage::

        from rhodecode.lib import rc_cache

        cache_namespace_uid = CacheKey.SOME_NAMESPACE.format(1)
        region = rc_cache.get_or_create_region('cache_perms', cache_namespace_uid)

        @region.conditional_cache_on_arguments(namespace=cache_namespace_uid, condition=True)
        def heavy_compute(cache_name, param1, param2):
            print('COMPUTE {}, {}, {}'.format(cache_name, param1, param2))

        # invalidation namespace is shared namespace key for all process caches
        # we use it to send a global signal
        invalidation_namespace = 'repo_cache:1'

        inv_context_manager = rc_cache.InvalidationContext(
            uid=cache_namespace_uid, invalidation_namespace=invalidation_namespace)
        with inv_context_manager as invalidation_context:
            args = ('one', 'two')
            # re-compute and store cache if we get invalidate signal
            if invalidation_context.should_invalidate():
                result = heavy_compute.refresh(*args)
            else:
                result = heavy_compute(*args)

            compute_time = inv_context_manager.compute_time
            log.debug('result computed in %.3fs', compute_time)

        # To send global invalidation signal, simply run
        CacheKey.set_invalidate(invalidation_namespace)

    """

    def __repr__(self):
        return '<InvalidationContext:{}[{}]>'.format(
            safe_str(self.cache_key), safe_str(self.uid))

    def __init__(self, uid, invalidation_namespace='',
                 raise_exception=False, thread_scoped=None):
        """
        :param uid: unique id used to build this context's cache key
        :param invalidation_namespace: shared namespace stored on newly
            created CacheKey rows, used for cross-process invalidation
        :param raise_exception: re-raise commit errors from __exit__ when True
        :param thread_scoped: scope the cache key to the current thread;
            None means read the default from the 'cache_thread_scoped' config
        """
        self.uid = uid
        self.invalidation_namespace = invalidation_namespace
        self.raise_exception = raise_exception
        # per-process part of the key; falls back to 'DEFAULT' without an
        # instance_id in config
        self.proc_id = safe_unicode(rhodecode.CONFIG.get('instance_id') or 'DEFAULT')
        self.thread_id = 'global'

        if thread_scoped is None:
            # if we set "default" we can override this via .ini settings
            thread_scoped = str2bool(rhodecode.CONFIG.get('cache_thread_scoped'))

        # Append the thread id to the cache key if this invalidation context
        # should be scoped to the current thread.
        if thread_scoped is True:
            self.thread_id = threading.current_thread().ident

        # final key: proc:<proc_id>_thread:<thread_id>_<sha1-of-uid>
        self.cache_key = compute_key_from_params(uid)
        self.cache_key = 'proc:{}_thread:{}_{}'.format(
            self.proc_id, self.thread_id, self.cache_key)
        # wall-clock seconds spent inside the with-block, set on __exit__
        self.compute_time = 0

    def get_or_create_cache_obj(self, uid, invalidation_namespace=''):
        """Fetch the active CacheKey row for this key, or build a new one."""
        cache_obj = CacheKey.get_active_cache(self.cache_key)
        log.debug('Fetched cache obj %s using %s cache key.', cache_obj, self.cache_key)
        invalidation_namespace = invalidation_namespace or self.invalidation_namespace
        if not cache_obj:
            # not persisted here; __exit__ adds it to the session if needed
            cache_obj = CacheKey(self.cache_key, cache_args=invalidation_namespace)
        return cache_obj

    def __enter__(self):
        """
        Test if current object is valid, and return CacheRegion function
        that does invalidation and calculation
        """
        # register or get a new key based on uid
        self.cache_obj = self.get_or_create_cache_obj(uid=self.uid)
        self._start_time = time.time()
        if self.cache_obj.cache_active:
            # means our cache obj is existing and marked as it's
            # cache is not outdated, we return ActiveRegionCache
            self.skip_cache_active_change = True

            return ActiveRegionCache(context=self)

        # the key is either not existing or set to False, we return
        # the real invalidator which re-computes value. We additionally set
        # the flag to actually update the Database objects
        self.skip_cache_active_change = False
        return FreshRegionCache(context=self)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # save compute time
        self.compute_time = time.time() - self._start_time

        if self.skip_cache_active_change:
            # cache was already active on enter — nothing to persist
            return

        try:
            # mark the key active so subsequent contexts get ActiveRegionCache
            self.cache_obj.cache_active = True
            Session().add(self.cache_obj)
            Session().commit()
        except IntegrityError:
            # if we catch integrity error, it means we inserted this object
            # assumption is that's really an edge race-condition case and
            # it's safe is to skip it
            Session().rollback()
        except Exception:
            log.exception('Failed to commit on cache key update')
            Session().rollback()
            if self.raise_exception:
                raise
General Comments 0
You need to be logged in to leave comments. Login now