##// END OF EJS Templates
caches: improve logging.
marcink -
r2938:8ed0cc06 default
parent child Browse files
Show More
@@ -1,319 +1,319 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2015-2018 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20 import os
21 21 import time
22 22 import logging
23 23 import functools
24 24 import threading
25 25
26 26 from dogpile.cache import CacheRegion
27 27 from dogpile.cache.util import compat
28 28
29 29 import rhodecode
30 30 from rhodecode.lib.utils import safe_str, sha1
31 31 from rhodecode.lib.utils2 import safe_unicode, str2bool
32 32 from rhodecode.model.db import Session, CacheKey, IntegrityError
33 33
34 34 from . import region_meta
35 35
36 36 log = logging.getLogger(__name__)
37 37
38 38
class RhodeCodeCacheRegion(CacheRegion):
    """CacheRegion subclass that adds a conditional caching decorator."""

    def conditional_cache_on_arguments(
            self, namespace=None,
            expiration_time=None,
            should_cache_fn=None,
            to_str=compat.string_type,
            function_key_generator=None,
            condition=True):
        """
        Custom conditional decorator, that will not touch any dogpile internals if
        condition isn't meet. This works a bit different than should_cache_fn
        And it's faster in cases we don't ever want to compute cached values
        """
        expiration_is_callable = compat.callable(expiration_time)

        if function_key_generator is None:
            function_key_generator = self.function_key_generator

        def decorator(user_func):
            # default `to_str` keeps the backwards-compatible two-argument
            # key generator signature
            if to_str is compat.string_type:
                make_key = function_key_generator(namespace, user_func)
            else:
                make_key = function_key_generator(
                    namespace, user_func, to_str=to_str)

            @functools.wraps(user_func)
            def decorate(*arg, **kw):
                cache_key = make_key(*arg, **kw)

                @functools.wraps(user_func)
                def creator():
                    return user_func(*arg, **kw)

                # bypass dogpile entirely when caching is disabled
                if not condition:
                    return creator()

                if expiration_is_callable:
                    timeout = expiration_time()
                else:
                    timeout = expiration_time

                return self.get_or_create(
                    cache_key, creator, timeout, should_cache_fn)

            def invalidate(*arg, **kw):
                # drop the cached value stored under these arguments
                self.delete(make_key(*arg, **kw))

            def set_(value, *arg, **kw):
                # force-store `value` under the key for these arguments
                self.set(make_key(*arg, **kw), value)

            def get(*arg, **kw):
                # fetch whatever the region currently holds for these arguments
                return self.get(make_key(*arg, **kw))

            def refresh(*arg, **kw):
                # recompute, store, and return a fresh value
                value = user_func(*arg, **kw)
                self.set(make_key(*arg, **kw), value)
                return value

            decorate.set = set_
            decorate.invalidate = invalidate
            decorate.refresh = refresh
            decorate.get = get
            decorate.original = user_func
            decorate.key_generator = make_key

            return decorate

        return decorator
109 109
110 110
def make_region(*arg, **kw):
    """Factory for our CacheRegion subclass; mirrors dogpile's make_region()."""
    return RhodeCodeCacheRegion(*arg, **kw)
113 113
114 114
def get_default_cache_settings(settings, prefixes=None):
    """
    Extract cache-related entries from a flat settings dict.

    :param settings: mapping of setting name -> value (e.g. parsed .ini)
    :param prefixes: list of key prefixes to match; matched keys are returned
        with the prefix stripped. ``None``/empty means no keys match.
    :return: dict of stripped-key -> value for every matching setting
    """
    prefixes = prefixes or []
    cache_settings = {}
    for key in settings.keys():
        for prefix in prefixes:
            if key.startswith(prefix):
                # slice off the prefix instead of str.split(prefix):
                # split breaks when the prefix substring occurs again
                # later inside the key (yields the wrong/empty segment)
                name = key[len(prefix):].strip()
                val = settings[key]
                if isinstance(val, basestring):
                    val = val.strip()
                cache_settings[name] = val
    return cache_settings
127 127
128 128
def compute_key_from_params(*args):
    """
    Helper to compute key from given params to be used in cache manager
    """
    joined = "_".join(safe_str(arg) for arg in args)
    return sha1(joined)
134 134
135 135
def key_generator(namespace, fn):
    """Return a key-building callable bound to *namespace* and function *fn*."""
    func_name = fn.__name__

    def generate_key(*args):
        # namespace-less callers all share the 'default' prefix
        prefix = namespace or 'default'
        params_key = compute_key_from_params(*args)
        return "{}:{}_{}".format(prefix, func_name, params_key)

    return generate_key
147 147
148 148
def get_or_create_region(region_name, region_namespace=None):
    """
    Look up a configured dogpile cache region by name; for file-backed
    regions, lazily create (and memoize) a per-namespace sub-region so each
    namespace gets its own DBM file.

    :param region_name: name of a region already registered in
        ``region_meta.dogpile_cache_regions``
    :param region_namespace: optional namespace used to split file-backed
        regions into one cache file per namespace
    :raises EnvironmentError: if ``region_name`` was never configured
    """
    # local import to avoid a circular import at module load time
    from rhodecode.lib.rc_cache.backends import FileNamespaceBackend
    region_obj = region_meta.dogpile_cache_regions.get(region_name)
    if not region_obj:
        raise EnvironmentError(
            'Region `{}` not in configured: {}.'.format(
                region_name, region_meta.dogpile_cache_regions.keys()))

    region_uid_name = '{}:{}'.format(region_name, region_namespace)
    if isinstance(region_obj.actual_backend, FileNamespaceBackend):
        # NOTE(review): per-namespace regions are keyed by region_namespace
        # alone, while the region's display name is region_uid_name — so two
        # different region_names sharing a namespace would collide; confirm
        # this is intended before changing.
        region_exist = region_meta.dogpile_cache_regions.get(region_namespace)
        if region_exist:
            log.debug('Using already configured region: %s', region_namespace)
            return region_exist
        cache_dir = region_meta.dogpile_config_defaults['cache_dir']
        expiration_time = region_obj.expiration_time

        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        new_region = make_region(
            name=region_uid_name, function_key_generator=key_generator
        )
        # one cache DBM file per namespace
        namespace_filename = os.path.join(
            cache_dir, "{}.cache.dbm".format(region_namespace))
        # special type that allows 1db per namespace
        new_region.configure(
            backend='dogpile.cache.rc.file_namespace',
            expiration_time=expiration_time,
            arguments={"filename": namespace_filename}
        )

        # create and save in region caches
        log.debug('configuring new region: %s',region_uid_name)
        region_obj = region_meta.dogpile_cache_regions[region_namespace] = new_region

    return region_obj
185 185
186 186
def clear_cache_namespace(cache_region, cache_namespace_uid):
    """Delete every cached entry under the given namespace; return the count."""
    region = get_or_create_region(cache_region, cache_namespace_uid)
    keys_to_drop = region.backend.list_keys(prefix=cache_namespace_uid)
    region.delete_multi(keys_to_drop)
    return len(keys_to_drop)
192 192
193 193
class ActiveRegionCache(object):
    """Marker returned by InvalidationContext when the cache entry is valid."""

    def __init__(self, context):
        # keep a reference back to the owning invalidation context
        self.context = context

    def should_invalidate(self):
        """Cache is active — nothing needs recomputing."""
        return False
200 200
201 201
class FreshRegionCache(object):
    """Marker returned by InvalidationContext when the value must be recomputed."""

    def __init__(self, context):
        # keep a reference back to the owning invalidation context
        self.context = context

    def should_invalidate(self):
        """Cache entry is missing or stale — caller should recompute."""
        return True
208 208
209 209
class InvalidationContext(object):
    """
    usage::

        from rhodecode.lib import rc_cache

        cache_namespace_uid = CacheKey.SOME_NAMESPACE.format(1)
        region = rc_cache.get_or_create_region('cache_perms', cache_namespace_uid)

        @region.conditional_cache_on_arguments(namespace=cache_namespace_uid, condition=True)
        def heavy_compute(cache_name, param1, param2):
            print('COMPUTE {}, {}, {}'.format(cache_name, param1, param2))

        # invalidation namespace is shared namespace key for all process caches
        # we use it to send a global signal
        invalidation_namespace = 'repo_cache:1'

        inv_context_manager = rc_cache.InvalidationContext(
            uid=cache_namespace_uid, invalidation_namespace=invalidation_namespace)
        with inv_context_manager as invalidation_context:
            # check for stored invalidation signal, and maybe purge the cache
            # before computing it again
            if invalidation_context.should_invalidate():
                heavy_compute.invalidate('some_name', 'param1', 'param2')

            result = heavy_compute('some_name', 'param1', 'param2')
            compute_time = inv_context_manager.compute_time
            print(compute_time)

        # To send global invalidation signal, simply run
        CacheKey.set_invalidate(invalidation_namespace)

    """

    def __repr__(self):
        return '<InvalidationContext:{}[{}]>'.format(
            safe_str(self.cache_key), safe_str(self.uid))

    def __init__(self, uid, invalidation_namespace='',
                 raise_exception=False, thread_scoped=None):
        """
        :param uid: unique id of the cached computation; used to build the
            database-backed cache key
        :param invalidation_namespace: shared namespace key used as a global
            invalidation signal across processes
        :param raise_exception: re-raise unexpected errors hit while saving
            the cache-key state in ``__exit__``
        :param thread_scoped: scope the cache key to the current thread;
            ``None`` defers to the ``cache_thread_scoped`` .ini setting
        """
        self.uid = uid
        self.invalidation_namespace = invalidation_namespace
        self.raise_exception = raise_exception
        # each instance (process) gets its own key segment
        self.proc_id = safe_unicode(rhodecode.CONFIG.get('instance_id') or 'DEFAULT')
        self.thread_id = 'global'

        if thread_scoped is None:
            # if we set "default" we can override this via .ini settings
            thread_scoped = str2bool(rhodecode.CONFIG.get('cache_thread_scoped'))

        # Append the thread id to the cache key if this invalidation context
        # should be scoped to the current thread.
        if thread_scoped is True:
            self.thread_id = threading.current_thread().ident

        # final key shape: proc:<instance>_thread:<id>_<sha1-of-uid>
        self.cache_key = compute_key_from_params(uid)
        self.cache_key = 'proc:{}_thread:{}_{}'.format(
            self.proc_id, self.thread_id, self.cache_key)
        self.compute_time = 0

    def get_or_create_cache_obj(self, uid, invalidation_namespace=''):
        """
        Fetch the active CacheKey DB row for this context's cache key, or
        build a fresh (unsaved) one bound to the invalidation namespace.

        NOTE(review): the ``uid`` parameter is accepted but the lookup uses
        ``self.cache_key`` — presumably intentional; confirm before relying
        on the argument.
        """
        cache_obj = CacheKey.get_active_cache(self.cache_key)
        log.debug('Fetched cache obj %s using %s cache key.', cache_obj, self.cache_key)
        invalidation_namespace = invalidation_namespace or self.invalidation_namespace
        if not cache_obj:
            # not persisted here; __exit__ adds it to the session when the
            # cache state actually changes
            cache_obj = CacheKey(self.cache_key, cache_args=invalidation_namespace)
        return cache_obj

    def __enter__(self):
        """
        Test if current object is valid, and return CacheRegion function
        that does invalidation and calculation
        """
        # register or get a new key based on uid
        self.cache_obj = self.get_or_create_cache_obj(uid=self.uid)
        # timer for compute_time reported in __exit__
        self._start_time = time.time()
        if self.cache_obj.cache_active:
            # means our cache obj is existing and marked as it's
            # cache is not outdated, we return ActiveRegionCache
            self.skip_cache_active_change = True

            return ActiveRegionCache(context=self)

        # the key is either not existing or set to False, we return
        # the real invalidator which re-computes value. We additionally set
        # the flag to actually update the Database objects
        self.skip_cache_active_change = False
        return FreshRegionCache(context=self)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # save compute time
        self.compute_time = time.time() - self._start_time

        # nothing changed while entering: leave the DB row alone
        if self.skip_cache_active_change:
            return

        try:
            # mark the (possibly new) cache key row as active again
            self.cache_obj.cache_active = True
            Session().add(self.cache_obj)
            Session().commit()
        except IntegrityError:
            # if we catch integrity error, it means we inserted this object
            # assumption is that's really an edge race-condition case and
            # it's safe is to skip it
            Session().rollback()
        except Exception:
            log.exception('Failed to commit on cache key update')
            Session().rollback()
            if self.raise_exception:
                raise
General Comments 0
You need to be logged in to leave comments. Login now