##// END OF EJS Templates
py3: fixes for python3
super-admin -
r1045:7571f5a6 python3
parent child Browse files
Show More
@@ -1,390 +1,384 b''
1 1 import sys
2 2 import threading
3 3 import weakref
4 4 from base64 import b64encode
5 5 from logging import getLogger
6 6 from os import urandom
7 7
8 8 from redis import StrictRedis
9 9
10 10 __version__ = '3.7.0'
11 11
# One logger per lock lifecycle event, all rooted under the "vcsserver." prefix.
loggers = {
    event: getLogger("vcsserver.%s.%s" % (__name__, event))
    for event in (
        "acquire",
        "refresh.thread.start",
        "refresh.thread.stop",
        "refresh.thread.exit",
        "refresh.start",
        "refresh.shutdown",
        "refresh.exit",
        "release",
    )
}
25 25
# Python-3-only build: keep the old py2/py3 compat aliases for callers that
# still reference them, but drop the dead version-detection branch.
text_type = str
binary_type = bytes
34 28
35 29
# Lua scripts executed atomically server-side by redis.
# Conventions used below (see Lock's call sites):
#   KEYS[1] = lock key ("lock:<name>"), KEYS[2] = signal list ("lock-signal:<name>")
#   ARGV[1] = lock id, ARGV[2] = signal expire in milliseconds

# Check if the id matches. If not, return an error code (1).
# On success, recreates the signal list and pushes a token so a blocked
# acquire() waiting in BLPOP wakes up.
UNLOCK_SCRIPT = b"""
    if redis.call("get", KEYS[1]) ~= ARGV[1] then
        return 1
    else
        redis.call("del", KEYS[2])
        redis.call("lpush", KEYS[2], 1)
        redis.call("pexpire", KEYS[2], ARGV[2])
        redis.call("del", KEYS[1])
        return 0
    end
"""

# Covers both cases when key doesn't exist and doesn't equal to lock's id:
# returns 1 when not held by this id, 2 when the key has no TTL.
EXTEND_SCRIPT = b"""
    if redis.call("get", KEYS[1]) ~= ARGV[1] then
        return 1
    elseif redis.call("ttl", KEYS[1]) < 0 then
        return 2
    else
        redis.call("expire", KEYS[1], ARGV[2])
        return 0
    end
"""

# Unconditionally deletes the lock (no id check) and signals any waiters.
RESET_SCRIPT = b"""
    redis.call('del', KEYS[2])
    redis.call('lpush', KEYS[2], 1)
    redis.call('pexpire', KEYS[2], ARGV[2])
    return redis.call('del', KEYS[1])
"""

# Deletes every "lock:*" key and signals the matching "lock-signal:*" lists;
# returns the number of locks removed.
RESET_ALL_SCRIPT = b"""
    local locks = redis.call('keys', 'lock:*')
    local signal
    for _, lock in pairs(locks) do
        signal = 'lock-signal:' .. string.sub(lock, 6)
        redis.call('del', signal)
        redis.call('lpush', signal, 1)
        redis.call('expire', signal, 1)
        redis.call('del', lock)
    end
    return #locks
"""
80 74
81 75
class AlreadyAcquired(RuntimeError):
    """Raised by acquire() when this Lock instance already holds the lock."""


class NotAcquired(RuntimeError):
    """Raised when releasing/extending a lock that is not held or has expired."""


class AlreadyStarted(RuntimeError):
    """Raised when the lock-renewal thread is started a second time."""


class TimeoutNotUsable(RuntimeError):
    """Raised when a timeout is supplied together with blocking=False."""


class InvalidTimeout(RuntimeError):
    """Raised when the acquire() timeout is not a usable value."""


class TimeoutTooLarge(RuntimeError):
    """Raised when the acquire() timeout exceeds the lock's expire time."""


class NotExpirable(RuntimeError):
    """Raised when extending a lock that has no assigned expiration time."""
108 102
109 103
class Lock(object):
    """
    A Lock context manager implemented via redis SETNX/BLPOP.
    """
    # Per-class registered redis Script objects; filled in by register_scripts().
    unlock_script = None
    extend_script = None
    reset_script = None
    reset_all_script = None

    def __init__(self, redis_client, name, expire=None, id=None, auto_renewal=False, strict=True, signal_expire=1000):
        """
        :param redis_client:
            An instance of :class:`~StrictRedis`.
        :param name:
            The name (redis key) the lock should have.
        :param expire:
            The lock expiry time in seconds. If left at the default (None)
            the lock will not expire.
        :param id:
            The ID (redis value) the lock should have. A random value is
            generated when left at the default.

            Note that if you specify this then the lock is marked as "held". Acquires
            won't be possible.
        :param auto_renewal:
            If set to ``True``, Lock will automatically renew the lock so that it
            doesn't expire for as long as the lock is held (acquire() called
            or running in a context manager).

            Implementation note: Renewal will happen using a daemon thread with
            an interval of ``expire*2/3``. If wishing to use a different renewal
            time, subclass Lock, call ``super().__init__()`` then set
            ``self._lock_renewal_interval`` to your desired interval.
        :param strict:
            If set ``True`` then the ``redis_client`` needs to be an instance of ``redis.StrictRedis``.
        :param signal_expire:
            Advanced option to override signal list expiration in milliseconds. Increase it for very slow clients. Default: ``1000``.

        :raises ValueError:
            On a non-StrictRedis client in strict mode, a negative ``expire``,
            or ``auto_renewal`` without ``expire``.
        :raises TypeError:
            When ``id`` is neither bytes nor str.
        """
        if strict and not isinstance(redis_client, StrictRedis):
            raise ValueError("redis_client must be instance of StrictRedis. "
                             "Use strict=False if you know what you're doing.")
        if auto_renewal and expire is None:
            raise ValueError("Expire may not be None when auto_renewal is set")

        self._client = redis_client

        if expire:
            expire = int(expire)
            if expire < 0:
                raise ValueError("A negative expire is not acceptable.")
        else:
            expire = None
        self._expire = expire

        self._signal_expire = signal_expire
        if id is None:
            # 18 random bytes -> 24 chars of base64, always ascii-safe.
            self._id = b64encode(urandom(18)).decode('ascii')
        elif isinstance(id, binary_type):
            try:
                self._id = id.decode('ascii')
            except UnicodeDecodeError:
                # Non-ascii bytes ids are stored base64-encoded.
                self._id = b64encode(id).decode('ascii')
        elif isinstance(id, text_type):
            self._id = id
        else:
            raise TypeError("Incorrect type for `id`. Must be bytes/str not %s." % type(id))
        self._name = 'lock:' + name
        self._signal = 'lock-signal:' + name
        # Renew at 2/3 of the expiry so the key never lapses while held.
        self._lock_renewal_interval = (float(expire) * 2 / 3
                                       if auto_renewal
                                       else None)
        self._lock_renewal_thread = None

        self.register_scripts(redis_client)

    @classmethod
    def register_scripts(cls, redis_client):
        """Register the Lua scripts with redis (idempotent per class)."""
        global reset_all_script
        if reset_all_script is None:
            # Also populate the module-level handle used by reset_all().
            reset_all_script = redis_client.register_script(RESET_ALL_SCRIPT)
        cls.unlock_script = redis_client.register_script(UNLOCK_SCRIPT)
        cls.extend_script = redis_client.register_script(EXTEND_SCRIPT)
        cls.reset_script = redis_client.register_script(RESET_SCRIPT)
        cls.reset_all_script = redis_client.register_script(RESET_ALL_SCRIPT)

    @property
    def _held(self):
        # Held means the value stored at the lock key equals our id.
        return self.id == self.get_owner_id()

    def reset(self):
        """
        Forcibly deletes the lock. Use this with care.
        """
        self.reset_script(client=self._client, keys=(self._name, self._signal), args=(self.id, self._signal_expire))

    @property
    def id(self):
        return self._id

    def get_owner_id(self):
        """Return the id stored at the lock key (decoded), or None if unset."""
        owner_id = self._client.get(self._name)
        if isinstance(owner_id, binary_type):
            owner_id = owner_id.decode('ascii', 'replace')
        return owner_id

    def acquire(self, blocking=True, timeout=None):
        """
        :param blocking:
            Boolean value specifying whether lock should be blocking or not.
        :param timeout:
            An integer value specifying the maximum number of seconds to block.
        """
        logger = loggers["acquire"]

        logger.debug("Getting blocking: %s acquire on %r ...", blocking, self._name)

        if self._held:
            owner_id = self.get_owner_id()
            raise AlreadyAcquired("Already acquired from this Lock instance. Lock id: {}".format(owner_id))

        if not blocking and timeout is not None:
            raise TimeoutNotUsable("Timeout cannot be used if blocking=False")

        if timeout:
            timeout = int(timeout)
            if timeout < 0:
                # BUGFIX: message used to say "less than or equal to 0" although
                # only negative values are rejected here.
                raise InvalidTimeout("Timeout (%d) cannot be less than 0" % timeout)

            if self._expire and not self._lock_renewal_interval and timeout > self._expire:
                raise TimeoutTooLarge("Timeout (%d) cannot be greater than expire (%d)" % (timeout, self._expire))

        busy = True
        blpop_timeout = timeout or self._expire or 0
        timed_out = False
        while busy:
            # SET NX is the actual acquisition; EX applies the expiry atomically.
            busy = not self._client.set(self._name, self._id, nx=True, ex=self._expire)
            if busy:
                if timed_out:
                    return False
                elif blocking:
                    # Wait on the signal list the unlock script LPUSHes to.
                    # A BLPOP miss only counts as a timeout when one was given.
                    timed_out = not self._client.blpop(self._signal, blpop_timeout) and timeout
                else:
                    logger.warning("Failed to get %r.", self._name)
                    return False

        logger.debug("Got lock for %r.", self._name)
        if self._lock_renewal_interval is not None:
            self._start_lock_renewer()
        return True

    def extend(self, expire=None):
        """Extends expiration time of the lock.

        :param expire:
            New expiration time. If ``None`` - `expire` provided during
            lock initialization will be taken.
        :raises NotAcquired: the lock is not held (or already expired).
        :raises NotExpirable: the lock key has no TTL to extend.
        """
        if expire:
            expire = int(expire)
            if expire < 0:
                raise ValueError("A negative expire is not acceptable.")
        elif self._expire is not None:
            expire = self._expire
        else:
            raise TypeError(
                "To extend a lock 'expire' must be provided as an "
                "argument to extend() method or at initialization time."
            )

        error = self.extend_script(client=self._client, keys=(self._name, self._signal), args=(self._id, expire))
        if error == 1:
            raise NotAcquired("Lock %s is not acquired or it already expired." % self._name)
        elif error == 2:
            raise NotExpirable("Lock %s has no assigned expiration time" % self._name)
        elif error:
            raise RuntimeError("Unsupported error code %s from EXTEND script" % error)

    @staticmethod
    def _lock_renewer(lockref, interval, stop):
        """
        Renew the lock key in redis every `interval` seconds for as long
        as the `stop` event is not set and the lock object is still alive.
        """
        while not stop.wait(timeout=interval):
            loggers["refresh.thread.start"].debug("Refreshing lock")
            # Weakref: don't keep the Lock alive just because renewal runs.
            lock = lockref()
            if lock is None:
                loggers["refresh.thread.stop"].debug(
                    "The lock no longer exists, stopping lock refreshing"
                )
                break
            lock.extend(expire=lock._expire)
            del lock
        loggers["refresh.thread.exit"].debug("Exit requested, stopping lock refreshing")

    def _start_lock_renewer(self):
        """
        Starts the lock refresher thread.
        """
        if self._lock_renewal_thread is not None:
            raise AlreadyStarted("Lock refresh thread already started")

        loggers["refresh.start"].debug(
            "Starting thread to refresh lock every %s seconds",
            self._lock_renewal_interval
        )
        self._lock_renewal_stop = threading.Event()
        self._lock_renewal_thread = threading.Thread(
            group=None,
            target=self._lock_renewer,
            # daemon=True replaces the deprecated setDaemon() call; the renewal
            # thread must never block interpreter shutdown.
            daemon=True,
            kwargs={'lockref': weakref.ref(self),
                    'interval': self._lock_renewal_interval,
                    'stop': self._lock_renewal_stop}
        )
        self._lock_renewal_thread.start()

    def _stop_lock_renewer(self):
        """
        Stop the lock renewer.

        This signals the renewal thread and waits for its exit.
        """
        if self._lock_renewal_thread is None or not self._lock_renewal_thread.is_alive():
            return
        loggers["refresh.shutdown"].debug("Signalling the lock refresher to stop")
        self._lock_renewal_stop.set()
        self._lock_renewal_thread.join()
        self._lock_renewal_thread = None
        loggers["refresh.exit"].debug("Lock refresher has stopped")

    def __enter__(self):
        acquired = self.acquire(blocking=True)
        assert acquired, "Lock wasn't acquired, but blocking=True"
        return self

    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
        self.release()

    def release(self):
        """Releases the lock, that was acquired with the same object.

        .. note::

            If you want to release a lock that you acquired in a different place you have two choices:

            * Use ``Lock("name", id=id_from_other_place).release()``
            * Use ``Lock("name").reset()``
        """
        if self._lock_renewal_thread is not None:
            self._stop_lock_renewer()
        loggers["release"].debug("Releasing %r.", self._name)
        error = self.unlock_script(client=self._client, keys=(self._name, self._signal), args=(self._id, self._signal_expire))
        if error == 1:
            raise NotAcquired("Lock %s is not acquired or it already expired." % self._name)
        elif error:
            # BUGFIX: this path runs the UNLOCK script, not EXTEND.
            raise RuntimeError("Unsupported error code %s from UNLOCK script." % error)

    def locked(self):
        """
        Return true if the lock is acquired.

        Checks that lock with same name already exists. This method returns true, even if
        lock have another id.
        """
        return self._client.exists(self._name) == 1
376 370
377 371
# Module-level handle for the RESET_ALL Lua script; populated lazily the first
# time Lock.register_scripts() runs against a client.
reset_all_script = None


def reset_all(redis_client):
    """
    Forcibly deletes all locks and their remains (e.g. after a crash). Use this with care.

    :param redis_client:
        An instance of :class:`~StrictRedis`.
    """
    # Ensures the module-level reset_all_script above has been registered.
    Lock.register_scripts(redis_client)

    reset_all_script(client=redis_client)  # noqa
@@ -1,75 +1,68 b''
1 1
2 2
3 3 import functools
4
5 # Use timer that's not susceptible to time of day adjustments.
6 try:
7 # perf_counter is only present on Py3.3+
8 from time import perf_counter as time_now
9 except ImportError:
10 # fall back to using time
11 from time import time as time_now
4 from time import perf_counter as time_now
12 5
13 6
def safe_wraps(wrapper, *args, **kwargs):
    """Safely wraps partial functions.

    Unwinds nested ``functools.partial`` layers down to the underlying
    callable before delegating to :func:`functools.wraps`, so the decorated
    function inherits the real target's metadata.
    """
    target = wrapper
    while isinstance(target, functools.partial):
        target = target.func
    return functools.wraps(target, *args, **kwargs)
19 12
20 13
class Timer(object):
    """A context manager/decorator for statsd.timing()."""

    def __init__(self, client, stat, rate=1, tags=None, use_decimals=True, auto_send=True):
        # Statsd client and metric configuration.
        self.client = client
        self.stat = stat
        self.rate = rate
        self.tags = tags
        # Measurement state: elapsed milliseconds, one-shot sent flag, start stamp.
        self.ms = None
        self._sent = False
        self._start_time = None
        self.use_decimals = use_decimals
        self.auto_send = auto_send

    def __call__(self, f):
        """Thread-safe timing function decorator."""
        @safe_wraps(f)
        def _wrapped(*call_args, **call_kwargs):
            # Keep the timestamp on the stack (not on self) so concurrent
            # invocations of the decorated function don't clobber each other.
            began = time_now()
            try:
                return f(*call_args, **call_kwargs)
            finally:
                elapsed_ms = 1000.0 * (time_now() - began)
                self.client.timing(self.stat, elapsed_ms, self.rate, self.tags, self.use_decimals)
                self._sent = True
        return _wrapped

    def __enter__(self):
        return self.start()

    def __exit__(self, typ, value, tb):
        self.stop(send=self.auto_send)

    def start(self):
        """Reset measurement state and record the starting timestamp."""
        self.ms = None
        self._sent = False
        self._start_time = time_now()
        return self

    def stop(self, send=True):
        """Record the elapsed time; dispatch it immediately unless told not to."""
        if self._start_time is None:
            raise RuntimeError('Timer has not started.')
        self.ms = 1000.0 * (time_now() - self._start_time)  # Convert to milliseconds.
        if send:
            self.send()
        return self

    def send(self):
        """Dispatch the recorded timing to the client, exactly once."""
        if self.ms is None:
            raise RuntimeError('No data recorded.')
        if self._sent:
            raise RuntimeError('Already sent data.')
        self._sent = True
        self.client.timing(self.stat, self.ms, self.rate, self.tags, self.use_decimals)
@@ -1,261 +1,207 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import os
19 19 import time
20 20 import logging
21 21 import functools
22 import decorator
22 23
23 24 from dogpile.cache import CacheRegion
24 25
25 26 from vcsserver.utils import safe_str, sha1
26 27 from vcsserver.lib.rc_cache import region_meta
27 28
28 29 log = logging.getLogger(__name__)
29 30
30 31
class RhodeCodeCacheRegion(CacheRegion):

    def conditional_cache_on_arguments(
            self, namespace=None,
            expiration_time=None,
            should_cache_fn=None,
            to_str=str,
            function_key_generator=None,
            condition=True):
        """
        Custom conditional decorator, that will not touch any dogpile internals if
        condition isn't met. This works a bit different than should_cache_fn
        And it's faster in cases we don't ever want to compute cached values
        """
        # expiration_time may be a zero-arg callable producing a per-call timeout.
        expiration_time_is_callable = callable(expiration_time)

        if function_key_generator is None:
            # Fall back to the region's default key generator.
            function_key_generator = self.function_key_generator

        def get_or_create_for_user_func(key_generator, user_func, *arg, **kw):
            # Runs on every decorated call (bound via decorator.decorate below).

            if not condition:
                # Bypass dogpile entirely: call the wrapped function directly
                # and only log the timing.
                log.debug('Calling un-cached method:%s', user_func.__name__)
                start = time.time()
                result = user_func(*arg, **kw)
                total = time.time() - start
                log.debug('un-cached method:%s took %.4fs', user_func.__name__, total)
                return result

            key = key_generator(*arg, **kw)

            timeout = expiration_time() if expiration_time_is_callable \
                else expiration_time

            log.debug('Calling cached method:`%s`', user_func.__name__)
            return self.get_or_create(key, user_func, timeout, should_cache_fn, (arg, kw))

        def cache_decorator(user_func):
            if to_str is str:
                # backwards compatible
                key_generator = function_key_generator(namespace, user_func)
            else:
                key_generator = function_key_generator(namespace, user_func, to_str=to_str)

            def refresh(*arg, **kw):
                """
                Like invalidate, but regenerates the value instead
                """
                key = key_generator(*arg, **kw)
                value = user_func(*arg, **kw)
                self.set(key, value)
                return value

            def invalidate(*arg, **kw):
                # Drop the cached value for these arguments.
                key = key_generator(*arg, **kw)
                self.delete(key)

            def set_(value, *arg, **kw):
                # Store an explicit value under the key for these arguments.
                key = key_generator(*arg, **kw)
                self.set(key, value)

            def get(*arg, **kw):
                # Fetch the cached value (if any) for these arguments.
                key = key_generator(*arg, **kw)
                return self.get(key)

            # Expose cache-management helpers as attributes on the function.
            user_func.set = set_
            user_func.invalidate = invalidate
            user_func.get = get
            user_func.refresh = refresh
            user_func.key_generator = key_generator
            user_func.original = user_func

            # Use `decorate` to preserve the signature of :param:`user_func`.
            return decorator.decorate(user_func, functools.partial(
                get_or_create_for_user_func, key_generator))

        return cache_decorator
163 109
164 110
def make_region(*arg, **kw):
    """Factory returning a :class:`RhodeCodeCacheRegion` built from the given arguments."""
    region = RhodeCodeCacheRegion(*arg, **kw)
    return region
167 113
168 114
def get_default_cache_settings(settings, prefixes=None):
    """Collect settings whose keys start with any of ``prefixes``.

    The matching prefix is stripped from the key; string values are stripped
    of surrounding whitespace. Returns a new dict.
    """
    prefixes = prefixes or []
    cache_settings = {}
    for full_key, raw_val in settings.items():
        for prefix in prefixes:
            if not full_key.startswith(prefix):
                continue
            short_name = full_key.split(prefix)[1].strip()
            value = raw_val.strip() if isinstance(raw_val, str) else raw_val
            cache_settings[short_name] = value
    return cache_settings
181 127
182 128
def compute_key_from_params(*args):
    """
    Helper to compute key from given params to be used in cache manager
    """
    joined = "_".join(safe_str(arg) for arg in args)
    return sha1(joined)
188 134
189 135
def backend_key_generator(backend):
    """
    Special wrapper that also sends over the backend to the key generator
    """
    def _with_backend(namespace, fn):
        # Curry the backend into the regular key_generator signature.
        return key_generator(backend, namespace, fn)
    return _with_backend
197 143
198 144
def key_generator(backend, namespace, fn):
    """Build a key function producing ``<backend>:<namespace>:<fname>_<args-hash>``."""
    fname = fn.__name__

    def generate_key(*args):
        # Fall back to placeholder segments when backend prefix/namespace are unset.
        backend_prefix = getattr(backend, 'key_prefix', None) or 'backend_prefix'
        namespace_pref = namespace or 'default_namespace'
        arg_key = compute_key_from_params(*args)
        return "{}:{}:{}_{}".format(backend_prefix, namespace_pref, fname, arg_key)

    return generate_key
211 157
212 158
def get_or_create_region(region_name, region_namespace=None):
    """Return the configured dogpile region for ``region_name``.

    For file-namespace backends a dedicated region (one DBM file per
    ``region_namespace``) is lazily created, cached and returned instead.

    :raises EnvironmentError: when ``region_name`` was never configured.
    """
    from vcsserver.lib.rc_cache.backends import FileNamespaceBackend
    region_obj = region_meta.dogpile_cache_regions.get(region_name)
    if not region_obj:
        raise EnvironmentError(
            'Region `{}` not in configured: {}.'.format(
                region_name, region_meta.dogpile_cache_regions.keys()))

    region_uid_name = '{}:{}'.format(region_name, region_namespace)
    if isinstance(region_obj.actual_backend, FileNamespaceBackend):
        # NOTE(review): the cache below is keyed by `region_namespace` while the
        # initial lookup above uses `region_name` -- confirm this asymmetry is
        # intentional (a namespace colliding with a region name would be reused).
        region_exist = region_meta.dogpile_cache_regions.get(region_namespace)
        if region_exist:
            log.debug('Using already configured region: %s', region_namespace)
            return region_exist
        cache_dir = region_meta.dogpile_config_defaults['cache_dir']
        expiration_time = region_obj.expiration_time

        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        new_region = make_region(
            name=region_uid_name,
            function_key_generator=backend_key_generator(region_obj.actual_backend)
        )
        namespace_filename = os.path.join(
            cache_dir, "{}.cache.dbm".format(region_namespace))
        # special type that allows 1db per namespace
        new_region.configure(
            backend='dogpile.cache.rc.file_namespace',
            expiration_time=expiration_time,
            arguments={"filename": namespace_filename}
        )

        # create and save in region caches
        log.debug('configuring new region: %s', region_uid_name)
        region_obj = region_meta.dogpile_cache_regions[region_namespace] = new_region

    return region_obj
250 196
251 197
def clear_cache_namespace(cache_region, cache_namespace_uid, invalidate=False):
    """Clear all keys under ``cache_namespace_uid`` in the given region.

    With ``invalidate=True`` the region is soft-invalidated instead of the
    keys being deleted. Returns the number of keys that matched the prefix.
    """
    region = get_or_create_region(cache_region, cache_namespace_uid)
    cache_keys = region.backend.list_keys(prefix=cache_namespace_uid)
    num_delete_keys = len(cache_keys)
    if invalidate:
        region.invalidate(hard=False)
    elif num_delete_keys:
        region.delete_multi(cache_keys)
    return num_delete_keys
General Comments 0
You need to be logged in to leave comments. Login now