cache: allow controlling lock_auto_renewal via .ini config
super-admin | r949:6224e9fb default
@@ -1,206 +1,209 @@
## -*- coding: utf-8 -*-

; #################################
; RHODECODE VCSSERVER CONFIGURATION
; #################################

[server:main]
; COMMON HOST/IP CONFIG
host = 127.0.0.1
port = 9900


; ###########################
; GUNICORN APPLICATION SERVER
; ###########################

; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini

; Module to use, this setting shouldn't be changed
use = egg:gunicorn#main

; Sets the number of process workers. More workers means more concurrent connections
; RhodeCode can handle at the same time. Each additional worker also increases
; memory usage, as each has its own set of caches.
; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
; than 8-10 unless for really big deployments, e.g. 700-1000 users.
; `instance_id = *` must be set in the [app:main] section below (which is the default)
; when using more than 1 worker.
workers = 2

; Gunicorn access log level
loglevel = info

; Process name visible in process list
proc_name = rhodecode_vcsserver

; Type of worker class, one of `sync`, `gevent`
; currently `sync` is the only option allowed.
worker_class = sync

; The maximum number of simultaneous clients. Valid only for gevent
worker_connections = 10

; Max number of requests that worker will handle before being gracefully restarted.
; Prevents memory leaks; jitter adds variability so not all workers are restarted at once.
max_requests = 1000
max_requests_jitter = 30

; Amount of time a worker can spend handling a request before it
; gets killed and restarted. By default set to 21600 (6hrs)
; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
timeout = 21600

; The maximum size of HTTP request line in bytes.
; 0 for unlimited
limit_request_line = 0

; Limit the number of HTTP header fields in a request.
; By default this value is 100 and can't be larger than 32768.
limit_request_fields = 32768

; Limit the allowed size of an HTTP request header field.
; Value is a positive number or 0.
; Setting it to 0 will allow unlimited header field sizes.
limit_request_field_size = 0

; Timeout for graceful workers restart.
; After receiving a restart signal, workers have this much time to finish
; serving requests. Workers still alive after the timeout (starting from the
; receipt of the restart signal) are force killed.
; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
graceful_timeout = 3600

# The number of seconds to wait for requests on a Keep-Alive connection.
# Generally set in the 1-5 seconds range.
keepalive = 2

; Maximum memory usage that each worker can use before it will receive a
; graceful restart signal. 0 = memory monitoring is disabled
; Examples: 268435456 (256MB), 536870912 (512MB)
; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
memory_max_usage = 0

; How often in seconds to check memory usage for each gunicorn worker
memory_usage_check_interval = 60

; Threshold value for which we don't recycle the worker if GarbageCollection
; frees up enough resources. Before each restart we try to run GC on the worker;
; if we get enough free memory after that, the restart will not happen.
memory_usage_recovery_threshold = 0.8


[app:main]
; The %(here)s variable will be replaced with the absolute path of the parent
; directory of this file
use = egg:rhodecode-vcsserver

; Pyramid default locales, we need this to be set
pyramid.default_locale_name = en

; default locale used by VCS systems
locale = en_US.UTF-8

; path to binaries for vcsserver, it should be set by the installer
; at installation time, e.g. /home/user/vcsserver-1/profile/bin
; it can also be a path to nix-build output in case of development
core.binary_dir = ""

; Custom exception store path, defaults to TMPDIR
; This is used to store exceptions from RhodeCode in a shared directory
#exception_tracker.store_path =

; #############
; DOGPILE CACHE
; #############

; Default cache dir for caches. Putting this into a ramdisk can boost performance.
; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space
cache_dir = %(here)s/data

; ***************************************
; `repo_object` cache, default file based
; ***************************************

; `repo_object` cache settings for vcs methods for repositories
rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace

; cache auto-expires after N seconds
; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
rc_cache.repo_object.expiration_time = 2592000

; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
#rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db

; ***********************************************************
; `repo_object` cache with redis backend
; recommended for larger instances, and for better performance
; ***********************************************************

; `repo_object` cache settings for vcs methods for repositories
#rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack

; cache auto-expires after N seconds
; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
#rc_cache.repo_object.expiration_time = 2592000

; redis_expiration_time needs to be greater than expiration_time
#rc_cache.repo_object.arguments.redis_expiration_time = 3592000

#rc_cache.repo_object.arguments.host = localhost
#rc_cache.repo_object.arguments.port = 6379
#rc_cache.repo_object.arguments.db = 5
#rc_cache.repo_object.arguments.socket_timeout = 30
; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
#rc_cache.repo_object.arguments.distributed_lock = true

+; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
+#rc_cache.repo_object.arguments.lock_auto_renewal = true
+
; Statsd client config
#statsd.enabled = false
#statsd.statsd_host = 0.0.0.0
#statsd.statsd_port = 8125
#statsd.statsd_prefix =
#statsd.statsd_ipv6 = false

; #####################
; LOGGING CONFIGURATION
; #####################
[loggers]
keys = root, vcsserver

[handlers]
keys = console

[formatters]
keys = generic

; #######
; LOGGERS
; #######
[logger_root]
level = NOTSET
handlers = console

[logger_vcsserver]
level = DEBUG
handlers =
qualname = vcsserver
propagate = 1


; ########
; HANDLERS
; ########

[handler_console]
class = StreamHandler
args = (sys.stderr, )
level = INFO
formatter = generic

; ##########
; FORMATTERS
; ##########

[formatter_generic]
format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %Y-%m-%d %H:%M:%S
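
The new `lock_auto_renewal` key rides the existing `rc_cache.<region>.arguments.*` mechanism: everything under that prefix is collected from the .ini and handed to the dogpile backend constructor. Below is a minimal sketch of that collection step; the helper name and the hand-rolled boolean coercion are illustrative only, not the actual RhodeCode settings machinery:

def collect_backend_arguments(settings, region='repo_object'):
    # Hypothetical helper: gather `rc_cache.<region>.arguments.*` keys into
    # the dict that dogpile passes to the backend constructor.
    prefix = 'rc_cache.{}.arguments.'.format(region)
    arguments = {}
    for key, value in settings.items():
        if key.startswith(prefix):
            if value in ('true', 'false'):  # .ini values arrive as strings
                value = (value == 'true')
            arguments[key[len(prefix):]] = value
    return arguments

settings = {
    'rc_cache.repo_object.arguments.distributed_lock': 'true',
    'rc_cache.repo_object.arguments.lock_auto_renewal': 'true',
}
print(collect_backend_arguments(settings))
# -> {'distributed_lock': True, 'lock_auto_renewal': True}
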
@@ -1,307 +1,311 @@
# RhodeCode VCSServer provides access to different vcs backends via network.
# Copyright (C) 2014-2020 RhodeCode GmbH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

import time
import errno
import logging

import msgpack
import redis

from dogpile.cache.api import CachedValue
from dogpile.cache.backends import memory as memory_backend
from dogpile.cache.backends import file as file_backend
from dogpile.cache.backends import redis as redis_backend
from dogpile.cache.backends.file import NO_VALUE, compat, FileLock
from dogpile.cache.util import memoized_property

from vcsserver.lib.memory_lru_dict import LRUDict, LRUDictDebug


_default_max_size = 1024

log = logging.getLogger(__name__)


class LRUMemoryBackend(memory_backend.MemoryBackend):
    key_prefix = 'lru_mem_backend'
    pickle_values = False

    def __init__(self, arguments):
        max_size = arguments.pop('max_size', _default_max_size)

        LRUDictClass = LRUDict
        if arguments.pop('log_key_count', None):
            LRUDictClass = LRUDictDebug

        arguments['cache_dict'] = LRUDictClass(max_size)
        super(LRUMemoryBackend, self).__init__(arguments)

    def delete(self, key):
        try:
            del self._cache[key]
        except KeyError:
            # we don't care if key isn't there at deletion
            pass

    def delete_multi(self, keys):
        for key in keys:
            self.delete(key)


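`LRUMemoryBackend` replaces dogpile's unbounded memory dict with vcsserver's `LRUDict`, capped at `max_size` (1024 by default), and its `delete` tolerates missing keys. A small sketch of the eviction behaviour this depends on, assuming `LRUDict(max_size)` drops the least recently used entry once the cap is exceeded:

from vcsserver.lib.memory_lru_dict import LRUDict

cache = LRUDict(2)   # illustrative max_size of 2
cache['a'] = 1
cache['b'] = 2
cache['c'] = 3       # 'a', the least recently used key, should be evicted
assert 'a' not in cache and 'b' in cache and 'c' in cache
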
class PickleSerializer(object):

    def _dumps(self, value, safe=False):
        try:
            return compat.pickle.dumps(value)
        except Exception:
            if safe:
                return NO_VALUE
            else:
                raise

    def _loads(self, value, safe=True):
        try:
            return compat.pickle.loads(value)
        except Exception:
            if safe:
                return NO_VALUE
            else:
                raise


class MsgPackSerializer(object):

    def _dumps(self, value, safe=False):
        try:
            return msgpack.packb(value)
        except Exception:
            if safe:
                return NO_VALUE
            else:
                raise

    def _loads(self, value, safe=True):
        """
        pickle maintained the `CachedValue` wrapper of the tuple
        msgpack does not, so it must be added back in.
        """
        try:
            value = msgpack.unpackb(value, use_list=False)
            return CachedValue(*value)
        except Exception:
            if safe:
                return NO_VALUE
            else:
                raise


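The `_loads` docstring captures the subtlety here: pickle round-trips dogpile's `CachedValue` (a tuple subclass carrying payload plus metadata) intact, while msgpack flattens it to a plain tuple, so the wrapper has to be rebuilt. A quick illustration; the metadata dict is a made-up stand-in for what dogpile actually stores:

import msgpack
from dogpile.cache.api import CachedValue

original = CachedValue('payload', {'ct': 1600000000.0, 'v': 1})
packed = msgpack.packb(original)                    # serialized as a plain 2-tuple
unpacked = msgpack.unpackb(packed, use_list=False)  # wrapper type is lost here
restored = CachedValue(*unpacked)                   # re-wrap, as _loads does
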
import fcntl
flock_org = fcntl.flock


class CustomLockFactory(FileLock):

    pass


class FileNamespaceBackend(PickleSerializer, file_backend.DBMBackend):
    key_prefix = 'file_backend'

    def __init__(self, arguments):
        arguments['lock_factory'] = CustomLockFactory
        db_file = arguments.get('filename')

        log.debug('initializing %s DB in %s', self.__class__.__name__, db_file)
        try:
            super(FileNamespaceBackend, self).__init__(arguments)
        except Exception:
            log.error('Failed to initialize db at: %s', db_file)
            raise

    def __repr__(self):
        return '{} `{}`'.format(self.__class__, self.filename)

    def list_keys(self, prefix=''):
        prefix = '{}:{}'.format(self.key_prefix, prefix)

        def cond(v):
            if not prefix:
                return True

            if v.startswith(prefix):
                return True
            return False

        with self._dbm_file(True) as dbm:
            try:
                return filter(cond, dbm.keys())
            except Exception:
                log.error('Failed to fetch DBM keys from DB: %s', self.get_store())
                raise

    def get_store(self):
        return self.filename

    def _dbm_get(self, key):
        with self._dbm_file(False) as dbm:
            if hasattr(dbm, 'get'):
                value = dbm.get(key, NO_VALUE)
            else:
                # gdbm objects lack a .get method
                try:
                    value = dbm[key]
                except KeyError:
                    value = NO_VALUE
            if value is not NO_VALUE:
                value = self._loads(value)
            return value

    def get(self, key):
        try:
            return self._dbm_get(key)
        except Exception:
            log.error('Failed to fetch DBM key %s from DB: %s', key, self.get_store())
            raise

    def set(self, key, value):
        with self._dbm_file(True) as dbm:
            dbm[key] = self._dumps(value)

    def set_multi(self, mapping):
        with self._dbm_file(True) as dbm:
            for key, value in mapping.items():
                dbm[key] = self._dumps(value)


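`FileNamespaceBackend` is a thin layer over dogpile's DBM file backend: pickle serialization, a custom lock factory, and a `file_backend:` key prefix that `list_keys` filters on. A direct-instantiation sketch, assuming a writable /tmp path; in production dogpile builds this backend from the `rc_cache.repo_object.arguments.filename` setting:

# Minimal sketch; the path and key are illustrative only.
backend = FileNamespaceBackend({'filename': '/tmp/vcsserver_cache_demo.db'})
backend.set('file_backend:demo', {'commits': 42})   # pickled into the DBM file
assert backend.get('file_backend:demo') == {'commits': 42}
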
class BaseRedisBackend(redis_backend.RedisBackend):
+    key_prefix = ''
+
+    def __init__(self, arguments):
+        super(BaseRedisBackend, self).__init__(arguments)
+        self._lock_timeout = self.lock_timeout
+        self._lock_auto_renewal = arguments.pop("lock_auto_renewal", False)
+
+        if self._lock_auto_renewal and not self._lock_timeout:
+            # set default timeout for auto_renewal
+            self._lock_timeout = 60

    def _create_client(self):
        args = {}

        if self.url is not None:
            args.update(url=self.url)

        else:
            args.update(
                host=self.host, password=self.password,
                port=self.port, db=self.db
            )

        connection_pool = redis.ConnectionPool(**args)

        return redis.StrictRedis(connection_pool=connection_pool)

    def list_keys(self, prefix=''):
        prefix = '{}:{}*'.format(self.key_prefix, prefix)
        return self.client.keys(prefix)

    def get_store(self):
        return self.client.connection_pool

    def get(self, key):
        value = self.client.get(key)
        if value is None:
            return NO_VALUE
        return self._loads(value)

    def get_multi(self, keys):
        if not keys:
            return []
        values = self.client.mget(keys)
        loads = self._loads
        return [
            loads(v) if v is not None else NO_VALUE
            for v in values]

    def set(self, key, value):
        if self.redis_expiration_time:
            self.client.setex(key, self.redis_expiration_time,
                              self._dumps(value))
        else:
            self.client.set(key, self._dumps(value))

    def set_multi(self, mapping):
        dumps = self._dumps
        mapping = dict(
            (k, dumps(v))
            for k, v in mapping.items()
        )

        if not self.redis_expiration_time:
            self.client.mset(mapping)
        else:
            pipe = self.client.pipeline()
            for key, value in mapping.items():
                pipe.setex(key, self.redis_expiration_time, value)
            pipe.execute()

    def get_mutex(self, key):
        if self.distributed_lock:
            lock_key = redis_backend.u('_lock_{0}').format(key)
            log.debug('Trying to acquire Redis lock for key %s', lock_key)
-
-            auto_renewal = True
-            lock_timeout = self.lock_timeout
-            if auto_renewal and not self.lock_timeout:
-                # set default timeout for auto_renewal
-                lock_timeout = 10
-            return get_mutex_lock(self.client, lock_key, lock_timeout,
-                                  auto_renewal=auto_renewal)
+            return get_mutex_lock(self.client, lock_key, self._lock_timeout,
+                                  auto_renewal=self._lock_auto_renewal)
        else:
            return None


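This is the core of the change: `get_mutex` previously hard-coded `auto_renewal = True` with a 10-second fallback timeout; it now defaults to off and follows the `lock_auto_renewal` backend argument, with a 60-second fallback set in `__init__`. With python-redis-lock, auto-renewal means a background thread keeps extending the lock's expiry while it is held, so a slow vcs operation cannot silently lose its lock after `expire` seconds. A standalone sketch, assuming a Redis instance on localhost:

import redis
import redis_lock

client = redis.StrictRedis(host='localhost', port=6379, db=5)
lock = redis_lock.Lock(redis_client=client, name='_lock_demo',
                       expire=60, auto_renewal=True, strict=True)
if lock.acquire(blocking=True):
    try:
        pass  # a long-running repo_object computation keeps the lock alive
    finally:
        lock.release()
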
class RedisPickleBackend(PickleSerializer, BaseRedisBackend):
    key_prefix = 'redis_pickle_backend'
    pass


class RedisMsgPackBackend(MsgPackSerializer, BaseRedisBackend):
    key_prefix = 'redis_msgpack_backend'
    pass


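For completeness, these classes are what the `dogpile.cache.rc.*` names in the .ini resolve to. A hedged sketch of the equivalent programmatic configuration, assuming the backends have already been registered with dogpile under those names:

from dogpile.cache import make_region

region = make_region().configure(
    'dogpile.cache.rc.redis_msgpack',
    expiration_time=2592000,
    arguments={
        'host': 'localhost',
        'port': 6379,
        'db': 5,
        'distributed_lock': True,
        'lock_auto_renewal': True,  # the option this commit makes configurable
    },
)
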
def get_mutex_lock(client, lock_key, lock_timeout, auto_renewal=False):
    import redis_lock

    class _RedisLockWrapper(object):
        """LockWrapper for redis_lock"""

        def __init__(self):
            pass

        @property
        def lock(self):
            return redis_lock.Lock(
                redis_client=client,
                name=lock_key,
                expire=lock_timeout,
                auto_renewal=auto_renewal,
                strict=True,
            )

        def acquire(self, wait=True):
            return self.lock.acquire(wait)

        def release(self):
            try:
                self.lock.release()
            except redis_lock.NotAcquired:
                pass

    return _RedisLockWrapper()
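
dogpile calls `get_mutex(key)` to serialize value regeneration, and `_RedisLockWrapper` satisfies that protocol with `acquire(wait)` / `release()`; note that each access to the `lock` property builds a fresh `redis_lock.Lock`, and `release()` swallows `NotAcquired`. A usage sketch against a local Redis, with an illustrative key name:

import redis

client = redis.StrictRedis(host='localhost', port=6379, db=5)
mutex = get_mutex_lock(client, lock_key='_lock_repo_object_demo',
                       lock_timeout=60, auto_renewal=True)
if mutex.acquire(wait=True):
    try:
        pass  # regenerate and store the cached value here
    finally:
        mutex.release()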