cache: allow controlling lock_auto_renewal via .ini config
r949:6224e9fb default
@@ -1,206 +1,209 b''
1 1 ## -*- coding: utf-8 -*-
2 2
3 3 ; #################################
4 4 ; RHODECODE VCSSERVER CONFIGURATION
5 5 ; #################################
6 6
7 7 [server:main]
8 8 ; COMMON HOST/IP CONFIG
9 9 host = 127.0.0.1
10 10 port = 9900
11 11
12 12
13 13 ; ###########################
14 14 ; GUNICORN APPLICATION SERVER
15 15 ; ###########################
16 16
17 17 ; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
18 18
19 19 ; Module to use; this setting shouldn't be changed
20 20 use = egg:gunicorn#main
21 21
22 22 ; Sets the number of process workers. More workers means more concurrent connections
23 23 ; RhodeCode can handle at the same time. Each additional worker also increases
24 24 ; memory usage, as each has its own set of caches.
25 25 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
26 26 ; than 8-10, except for really big deployments, e.g. 700-1000 users.
27 27 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
28 28 ; when using more than 1 worker.
29 29 workers = 2
30 30
31 31 ; Gunicorn access log level
32 32 loglevel = info
33 33
34 34 ; Process name visible in process list
35 35 proc_name = rhodecode_vcsserver
36 36
37 37 ; Type of worker class, one of `sync`, `gevent`
38 38 ; currently `sync` is the only option allowed.
39 39 worker_class = sync
40 40
41 41 ; The maximum number of simultaneous clients. Valid only for gevent
42 42 worker_connections = 10
43 43
44 44 ; Max number of requests that a worker will handle before being gracefully restarted.
45 45 ; Prevents memory leaks; jitter adds variability so that not all workers are restarted at once.
46 46 max_requests = 1000
47 47 max_requests_jitter = 30
48 48
49 49 ; Amount of time a worker can spend handling a request before it
50 50 ; gets killed and restarted. By default set to 21600 (6hrs)
51 51 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
52 52 timeout = 21600
53 53
54 54 ; The maximum size of HTTP request line in bytes.
55 55 ; 0 for unlimited
56 56 limit_request_line = 0
57 57
58 58 ; Limit the number of HTTP header fields in a request.
59 59 ; By default this value is 100 and can't be larger than 32768.
60 60 limit_request_fields = 32768
61 61
62 62 ; Limit the allowed size of an HTTP request header field.
63 63 ; Value is a positive number or 0.
64 64 ; Setting it to 0 will allow unlimited header field sizes.
65 65 limit_request_field_size = 0
66 66
67 67 ; Timeout for graceful workers restart.
68 68 ; After receiving a restart signal, workers have this much time to finish
69 69 ; serving requests. Workers still alive after the timeout (starting from the
70 70 ; receipt of the restart signal) are force killed.
71 71 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
72 72 graceful_timeout = 3600
73 73
74 74 # The number of seconds to wait for requests on a Keep-Alive connection.
75 75 # Generally set in the 1-5 seconds range.
76 76 keepalive = 2
77 77
78 78 ; Maximum memory usage that each worker can use before it will receive a
79 79 ; graceful restart signal. 0 = memory monitoring is disabled
80 80 ; Examples: 268435456 (256MB), 536870912 (512MB)
81 81 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
82 82 memory_max_usage = 0
83 83
84 84 ; How often in seconds to check for memory usage for each gunicorn worker
85 85 memory_usage_check_interval = 60
86 86
87 87 ; Threshold below which a worker is not recycled if garbage collection
88 88 ; frees up enough memory. Before each scheduled restart, GC is run on the worker;
89 89 ; if usage then drops below memory_max_usage * threshold (e.g. 0.8 * 1GB ~ 858MB), the restart is skipped.
90 90 memory_usage_recovery_threshold = 0.8
91 91
92 92
93 93 [app:main]
94 94 ; The %(here)s variable will be replaced with the absolute path of the parent directory
95 95 ; of this file
96 96 use = egg:rhodecode-vcsserver
97 97
98 98 ; Pyramid default locales, we need this to be set
99 99 pyramid.default_locale_name = en
100 100
101 101 ; default locale used by VCS systems
102 102 locale = en_US.UTF-8
103 103
104 104 ; Path to binaries for vcsserver; it should be set by the installer
105 105 ; at installation time, e.g. /home/user/vcsserver-1/profile/bin
106 106 ; it can also be a path to nix-build output in case of development
107 107 core.binary_dir = ""
108 108
109 109 ; Custom exception store path, defaults to TMPDIR
110 110 ; This is used to store exceptions from RhodeCode in a shared directory
111 111 #exception_tracker.store_path =
112 112
113 113 ; #############
114 114 ; DOGPILE CACHE
115 115 ; #############
116 116
117 117 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
118 118 ; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space
119 119 cache_dir = %(here)s/data
120 120
121 121 ; ***************************************
122 122 ; `repo_object` cache, default file based
123 123 ; ***************************************
124 124
125 125 ; `repo_object` cache settings for vcs methods for repositories
126 126 rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
127 127
128 128 ; cache auto-expires after N seconds
129 129 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
130 130 rc_cache.repo_object.expiration_time = 2592000
131 131
132 132 ; file cache store path. Defaults to the `cache_dir =` value, or tempdir if neither is set
133 133 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
134 134
135 135 ; ***********************************************************
136 136 ; `repo_object` cache with redis backend
137 137 ; recommended for larger instances and better performance
138 138 ; ***********************************************************
139 139
140 140 ; `repo_object` cache settings for vcs methods for repositories
141 141 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
142 142
143 143 ; cache auto-expires after N seconds
144 144 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
145 145 #rc_cache.repo_object.expiration_time = 2592000
146 146
147 147 ; redis_expiration_time needs to be greater than expiration_time
148 148 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000
149 149
150 150 #rc_cache.repo_object.arguments.host = localhost
151 151 #rc_cache.repo_object.arguments.port = 6379
152 152 #rc_cache.repo_object.arguments.db = 5
153 153 #rc_cache.repo_object.arguments.socket_timeout = 30
154 154 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
155 155 #rc_cache.repo_object.arguments.distributed_lock = true
156 156
157 ; auto-renew the lock to prevent stale locks; slower but safer. Enable only if stale-lock problems occur
158 #rc_cache.repo_object.arguments.lock_auto_renewal = true
159
157 160 ; Statsd client config
158 161 #statsd.enabled = false
159 162 #statsd.statsd_host = 0.0.0.0
160 163 #statsd.statsd_port = 8125
161 164 #statsd.statsd_prefix =
162 165 #statsd.statsd_ipv6 = false
163 166
164 167 ; #####################
165 168 ; LOGGING CONFIGURATION
166 169 ; #####################
167 170 [loggers]
168 171 keys = root, vcsserver
169 172
170 173 [handlers]
171 174 keys = console
172 175
173 176 [formatters]
174 177 keys = generic
175 178
176 179 ; #######
177 180 ; LOGGERS
178 181 ; #######
179 182 [logger_root]
180 183 level = NOTSET
181 184 handlers = console
182 185
183 186 [logger_vcsserver]
184 187 level = DEBUG
185 188 handlers =
186 189 qualname = vcsserver
187 190 propagate = 1
188 191
189 192
190 193 ; ########
191 194 ; HANDLERS
192 195 ; ########
193 196
194 197 [handler_console]
195 198 class = StreamHandler
196 199 args = (sys.stderr, )
197 200 level = INFO
198 201 formatter = generic
199 202
200 203 ; ##########
201 204 ; FORMATTERS
202 205 ; ##########
203 206
204 207 [formatter_generic]
205 208 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
206 209 datefmt = %Y-%m-%d %H:%M:%S
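The new `lock_auto_renewal` option above is consumed through dogpile's generic `arguments` dict, just like the other `rc_cache.repo_object.arguments.*` keys. As a reference point, a minimal sketch of the equivalent programmatic configuration (hypothetical values; it assumes the `dogpile.cache.rc.redis_msgpack` backend is registered, as vcsserver does at startup):

    from dogpile.cache import make_region

    # sketch only: mirrors the commented-out redis settings in the ini above
    region = make_region().configure(
        'dogpile.cache.rc.redis_msgpack',
        expiration_time=2592000,  # rc_cache.repo_object.expiration_time
        arguments={
            'host': 'localhost',
            'port': 6379,
            'db': 5,
            'socket_timeout': 30,
            'distributed_lock': True,
            # new in this change: keep renewing the distributed lock while a
            # worker still holds it, so long-running requests cannot lose it
            'lock_auto_renewal': True,
        },
    )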
@@ -1,307 +1,311 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 2 # Copyright (C) 2014-2020 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import time
19 19 import errno
20 20 import logging
21 21
22 22 import msgpack
23 23 import redis
24 24
25 25 from dogpile.cache.api import CachedValue
26 26 from dogpile.cache.backends import memory as memory_backend
27 27 from dogpile.cache.backends import file as file_backend
28 28 from dogpile.cache.backends import redis as redis_backend
29 29 from dogpile.cache.backends.file import NO_VALUE, compat, FileLock
30 30 from dogpile.cache.util import memoized_property
31 31
32 32 from vcsserver.lib.memory_lru_dict import LRUDict, LRUDictDebug
33 33
34 34
35 35 _default_max_size = 1024
36 36
37 37 log = logging.getLogger(__name__)
38 38
39 39
40 40 class LRUMemoryBackend(memory_backend.MemoryBackend):
41 41 key_prefix = 'lru_mem_backend'
42 42 pickle_values = False
43 43
44 44 def __init__(self, arguments):
45 45 max_size = arguments.pop('max_size', _default_max_size)
46 46
47 47 LRUDictClass = LRUDict
48 48 if arguments.pop('log_key_count', None):
49 49 LRUDictClass = LRUDictDebug
50 50
51 51 arguments['cache_dict'] = LRUDictClass(max_size)
52 52 super(LRUMemoryBackend, self).__init__(arguments)
53 53
54 54 def delete(self, key):
55 55 try:
56 56 del self._cache[key]
57 57 except KeyError:
58 58 # we don't care if key isn't there at deletion
59 59 pass
60 60
61 61 def delete_multi(self, keys):
62 62 for key in keys:
63 63 self.delete(key)
64 64
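The two delete overrides above exist because entries in an LRU dict can be evicted at any moment, so deletion has to tolerate keys that are already gone. A hypothetical direct instantiation (dogpile normally builds the backend from region config; the argument names follow `__init__` above):

    # sketch only: 'max_size' and 'log_key_count' are popped by __init__ above
    backend = LRUMemoryBackend({
        'max_size': 512,        # cap on the number of cached entries
        'log_key_count': True,  # swap in the debug LRU dict
    })
    backend.set('some_key', 'some_value')
    backend.delete('already-evicted-key')  # no KeyError, unlike stock dogpile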
65 65
66 66 class PickleSerializer(object):
67 67
68 68 def _dumps(self, value, safe=False):
69 69 try:
70 70 return compat.pickle.dumps(value)
71 71 except Exception:
72 72 if safe:
73 73 return NO_VALUE
74 74 else:
75 75 raise
76 76
77 77 def _loads(self, value, safe=True):
78 78 try:
79 79 return compat.pickle.loads(value)
80 80 except Exception:
81 81 if safe:
82 82 return NO_VALUE
83 83 else:
84 84 raise
85 85
86 86
87 87 class MsgPackSerializer(object):
88 88
89 89 def _dumps(self, value, safe=False):
90 90 try:
91 91 return msgpack.packb(value)
92 92 except Exception:
93 93 if safe:
94 94 return NO_VALUE
95 95 else:
96 96 raise
97 97
98 98 def _loads(self, value, safe=True):
99 99 """
100 100 pickle maintains the `CachedValue` wrapper of the tuple;
101 101 msgpack does not, so it must be added back in.
102 102 """
103 103 try:
104 104 value = msgpack.unpackb(value, use_list=False)
105 105 return CachedValue(*value)
106 106 except Exception:
107 107 if safe:
108 108 return NO_VALUE
109 109 else:
110 110 raise
111 111
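The docstring above is the subtle point of the msgpack serializer: dogpile stores values wrapped in `CachedValue`, which pickle round-trips intact, while msgpack flattens it to a bare tuple. A small illustration of the asymmetry (assuming the `msgpack` package; `CachedValue` is a two-field tuple wrapper of payload and metadata):

    import msgpack
    from dogpile.cache.api import CachedValue

    cached = CachedValue('payload', {'ct': 1234567890.0, 'v': 1})
    raw = msgpack.packb(cached)

    # unpackb yields a plain tuple, not a CachedValue, hence the
    # re-wrapping performed in MsgPackSerializer._loads above
    restored = CachedValue(*msgpack.unpackb(raw, use_list=False))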
112 112
113 113 import fcntl
114 114 flock_org = fcntl.flock
115 115
116 116
117 117 class CustomLockFactory(FileLock):
118 118
119 119 pass
120 120
121 121
122 122 class FileNamespaceBackend(PickleSerializer, file_backend.DBMBackend):
123 123 key_prefix = 'file_backend'
124 124
125 125 def __init__(self, arguments):
126 126 arguments['lock_factory'] = CustomLockFactory
127 127 db_file = arguments.get('filename')
128 128
129 129 log.debug('initializing %s DB in %s', self.__class__.__name__, db_file)
130 130 try:
131 131 super(FileNamespaceBackend, self).__init__(arguments)
132 132 except Exception:
133 133 log.error('Failed to initialize db at: %s', db_file)
134 134 raise
135 135
136 136 def __repr__(self):
137 137 return '{} `{}`'.format(self.__class__, self.filename)
138 138
139 139 def list_keys(self, prefix=''):
140 140 prefix = '{}:{}'.format(self.key_prefix, prefix)
141 141
142 142 def cond(v):
143 143 if not prefix:
144 144 return True
145 145
146 146 if v.startswith(prefix):
147 147 return True
148 148 return False
149 149
150 150 with self._dbm_file(True) as dbm:
151 151 try:
152 152 return filter(cond, dbm.keys())
153 153 except Exception:
154 154 log.error('Failed to fetch DBM keys from DB: %s', self.get_store())
155 155 raise
156 156
157 157 def get_store(self):
158 158 return self.filename
159 159
160 160 def _dbm_get(self, key):
161 161 with self._dbm_file(False) as dbm:
162 162 if hasattr(dbm, 'get'):
163 163 value = dbm.get(key, NO_VALUE)
164 164 else:
165 165 # gdbm objects lack a .get method
166 166 try:
167 167 value = dbm[key]
168 168 except KeyError:
169 169 value = NO_VALUE
170 170 if value is not NO_VALUE:
171 171 value = self._loads(value)
172 172 return value
173 173
174 174 def get(self, key):
175 175 try:
176 176 return self._dbm_get(key)
177 177 except Exception:
178 178 log.error('Failed to fetch DBM key %s from DB: %s', key, self.get_store())
179 179 raise
180 180
181 181 def set(self, key, value):
182 182 with self._dbm_file(True) as dbm:
183 183 dbm[key] = self._dumps(value)
184 184
185 185 def set_multi(self, mapping):
186 186 with self._dbm_file(True) as dbm:
187 187 for key, value in mapping.items():
188 188 dbm[key] = self._dumps(value)
189 189
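For completeness, a hypothetical direct use of the file backend (dogpile usually constructs it from the `rc_cache.repo_object.*` settings in the first hunk; note that key prefixing is applied by the dogpile region layer, not by the backend itself):

    # sketch only: 'filename' matches the commented-out
    # rc_cache.repo_object.arguments.filename ini option
    backend = FileNamespaceBackend({'filename': '/tmp/vcsserver_cache.db'})
    backend.set('some_key', {'answer': 42})
    print(backend.get('some_key'))  # -> {'answer': 42}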
190 190
191 191 class BaseRedisBackend(redis_backend.RedisBackend):
192 key_prefix = ''
193
194 def __init__(self, arguments):
195 super(BaseRedisBackend, self).__init__(arguments)
196 self._lock_timeout = self.lock_timeout
197 self._lock_auto_renewal = arguments.pop("lock_auto_renewal", False)
198
199 if self._lock_auto_renewal and not self._lock_timeout:
200 # set default timeout for auto_renewal
201 self._lock_timeout = 60
192 202
193 203 def _create_client(self):
194 204 args = {}
195 205
196 206 if self.url is not None:
197 207 args.update(url=self.url)
198 208
199 209 else:
200 210 args.update(
201 211 host=self.host, password=self.password,
202 212 port=self.port, db=self.db
203 213 )
204 214
205 215 connection_pool = redis.ConnectionPool(**args)
206 216
207 217 return redis.StrictRedis(connection_pool=connection_pool)
208 218
209 219 def list_keys(self, prefix=''):
210 220 prefix = '{}:{}*'.format(self.key_prefix, prefix)
211 221 return self.client.keys(prefix)
212 222
213 223 def get_store(self):
214 224 return self.client.connection_pool
215 225
216 226 def get(self, key):
217 227 value = self.client.get(key)
218 228 if value is None:
219 229 return NO_VALUE
220 230 return self._loads(value)
221 231
222 232 def get_multi(self, keys):
223 233 if not keys:
224 234 return []
225 235 values = self.client.mget(keys)
226 236 loads = self._loads
227 237 return [
228 238 loads(v) if v is not None else NO_VALUE
229 239 for v in values]
230 240
231 241 def set(self, key, value):
232 242 if self.redis_expiration_time:
233 243 self.client.setex(key, self.redis_expiration_time,
234 244 self._dumps(value))
235 245 else:
236 246 self.client.set(key, self._dumps(value))
237 247
238 248 def set_multi(self, mapping):
239 249 dumps = self._dumps
240 250 mapping = dict(
241 251 (k, dumps(v))
242 252 for k, v in mapping.items()
243 253 )
244 254
245 255 if not self.redis_expiration_time:
246 256 self.client.mset(mapping)
247 257 else:
248 258 pipe = self.client.pipeline()
249 259 for key, value in mapping.items():
250 260 pipe.setex(key, self.redis_expiration_time, value)
251 261 pipe.execute()
252 262
253 263 def get_mutex(self, key):
254 264 if self.distributed_lock:
255 265 lock_key = redis_backend.u('_lock_{0}').format(key)
256 266 log.debug('Trying to acquire Redis lock for key %s', lock_key)
257
258 auto_renewal = True
259 lock_timeout = self.lock_timeout
260 if auto_renewal and not self.lock_timeout:
261 # set default timeout for auto_renewal
262 lock_timeout = 10
263 return get_mutex_lock(self.client, lock_key, lock_timeout,
264 auto_renewal=auto_renewal)
267 return get_mutex_lock(self.client, lock_key, self._lock_timeout,
268 auto_renewal=self._lock_auto_renewal)
265 269 else:
266 270 return None
267 271
268 272
269 273 class RedisPickleBackend(PickleSerializer, BaseRedisBackend):
270 274 key_prefix = 'redis_pickle_backend'
271 275 pass
272 276
273 277
274 278 class RedisMsgPackBackend(MsgPackSerializer, BaseRedisBackend):
275 279 key_prefix = 'redis_msgpack_backend'
276 280 pass
277 281
278 282
279 283 def get_mutex_lock(client, lock_key, lock_timeout, auto_renewal=False):
280 284 import redis_lock
281 285
282 286 class _RedisLockWrapper(object):
283 287 """LockWrapper for redis_lock"""
284 288
285 289 def __init__(self):
286 290 pass
287 291
288 292 @property
289 293 def lock(self):
290 294 return redis_lock.Lock(
291 295 redis_client=client,
292 296 name=lock_key,
293 297 expire=lock_timeout,
294 298 auto_renewal=auto_renewal,
295 299 strict=True,
296 300 )
297 301
298 302 def acquire(self, wait=True):
299 303 return self.lock.acquire(wait)
300 304
301 305 def release(self):
302 306 try:
303 307 self.lock.release()
304 308 except redis_lock.NotAcquired:
305 309 pass
306 310
307 311 return _RedisLockWrapper()
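With the change above, renewal is decided once per backend from user configuration instead of being hard-coded in `get_mutex`, and the previous implicit 10-second timeout becomes an explicit 60-second default that only applies when auto-renewal is enabled without a configured `lock_timeout`. A hedged usage sketch of the factory (assuming a reachable Redis and the `python-redis-lock` package, which provides `redis_lock`):

    import redis

    # hypothetical direct use; BaseRedisBackend.get_mutex is the real caller
    client = redis.StrictRedis(host='localhost', port=6379, db=5)
    mutex = get_mutex_lock(client, '_lock_some_repo_key',
                           lock_timeout=60, auto_renewal=True)

    if mutex.acquire(wait=True):
        try:
            pass  # perform the protected computation; the lock renews itself
        finally:
            mutex.release()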