configs: moved all gunicorn config to python file
Author: super-admin
Revision: r1143:7071fe53 (default branch)
@@ -7,7 +7,7 @@
7 7 [server:main]
8 8 ; COMMON HOST/IP CONFIG
9 9 host = 0.0.0.0
10 port = 9900
10 port = 10010
11 11
12 12 ; ##################################################
13 13 ; WAITRESS WSGI SERVER - Recommended for Development
@@ -36,77 +36,6 @@ asyncore_use_poll = true
36 36 ; Module to use, this setting shouldn't be changed
37 37 #use = egg:gunicorn#main
38 38
39 ; Sets the number of process workers. More workers means more concurrent connections
40 ; RhodeCode can handle at the same time. Each additional worker also increases
41 ; memory usage, as each has its own set of caches.
42 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
43 ; than 8-10 except for really big deployments, e.g. 700-1000 users.
44 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
45 ; when using more than 1 worker.
46 #workers = 2
47
48 ; Gunicorn access log level
49 #loglevel = info
50
51 ; Process name visible in process list
52 #proc_name = rhodecode_vcsserver
53
54 ; Type of worker class, one of `sync`, `gevent`
55 ; currently `sync` is the only option allowed.
56 #worker_class = sync
57
58 ; The maximum number of simultaneous clients. Valid only for gevent
59 #worker_connections = 10
60
61 ; Max number of requests that a worker will handle before being gracefully restarted.
62 ; Prevents memory leaks; jitter adds variability so that not all workers are restarted at once.
63 #max_requests = 1000
64 #max_requests_jitter = 30
65
66 ; Amount of time a worker can spend handling a request before it
67 ; gets killed and restarted. By default set to 21600 (6hrs)
68 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
69 #timeout = 21600
70
71 ; The maximum size of HTTP request line in bytes.
72 ; 0 for unlimited
73 #limit_request_line = 0
74
75 ; Limit the number of HTTP header fields in a request.
76 ; By default this value is 100 and can't be larger than 32768.
77 #limit_request_fields = 32768
78
79 ; Limit the allowed size of an HTTP request header field.
80 ; Value is a positive number or 0.
81 ; Setting it to 0 will allow unlimited header field sizes.
82 #limit_request_field_size = 0
83
84 ; Timeout for graceful workers restart.
85 ; After receiving a restart signal, workers have this much time to finish
86 ; serving requests. Workers still alive after the timeout (starting from the
87 ; receipt of the restart signal) are force killed.
88 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
89 #graceful_timeout = 21600
90
91 # The number of seconds to wait for requests on a Keep-Alive connection.
92 # Generally set in the 1-5 seconds range.
93 #keepalive = 2
94
95 ; Maximum memory usage that each worker can use before it will receive a
96 ; graceful restart signal. 0 = memory monitoring is disabled
97 ; Examples: 268435456 (256MB), 536870912 (512MB)
98 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
99 #memory_max_usage = 0
100
101 ; How often in seconds to check for memory usage for each gunicorn worker
102 #memory_usage_check_interval = 60
103
104 ; Threshold below which we don't recycle a worker if GarbageCollection
105 ; frees up enough resources. Before each restart we try to run GC on the worker;
106 ; if enough memory is freed by that, the restart will not happen.
107 #memory_usage_recovery_threshold = 0.8
108
109
110 39 [app:main]
111 40 ; The %(here)s variable will be replaced with the absolute path of parent directory
112 41 ; of this file
@@ -12,6 +12,7 @@ import threading
12 12 import traceback
13 13 import random
14 14 import socket
15 import dataclasses
15 16 from gunicorn.glogging import Logger
16 17
17 18
@@ -19,8 +20,14 @@ def get_workers():
19 20 import multiprocessing
20 21 return multiprocessing.cpu_count() * 2 + 1
21 22
22 # GLOBAL
23
24 bind = "127.0.0.1:10010"
25
26
27 # Error logging output for gunicorn, '-' means log to stderr
23 28 errorlog = '-'
29
30 # Access logging output for gunicorn (-) is stdout
24 31 accesslog = '-'
25 32
26 33
@@ -30,6 +37,7 @@ accesslog = '-'
30 37 worker_tmp_dir = None
31 38 tmp_upload_dir = None
32 39
40 # re-use port logic (sets SO_REUSEPORT on the listening socket)
33 41 #reuse_port = True
34 42
35 43 # Custom log format
@@ -40,9 +48,91 @@ tmp_upload_dir = None
40 48 access_log_format = (
41 49 'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
42 50
43 # self adjust workers based on CPU count
51
52 # Sets the number of process workers. More workers means more concurrent connections
53 # RhodeCode can handle at the same time. Each additional worker also increases
54 # memory usage, as each has its own set of caches.
55 # Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
56 # than 8-10 except for huge deployments, e.g. 700-1000 users.
57 # `instance_id = *` must be set in the [app:main] section below (which is the default)
58 # when using more than 1 worker.
59 workers = 6
60
61 # self-adjust workers based on CPU count, to make full use of the CPUs without over-allocating resources
44 62 # workers = get_workers()
45 63
64 # Gunicorn log level (granularity of the error log output)
65 loglevel = 'info'
66
67 # Process name visible in process list
68 proc_name = 'rhodecode_vcsserver'
69
70 # Type of worker class, one of `sync`, `gevent`
71 # currently `sync` is the only option allowed.
72 worker_class = 'sync'
73
74 # The maximum number of simultaneous clients. Valid only for gevent
75 worker_connections = 10
76
77 # Max number of requests that a worker will handle before being gracefully restarted.
78 # Prevents memory leaks; jitter adds variability so that not all workers are restarted at once.
79 max_requests = 2000
80 max_requests_jitter = 30
81
82 # The maximum number of pending connections.
83 # Exceeding this number results in the client getting an error when attempting to connect.
84 backlog = 64
85
86 # Amount of time a worker can spend handling a request before it
87 # gets killed and restarted. By default set to 21600 (6hrs)
88 # Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
89 timeout = 21600
90
91 # The maximum size of HTTP request line in bytes.
92 # 0 for unlimited
93 limit_request_line = 0
94
95 # Limit the number of HTTP header fields in a request.
96 # By default this value is 100 and can't be larger than 32768.
97 limit_request_fields = 32768
98
99 # Limit the allowed size of an HTTP request header field.
100 # Value is a positive number or 0.
101 # Setting it to 0 will allow unlimited header field sizes.
102 limit_request_field_size = 0
103
104 # Timeout for graceful workers restart.
105 # After receiving a restart signal, workers have this much time to finish
106 # serving requests. Workers still alive after the timeout (starting from the
107 # receipt of the restart signal) are force killed.
108 # Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
109 graceful_timeout = 21600
110
111 # The number of seconds to wait for requests on a Keep-Alive connection.
112 # Generally set in the 1-5 seconds range.
113 keepalive = 2
114
115 # Maximum memory usage that each worker can use before it will receive a
116 # graceful restart signal. 0 = memory monitoring is disabled
117 # Examples: 268435456 (256MB), 536870912 (512MB)
118 # 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
119 memory_max_usage = 0
120
121 # How often in seconds to check for memory usage for each gunicorn worker
122 memory_usage_check_interval = 60
123
124 # Threshold below which we don't recycle a worker if GarbageCollection
125 # frees up enough resources. Before each restart we try to run GC on the worker;
126 # if enough memory is freed by that, the restart will not happen.
127 memory_usage_recovery_threshold = 0.8
128
129
130 @dataclasses.dataclass
131 class MemoryCheckConfig:
132 max_usage: int
133 check_interval: int
134 recovery_threshold: float
135
46 136
47 137 def _get_process_rss(pid=None):
48 138 try:
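For illustration (not part of the changeset): the (2 * NUMBER_OF_CPUS + 1) recommendation in the comments above is exactly what the existing get_workers() helper computes, and the hard-coded workers = 6 simply pins the value. A minimal Python sketch, where recommended_workers() is a hypothetical stand-in:

# Illustrative sketch only: the (2 * NUMBER_OF_CPUS + 1) recommendation,
# mirroring the get_workers() helper shown in the diff above.
import multiprocessing

def recommended_workers(cpu_count=None):
    # 2 CPUs -> 5 workers, 4 CPUs -> 9 workers; for very large deployments
    # cap this manually (e.g. workers = 6) instead of letting it grow.
    cpus = cpu_count if cpu_count is not None else multiprocessing.cpu_count()
    return cpus * 2 + 1

if __name__ == '__main__':
    print(recommended_workers(2), recommended_workers(4))  # -> 5 9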
@@ -67,8 +157,40 @@ def _get_config(ini_path):
67 157 return None
68 158
69 159
70 def _time_with_offset(memory_usage_check_interval):
71 return time.time() - random.randint(0, memory_usage_check_interval/2.0)
160 def get_memory_usage_params(config=None):
161 # memory spec defaults
162 _memory_max_usage = memory_max_usage
163 _memory_usage_check_interval = memory_usage_check_interval
164 _memory_usage_recovery_threshold = memory_usage_recovery_threshold
165
166 if config:
167 ini_path = os.path.abspath(config)
168 conf = _get_config(ini_path)
169
170 section = 'server:main'
171 if conf and conf.has_section(section):
172
173 if conf.has_option(section, 'memory_max_usage'):
174 _memory_max_usage = conf.getint(section, 'memory_max_usage')
175
176 if conf.has_option(section, 'memory_usage_check_interval'):
177 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
178
179 if conf.has_option(section, 'memory_usage_recovery_threshold'):
180 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
181
182 _memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
183 or _memory_max_usage)
184 _memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
185 or _memory_usage_check_interval)
186 _memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
187 or _memory_usage_recovery_threshold)
188
189 return MemoryCheckConfig(_memory_max_usage, _memory_usage_check_interval, _memory_usage_recovery_threshold)
190
191
192 def _time_with_offset(check_interval):
193 return time.time() - random.randint(0, int(check_interval / 2))
72 194
73 195
74 196 def pre_fork(server, worker):
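The new get_memory_usage_params() resolves the memory limits from three places, in increasing priority: the module-level defaults above, an optional [server:main] section of an ini file passed as config, and finally the RC_GUNICORN_* environment variables. A minimal sketch of that precedence, assuming the new module is importable as gunicorn_config (the actual file name is not shown in this changeset) and gunicorn itself is installed:

# Sketch only; assumes the module above is on the import path as
# gunicorn_config (hypothetical name) and that gunicorn is installed,
# since the module imports gunicorn.glogging at import time.
import os

# Environment variables win over both the module defaults and any values
# read from the ini's [server:main] section.
os.environ['RC_GUNICORN_MEMORY_MAX_USAGE'] = str(1024 ** 3)        # 1 GB cap
os.environ['RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL'] = '120'      # check every 2 min

import gunicorn_config

params = gunicorn_config.get_memory_usage_params()
print(params.max_usage, params.check_interval, params.recovery_threshold)
# -> 1073741824 120 0.8  (the threshold falls back to the module default)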
@@ -77,25 +199,10 @@ def pre_fork(server, worker):
77 199
78 200 def post_fork(server, worker):
79 201
80 # memory spec defaults
81 _memory_max_usage = 0
82 _memory_usage_check_interval = 60
83 _memory_usage_recovery_threshold = 0.8
84
85 ini_path = os.path.abspath(server.cfg.paste)
86 conf = _get_config(ini_path)
87
88 section = 'server:main'
89 if conf and conf.has_section(section):
90
91 if conf.has_option(section, 'memory_max_usage'):
92 _memory_max_usage = conf.getint(section, 'memory_max_usage')
93
94 if conf.has_option(section, 'memory_usage_check_interval'):
95 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
96
97 if conf.has_option(section, 'memory_usage_recovery_threshold'):
98 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
202 memory_conf = get_memory_usage_params()
203 _memory_max_usage = memory_conf.max_usage
204 _memory_usage_check_interval = memory_conf.check_interval
205 _memory_usage_recovery_threshold = memory_conf.recovery_threshold
99 206
100 207 worker._memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
101 208 or _memory_max_usage)
@@ -109,10 +216,10 @@ def post_fork(server, worker):
109 216 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
110 217
111 218 if _memory_max_usage:
112 server.log.info("[%-10s] WORKER spawned with max memory set at %s", worker.pid,
219 server.log.info("pid=[%-10s] WORKER spawned with max memory set at %s", worker.pid,
113 220 _format_data_size(_memory_max_usage))
114 221 else:
115 server.log.info("[%-10s] WORKER spawned", worker.pid)
222 server.log.info("pid=[%-10s] WORKER spawned", worker.pid)
116 223
117 224
118 225 def pre_exec(server):
@@ -181,42 +288,45 @@ def _format_data_size(size, unit="B", pr
181 288
182 289
183 290 def _check_memory_usage(worker):
184 memory_max_usage = worker._memory_max_usage
185 if not memory_max_usage:
291 _memory_max_usage = worker._memory_max_usage
292 if not _memory_max_usage:
186 293 return
187 294
188 memory_usage_check_interval = worker._memory_usage_check_interval
189 memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
295 _memory_usage_check_interval = worker._memory_usage_check_interval
296 _memory_usage_recovery_threshold = _memory_max_usage * worker._memory_usage_recovery_threshold
190 297
191 298 elapsed = time.time() - worker._last_memory_check_time
192 if elapsed > memory_usage_check_interval:
299 if elapsed > _memory_usage_check_interval:
193 300 mem_usage = _get_process_rss()
194 if mem_usage and mem_usage > memory_max_usage:
301 if mem_usage and mem_usage > _memory_max_usage:
195 302 worker.log.info(
196 303 "memory usage %s > %s, forcing gc",
197 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
304 _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
198 305 # Try to clean it up by forcing a full collection.
199 306 gc.collect()
200 307 mem_usage = _get_process_rss()
201 if mem_usage > memory_usage_recovery_threshold:
308 if mem_usage > _memory_usage_recovery_threshold:
202 309 # Didn't clean up enough, we'll have to terminate.
203 310 worker.log.warning(
204 311 "memory usage %s > %s after gc, quitting",
205 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
312 _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
206 313 # This will cause worker to auto-restart itself
207 314 worker.alive = False
208 315 worker._last_memory_check_time = time.time()
209 316
210 317
211 318 def worker_int(worker):
212 worker.log.info("[%-10s] worker received INT or QUIT signal", worker.pid)
319 worker.log.info("pid=[%-10s] worker received INT or QUIT signal", worker.pid)
213 320
214 321 # get traceback info, on worker crash
215 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
322 def get_thread_id(t_id):
323 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
324 return id2name.get(t_id, "unknown_thread_id")
325
216 326 code = []
217 for thread_id, stack in sys._current_frames().items():
327 for thread_id, stack in sys._current_frames().items(): # noqa
218 328 code.append(
219 "\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
329 "\n# Thread: %s(%d)" % (get_thread_id(thread_id), thread_id))
220 330 for fname, lineno, name, line in traceback.extract_stack(stack):
221 331 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
222 332 if line:
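To make the recycle decision above concrete, a small worked example with hypothetical numbers (the names mirror the settings, not actual code in the repository):

# Worked example of the recycle decision (hypothetical values).
GB = 1024 ** 3
memory_max_usage = 1 * GB                 # per-worker ceiling
recovery_threshold_ratio = 0.8            # memory_usage_recovery_threshold
recovery_limit = memory_max_usage * recovery_threshold_ratio   # ~0.8 GB

rss_before_gc = int(1.2 * GB)             # worker is over the ceiling -> force gc
rss_after_gc = int(0.7 * GB)              # suppose gc freed roughly 0.5 GB

needs_restart = rss_before_gc > memory_max_usage and rss_after_gc > recovery_limit
print(needs_restart)                      # False: gc recovered enough, worker keeps running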
@@ -225,15 +335,15 @@ def worker_int(worker):
225 335
226 336
227 337 def worker_abort(worker):
228 worker.log.info("[%-10s] worker received SIGABRT signal", worker.pid)
338 worker.log.info("pid=[%-10s] worker received SIGABRT signal", worker.pid)
229 339
230 340
231 341 def worker_exit(server, worker):
232 worker.log.info("[%-10s] worker exit", worker.pid)
342 worker.log.info("pid=[%-10s] worker exit", worker.pid)
233 343
234 344
235 345 def child_exit(server, worker):
236 worker.log.info("[%-10s] worker child exit", worker.pid)
346 worker.log.info("pid=[%-10s] worker child exit", worker.pid)
237 347
238 348
239 349 def pre_request(worker, req):
@@ -306,7 +416,9 @@ def get_ip_addr(environ):
306 416 proxy_key = 'HTTP_X_REAL_IP'
307 417 proxy_key2 = 'HTTP_X_FORWARDED_FOR'
308 418 def_key = 'REMOTE_ADDR'
309 _filters = lambda x: _filter_port(_filter_proxy(x))
419
420 def _filters(x):
421 return _filter_port(_filter_proxy(x))
310 422
311 423 ip = environ.get(proxy_key)
312 424 if ip:
@@ -390,4 +502,5 @@ class RhodeCodeLogger(Logger):
390 502
391 503 return atoms
392 504
505
393 506 logger_class = RhodeCodeLogger
@@ -7,7 +7,7 @@
7 7 [server:main]
8 8 ; COMMON HOST/IP CONFIG
9 9 host = 127.0.0.1
10 port = 9900
10 port = 10010
11 11
12 12
13 13 ; ###########################
@@ -19,77 +19,6 @@ port = 9900
19 19 ; Module to use, this setting shouldn't be changed
20 20 use = egg:gunicorn#main
21 21
22 ; Sets the number of process workers. More workers means more concurrent connections
23 ; RhodeCode can handle at the same time. Each additional worker also increases
24 ; memory usage, as each has its own set of caches.
25 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
26 ; than 8-10 except for really big deployments, e.g. 700-1000 users.
27 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
28 ; when using more than 1 worker.
29 workers = 2
30
31 ; Gunicorn access log level
32 loglevel = info
33
34 ; Process name visible in process list
35 proc_name = rhodecode_vcsserver
36
37 ; Type of worker class, one of `sync`, `gevent`
38 ; currently `sync` is the only option allowed.
39 worker_class = sync
40
41 ; The maximum number of simultaneous clients. Valid only for gevent
42 worker_connections = 10
43
44 ; Max number of requests that a worker will handle before being gracefully restarted.
45 ; Prevents memory leaks; jitter adds variability so that not all workers are restarted at once.
46 max_requests = 1000
47 max_requests_jitter = 30
48
49 ; Amount of time a worker can spend handling a request before it
50 ; gets killed and restarted. By default set to 21600 (6hrs)
51 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
52 timeout = 21600
53
54 ; The maximum size of HTTP request line in bytes.
55 ; 0 for unlimited
56 limit_request_line = 0
57
58 ; Limit the number of HTTP header fields in a request.
59 ; By default this value is 100 and can't be larger than 32768.
60 limit_request_fields = 32768
61
62 ; Limit the allowed size of an HTTP request header field.
63 ; Value is a positive number or 0.
64 ; Setting it to 0 will allow unlimited header field sizes.
65 limit_request_field_size = 0
66
67 ; Timeout for graceful workers restart.
68 ; After receiving a restart signal, workers have this much time to finish
69 ; serving requests. Workers still alive after the timeout (starting from the
70 ; receipt of the restart signal) are force killed.
71 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
72 graceful_timeout = 21600
73
74 # The number of seconds to wait for requests on a Keep-Alive connection.
75 # Generally set in the 1-5 seconds range.
76 keepalive = 2
77
78 ; Maximum memory usage that each worker can use before it will receive a
79 ; graceful restart signal. 0 = memory monitoring is disabled
80 ; Examples: 268435456 (256MB), 536870912 (512MB)
81 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
82 memory_max_usage = 0
83
84 ; How often in seconds to check for memory usage for each gunicorn worker
85 memory_usage_check_interval = 60
86
87 ; Threshold below which we don't recycle a worker if GarbageCollection
88 ; frees up enough resources. Before each restart we try to run GC on the worker;
89 ; if enough memory is freed by that, the restart will not happen.
90 memory_usage_recovery_threshold = 0.8
91
92
93 22 [app:main]
94 23 ; The %(here)s variable will be replaced with the absolute path of parent directory
95 24 ; of this file
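Taken together, the changeset leaves essentially only the host/port settings under [server:main] in the ini files, while worker counts, timeouts and memory monitoring now live in the Python config module. With a stock gunicorn setup this would typically be wired up along the lines of gunicorn --config <python-config>.py --paste <vcsserver>.ini; the exact file names and launcher used by this deployment are not shown here, so treat that invocation as an assumption.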