gunicorn: moved all configuration of gunicorn workers to .ini files....
marcink
r4098:ef7e0089 default
@@ -61,21 +61,61 @@ asyncore_use_poll = true
 ## `instance_id = *` must be set in the [app:main] section below (which is the default)
 ## when using more than 1 worker.
 #workers = 2
+
 ## process name visible in process list
 #proc_name = rhodecode
+
 ## type of worker class, one of sync, gevent
 ## recommended for bigger setup is using of of other than sync one
 #worker_class = gevent
+
 ## The maximum number of simultaneous clients. Valid only for Gevent
 #worker_connections = 10
+
 ## max number of requests that worker will handle before being gracefully
 ## restarted, could prevent memory leaks
 #max_requests = 1000
 #max_requests_jitter = 30
+
 ## amount of time a worker can spend with handling a request before it
 ## gets killed and restarted. Set to 6hrs
 #timeout = 21600
 
+## The maximum size of HTTP request line in bytes.
+## 0 for unlimited
+#limit_request_line = 0
+
+## Limit the number of HTTP headers fields in a request.
+## By default this value is 100 and can't be larger than 32768.
+#limit_request_fields = 32768
+
+## Limit the allowed size of an HTTP request header field.
+## Value is a positive number or 0.
+## Setting it to 0 will allow unlimited header field sizes.
+#limit_request_field_size = 0
+
+## Timeout for graceful workers restart.
+## After receiving a restart signal, workers have this much time to finish
+## serving requests. Workers still alive after the timeout (starting from the
+## receipt of the restart signal) are force killed.
+#graceful_timeout = 3600
+
+# The number of seconds to wait for requests on a Keep-Alive connection.
+# Generally set in the 1-5 seconds range.
+#keepalive = 2
+
+## Maximum memory usage that each worker can use before it will receive a
+## graceful restart signal, e.g 10MB = 10485760 (10 * 1024 * 1024)
+# 0 = memory monitoring is disabled
+#memory_max_usage = 0
+
+## How often in seconds to check for memory usage for each gunicorn worker
+#memory_usage_check_interval = 60
+
+## Threshold value for which we don't recycle worker if GarbageCollection
+## frees up enough resources. Before each restart we try to run GC on worker
+## in case we get enough free memory after that, restart will not happen.
+#memory_usage_recovery_threshold = 0.8
 
 ## prefix middleware for RhodeCode.
 ## recommended when using proxy setup.
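The three memory_* keys added above are not built-in gunicorn settings; they are read back out of the `[server:main]` section of this .ini by the gunicorn config module changed later in this commit (see the post_fork hook below). A minimal sketch of that lookup, using the same fallback defaults as post_fork; the ini path here is hypothetical:

```python
# Sketch: pull the custom memory_* keys out of a paste-style .ini, falling
# back to the same defaults post_fork() uses when a key is commented out or
# missing. The file name is a placeholder.
import configparser

config = configparser.ConfigParser()
config.read('rhodecode.ini')  # hypothetical path

memory_max_usage = 0              # 0 disables memory monitoring
memory_usage_check_interval = 60  # seconds between checks
memory_usage_recovery_threshold = 0.8

if 'server:main' in config:
    section = config['server:main']
    memory_max_usage = int(section.get('memory_max_usage') or memory_max_usage)
    memory_usage_check_interval = int(
        section.get('memory_usage_check_interval') or memory_usage_check_interval)
    memory_usage_recovery_threshold = float(
        section.get('memory_usage_recovery_threshold') or memory_usage_recovery_threshold)

print(memory_max_usage, memory_usage_check_interval, memory_usage_recovery_threshold)
```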
@@ -13,9 +13,10 @@ available post the .ini config.
 
 """
 
+import gc
+import os
+import sys
 import math
-import gc
-import sys
 import time
 import threading
 import traceback
@@ -32,30 +33,6 @@ errorlog = '-'
 accesslog = '-'
 loglevel = 'info'
 
-# SECURITY
-
-# The maximum size of HTTP request line in bytes.
-# 0 for unlimited
-limit_request_line = 0
-
-# Limit the number of HTTP headers fields in a request.
-# By default this value is 100 and can't be larger than 32768.
-limit_request_fields = 32768
-
-# Limit the allowed size of an HTTP request header field.
-# Value is a positive number or 0.
-# Setting it to 0 will allow unlimited header field sizes.
-limit_request_field_size = 0
-
-# Timeout for graceful workers restart.
-# After receiving a restart signal, workers have this much time to finish
-# serving requests. Workers still alive after the timeout (starting from the
-# receipt of the restart signal) are force killed.
-graceful_timeout = 60 * 60
-
-# The number of seconds to wait for requests on a Keep-Alive connection.
-# Generally set in the 1-5 seconds range.
-keepalive = 2
 
 # SERVER MECHANICS
 # None == system temp dir
@@ -70,15 +47,6 @@ access_log_format = (
 # self adjust workers based on CPU count
 # workers = get_workers()
 
-# n * 1024 * 0124 == n MBs, 0 = memory monitoring is disabled
-MAX_MEMORY_USAGE = 0 * 1024 * 1024
-
-# How often in seconds to check for memory usage
-MEMORY_USAGE_CHECK_INTERVAL = 30
-
-# If a gc brings us back below this threshold, we can avoid termination.
-MEMORY_USAGE_RECOVERY_THRESHOLD = MAX_MEMORY_USAGE * 0.8
-
 
 def _get_process_rss(pid=None):
     try:
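The body of `_get_process_rss()` lies outside this hunk; it returns the worker's resident set size in bytes, or None when that cannot be determined. A minimal sketch of such a helper, assuming psutil is available (the actual implementation is not shown in this diff):

```python
import os

def _get_process_rss(pid=None):
    # Resident set size (bytes) of the given process, defaulting to the
    # current one; None if psutil is unavailable or the lookup fails.
    try:
        import psutil
        return psutil.Process(pid or os.getpid()).memory_info().rss
    except Exception:
        return None
```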
@@ -92,8 +60,22 @@ def _get_process_rss(pid=None):
         return None
 
 
-def _time_with_offset():
-    return time.time() - random.randint(0, MEMORY_USAGE_CHECK_INTERVAL/2.0)
+def _get_config(ini_path):
+
+    try:
+        import configparser
+    except ImportError:
+        import ConfigParser as configparser
+    try:
+        config = configparser.ConfigParser()
+        config.read(ini_path)
+        return config
+    except Exception:
+        return None
+
+
+def _time_with_offset(memory_usage_check_interval):
+    return time.time() - random.randint(0, memory_usage_check_interval/2.0)
 
 
 def pre_fork(server, worker):
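`_time_with_offset()` now takes the check interval as an argument instead of reading a module-level constant. Backdating each worker's initial check time by a random 0 to interval/2 seconds staggers the first memory check across workers, so they are not all recycled at once. A small illustration of the effect, assuming the default 60-second interval:

```python
import random
import time

# Each worker pretends its last memory check happened up to interval/2
# seconds ago, so with a 60s interval the first real check fires somewhere
# between ~30 and ~60 seconds after fork instead of simultaneously.
interval = 60
last_check = time.time() - random.randint(0, int(interval / 2))
seconds_until_first_check = interval - (time.time() - last_check)
print(round(seconds_until_first_check))  # roughly 30..60
```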
@@ -101,10 +83,37 @@ def pre_fork(server, worker):
 
 
 def post_fork(server, worker):
-    server.log.info("<%s> WORKER spawned", worker.pid)
+
+    # memory spec defaults
+    _memory_max_usage = 0
+    _memory_usage_check_interval = 60
+    _memory_usage_recovery_threshold = 0.8
+
+    ini_path = os.path.abspath(server.cfg.paste)
+    conf = _get_config(ini_path)
+    if conf and 'server:main' in conf:
+        section = conf['server:main']
+
+        if section.get('memory_max_usage'):
+            _memory_max_usage = int(section.get('memory_max_usage'))
+        if section.get('memory_usage_check_interval'):
+            _memory_usage_check_interval = int(section.get('memory_usage_check_interval'))
+        if section.get('memory_usage_recovery_threshold'):
+            _memory_usage_recovery_threshold = float(section.get('memory_usage_recovery_threshold'))
+
+    worker._memory_max_usage = _memory_max_usage
+    worker._memory_usage_check_interval = _memory_usage_check_interval
+    worker._memory_usage_recovery_threshold = _memory_usage_recovery_threshold
+
     # register memory last check time, with some random offset so we don't recycle all
     # at once
-    worker._last_memory_check_time = _time_with_offset()
+    worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
+
+    if _memory_max_usage:
+        server.log.info("[%-10s] WORKER spawned with max memory set at %s", worker.pid,
+                        _format_data_size(_memory_max_usage))
+    else:
+        server.log.info("[%-10s] WORKER spawned", worker.pid)
 
 
 def pre_exec(server):
@@ -173,32 +182,35 @@ def _format_data_size(size, unit="B", pr
 
 
 def _check_memory_usage(worker):
+    memory_max_usage = worker._memory_max_usage
+    if not memory_max_usage:
+        return
 
-    if not MAX_MEMORY_USAGE:
-        return
+    memory_usage_check_interval = worker._memory_usage_check_interval
+    memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
 
     elapsed = time.time() - worker._last_memory_check_time
-    if elapsed > MEMORY_USAGE_CHECK_INTERVAL:
+    if elapsed > memory_usage_check_interval:
         mem_usage = _get_process_rss()
-        if mem_usage and mem_usage > MAX_MEMORY_USAGE:
+        if mem_usage and mem_usage > memory_max_usage:
             worker.log.info(
                 "memory usage %s > %s, forcing gc",
-                _format_data_size(mem_usage), _format_data_size(MAX_MEMORY_USAGE))
+                _format_data_size(mem_usage), _format_data_size(memory_max_usage))
             # Try to clean it up by forcing a full collection.
             gc.collect()
             mem_usage = _get_process_rss()
-            if mem_usage > MEMORY_USAGE_RECOVERY_THRESHOLD:
+            if mem_usage > memory_usage_recovery_threshold:
                 # Didn't clean up enough, we'll have to terminate.
                 worker.log.warning(
                     "memory usage %s > %s after gc, quitting",
-                    _format_data_size(mem_usage), _format_data_size(MAX_MEMORY_USAGE))
+                    _format_data_size(mem_usage), _format_data_size(memory_max_usage))
                 # This will cause worker to auto-restart itself
                 worker.alive = False
         worker._last_memory_check_time = time.time()
 
 
 def worker_int(worker):
-    worker.log.info("[<%-10s>] worker received INT or QUIT signal", worker.pid)
+    worker.log.info("[%-10s] worker received INT or QUIT signal", worker.pid)
 
     # get traceback info, on worker crash
     id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
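Note that `memory_usage_recovery_threshold` is a fraction of `memory_max_usage`, not an absolute byte count: the worker is only restarted if the forced gc fails to bring its RSS back under that fraction of the cap. A worked example with a hypothetical 512 MB cap and the default 0.8 threshold:

```python
# Worked example of the recycle math in _check_memory_usage(), assuming a
# hypothetical 512 MB memory_max_usage and the default 0.8 threshold.
memory_max_usage = 512 * 1024 * 1024                # 536870912 bytes
memory_usage_recovery_threshold = 0.8

recovery_limit = memory_max_usage * memory_usage_recovery_threshold
print(int(recovery_limit))                          # 429496729 bytes (~410 MB)

# A worker whose RSS exceeds 512 MB triggers gc.collect(); it is marked for
# restart (worker.alive = False) only if RSS is still above ~410 MB afterwards.
```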
@@ -214,15 +226,15 @@ def worker_int(worker):
 
 
 def worker_abort(worker):
-    worker.log.info("[<%-10s>] worker received SIGABRT signal", worker.pid)
+    worker.log.info("[%-10s] worker received SIGABRT signal", worker.pid)
 
 
 def worker_exit(server, worker):
-    worker.log.info("[<%-10s>] worker exit", worker.pid)
+    worker.log.info("[%-10s] worker exit", worker.pid)
 
 
 def child_exit(server, worker):
-    worker.log.info("[<%-10s>] worker child exit", worker.pid)
+    worker.log.info("[%-10s] worker child exit", worker.pid)
 
 
 def pre_request(worker, req):
@@ -61,21 +61,61 @@ use = egg:gunicorn#main
 ## `instance_id = *` must be set in the [app:main] section below (which is the default)
 ## when using more than 1 worker.
 workers = 2
+
 ## process name visible in process list
 proc_name = rhodecode
+
 ## type of worker class, one of sync, gevent
 ## recommended for bigger setup is using of of other than sync one
 worker_class = gevent
+
 ## The maximum number of simultaneous clients. Valid only for Gevent
 worker_connections = 10
+
 ## max number of requests that worker will handle before being gracefully
 ## restarted, could prevent memory leaks
 max_requests = 1000
 max_requests_jitter = 30
+
 ## amount of time a worker can spend with handling a request before it
 ## gets killed and restarted. Set to 6hrs
 timeout = 21600
 
+## The maximum size of HTTP request line in bytes.
+## 0 for unlimited
+limit_request_line = 0
+
+## Limit the number of HTTP headers fields in a request.
+## By default this value is 100 and can't be larger than 32768.
+limit_request_fields = 32768
+
+## Limit the allowed size of an HTTP request header field.
+## Value is a positive number or 0.
+## Setting it to 0 will allow unlimited header field sizes.
+limit_request_field_size = 0
+
+## Timeout for graceful workers restart.
+## After receiving a restart signal, workers have this much time to finish
+## serving requests. Workers still alive after the timeout (starting from the
+## receipt of the restart signal) are force killed.
+graceful_timeout = 3600
+
+# The number of seconds to wait for requests on a Keep-Alive connection.
+# Generally set in the 1-5 seconds range.
+keepalive = 2
+
+## Maximum memory usage that each worker can use before it will receive a
+## graceful restart signal, e.g 10MB = 10485760 (10 * 1024 * 1024)
+# 0 = memory monitoring is disabled
+memory_max_usage = 0
+
+## How often in seconds to check for memory usage for each gunicorn worker
+memory_usage_check_interval = 60
+
+## Threshold value for which we don't recycle worker if GarbageCollection
+## frees up enough resources. Before each restart we try to run GC on worker
+## in case we get enough free memory after that, restart will not happen.
+memory_usage_recovery_threshold = 0.8
 
 ## prefix middleware for RhodeCode.
 ## recommended when using proxy setup.