configs: moved most of configuration back to .ini files instead of gunicorn file
Author: marcink
Revision: r801:ecde08b9 (default branch)
@@ -8,7 +8,91 @@
 host = 0.0.0.0
 port = 9900
 
+###########################################################
+## WAITRESS WSGI SERVER - Recommended for Development ####
+###########################################################
+
 use = egg:waitress#main
+## number of worker threads
+threads = 5
+## MAX BODY SIZE 100GB
+max_request_body_size = 107374182400
+## Use poll instead of select, fixes file descriptors limits problems.
+## May not work on old windows systems.
+asyncore_use_poll = true
+
+
+##########################
+## GUNICORN WSGI SERVER ##
+##########################
+## run with gunicorn --log-config vcsserver.ini --paste vcsserver.ini
+
+#use = egg:gunicorn#main
+## Sets the number of process workers. More workers means more concurrent connections
+## RhodeCode can handle at the same time. Each additional worker also it increases
+## memory usage as each has it's own set of caches.
+## Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
+## than 8-10 unless for really big deployments .e.g 700-1000 users.
+## `instance_id = *` must be set in the [app:main] section below (which is the default)
+## when using more than 1 worker.
+#workers = 2
+
+## Gunicorn access log level
+#loglevel = info
+
+## process name visible in process list
+#proc_name = rhodecode_vcsserver
+
+## type of worker class, currently `sync` is the only option allowed.
+#worker_class = sync
+
+## The maximum number of simultaneous clients. Valid only for Gevent
+#worker_connections = 10
+
+## max number of requests that worker will handle before being gracefully
+## restarted, could prevent memory leaks
+#max_requests = 1000
+#max_requests_jitter = 30
+
+## amount of time a worker can spend with handling a request before it
+## gets killed and restarted. Set to 6hrs
+#timeout = 21600
+
+## The maximum size of HTTP request line in bytes.
+## 0 for unlimited
+#limit_request_line = 0
+
+## Limit the number of HTTP headers fields in a request.
+## By default this value is 100 and can't be larger than 32768.
+#limit_request_fields = 32768
+
+## Limit the allowed size of an HTTP request header field.
+## Value is a positive number or 0.
+## Setting it to 0 will allow unlimited header field sizes.
+#limit_request_field_size = 0
+
+## Timeout for graceful workers restart.
+## After receiving a restart signal, workers have this much time to finish
+## serving requests. Workers still alive after the timeout (starting from the
+## receipt of the restart signal) are force killed.
+#graceful_timeout = 3600
+
+# The number of seconds to wait for requests on a Keep-Alive connection.
+# Generally set in the 1-5 seconds range.
+#keepalive = 2
+
+## Maximum memory usage that each worker can use before it will receive a
+## graceful restart signal, e.g 10MB = 10485760 (10 * 1024 * 1024)
+# 0 = memory monitoring is disabled
+#memory_max_usage = 0
+
+## How often in seconds to check for memory usage for each gunicorn worker
+#memory_usage_check_interval = 60
+
+## Threshold value for which we don't recycle worker if GarbageCollection
+## frees up enough resources. Before each restart we try to run GC on worker
+## in case we get enough free memory after that, restart will not happen.
+#memory_usage_recovery_threshold = 0.8
 
 
 [app:main]
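The hunk above appears to target the development-oriented .ini (it binds to 0.0.0.0 and keeps waitress active, with the gunicorn block commented out); the final hunk of this change applies the same block, uncommented, to the other .ini. The comments recommend sizing workers as (2 * NUMBER_OF_CPUS + 1), capped at roughly 8-10. A minimal Python sketch of that sizing rule, assuming the cap from the comment (nothing in this commit computes workers automatically):

    # Worker-count guideline from the config comments: (2 * CPUs + 1), capped ~8-10.
    import multiprocessing

    def suggested_workers(cap=10):
        # cap reflects the "no more than 8-10" guideline, not a gunicorn limit
        return min(2 * multiprocessing.cpu_count() + 1, cap)

    print('workers = %d' % suggested_workers())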
@@ -13,9 +13,10 @@ available post the .ini config.
 
 """
 
+import gc
+import os
+import sys
 import math
-import gc
-import sys
 import time
 import threading
 import traceback
@@ -30,32 +31,7 @@ def get_workers():
 # GLOBAL
 errorlog = '-'
 accesslog = '-'
-loglevel = 'info'
 
-# SECURITY
-
-# The maximum size of HTTP request line in bytes.
-# 0 for unlimited
-limit_request_line = 0
-
-# Limit the number of HTTP headers fields in a request.
-# By default this value is 100 and can't be larger than 32768.
-limit_request_fields = 32768
-
-# Limit the allowed size of an HTTP request header field.
-# Value is a positive number or 0.
-# Setting it to 0 will allow unlimited header field sizes.
-limit_request_field_size = 0
-
-# Timeout for graceful workers restart.
-# After receiving a restart signal, workers have this much time to finish
-# serving requests. Workers still alive after the timeout (starting from the
-# receipt of the restart signal) are force killed.
-graceful_timeout = 60 * 60
-
-# The number of seconds to wait for requests on a Keep-Alive connection.
-# Generally set in the 1-5 seconds range.
-keepalive = 2
 
 # SERVER MECHANICS
 # None == system temp dir
@@ -70,15 +46,6 @@ access_log_format = (
 # self adjust workers based on CPU count
 # workers = get_workers()
 
-# n * 1024 * 0124 == n MBs, 0 = memory monitoring is disabled
-MAX_MEMORY_USAGE = 0 * 1024 * 1024
-
-# How often in seconds to check for memory usage
-MEMORY_USAGE_CHECK_INTERVAL = 30
-
-# If a gc brings us back below this threshold, we can avoid termination.
-MEMORY_USAGE_RECOVERY_THRESHOLD = MAX_MEMORY_USAGE * 0.8
-
 
 def _get_process_rss(pid=None):
     try:
@@ -92,8 +59,22 @@ def _get_process_rss(pid=None):
         return None
 
 
-def _time_with_offset():
-    return time.time() - random.randint(0, MEMORY_USAGE_CHECK_INTERVAL/2.0)
+def _get_config(ini_path):
+
+    try:
+        import configparser
+    except ImportError:
+        import ConfigParser as configparser
+    try:
+        config = configparser.ConfigParser()
+        config.read(ini_path)
+        return config
+    except Exception:
+        return None
+
+
+def _time_with_offset(memory_usage_check_interval):
+    return time.time() - random.randint(0, memory_usage_check_interval/2.0)
 
 
 def pre_fork(server, worker):
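_get_config() re-reads the paste .ini with configparser (falling back to the Python 2 ConfigParser module), and _time_with_offset() now takes the check interval as an argument instead of relying on a module constant. Back-dating each worker's last-check timestamp by a random 0..interval/2 seconds staggers the first memory checks across workers. A small illustration of that staggering (the 60-second interval is the .ini default; the worker count is made up):

    # Illustration only: jittered last-check times spread the first memory check out.
    import random
    import time

    interval = 60  # memory_usage_check_interval default from the .ini

    now = time.time()
    for worker_id in range(4):  # hypothetical number of workers
        last_check = now - random.randint(0, int(interval / 2))
        first_check_in = (last_check + interval) - now  # lands between 30 and 60 seconds
        print('worker %d: first memory check in ~%ds' % (worker_id, first_check_in))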
@@ -101,10 +82,37 @@ def pre_fork(server, worker):
 
 
 def post_fork(server, worker):
-    server.log.info("<%s> WORKER spawned", worker.pid)
+
+    # memory spec defaults
+    _memory_max_usage = 0
+    _memory_usage_check_interval = 60
+    _memory_usage_recovery_threshold = 0.8
+
+    ini_path = os.path.abspath(server.cfg.paste)
+    conf = _get_config(ini_path)
+    if conf and 'server:main' in conf:
+        section = conf['server:main']
+
+        if section.get('memory_max_usage'):
+            _memory_max_usage = int(section.get('memory_max_usage'))
+        if section.get('memory_usage_check_interval'):
+            _memory_usage_check_interval = int(section.get('memory_usage_check_interval'))
+        if section.get('memory_usage_recovery_threshold'):
+            _memory_usage_recovery_threshold = float(section.get('memory_usage_recovery_threshold'))
+
+    worker._memory_max_usage = _memory_max_usage
+    worker._memory_usage_check_interval = _memory_usage_check_interval
+    worker._memory_usage_recovery_threshold = _memory_usage_recovery_threshold
+
     # register memory last check time, with some random offset so we don't recycle all
     # at once
-    worker._last_memory_check_time = _time_with_offset()
+    worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
+
+    if _memory_max_usage:
+        server.log.info("[%-10s] WORKER spawned with max memory set at %s", worker.pid,
+                        _format_data_size(_memory_max_usage))
+    else:
+        server.log.info("[%-10s] WORKER spawned", worker.pid)
 
 
 def pre_exec(server):
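post_fork() now pulls the memory limits from the [server:main] section of the .ini passed via --paste, falling back to the defaults shown above when a key is absent. A standalone sketch of the same lookup, e.g. for checking what a given .ini will produce; the helper and the path are illustrative, not part of this commit:

    # Sketch: resolve the three memory settings the way post_fork() does.
    import configparser

    def resolve_memory_settings(ini_path):
        config = configparser.ConfigParser()
        config.read(ini_path)
        section = config['server:main'] if 'server:main' in config else {}
        return {
            'memory_max_usage': int(section.get('memory_max_usage') or 0),
            'memory_usage_check_interval': int(section.get('memory_usage_check_interval') or 60),
            'memory_usage_recovery_threshold': float(section.get('memory_usage_recovery_threshold') or 0.8),
        }

    print(resolve_memory_settings('/path/to/vcsserver.ini'))  # placeholder path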
@@ -173,32 +181,35 @@ def _format_data_size(size, unit="B", pr
 
 
 def _check_memory_usage(worker):
+    memory_max_usage = worker._memory_max_usage
+    if not memory_max_usage:
+        return
 
-    if not MAX_MEMORY_USAGE:
-        return
+    memory_usage_check_interval = worker._memory_usage_check_interval
+    memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
 
     elapsed = time.time() - worker._last_memory_check_time
-    if elapsed > MEMORY_USAGE_CHECK_INTERVAL:
+    if elapsed > memory_usage_check_interval:
         mem_usage = _get_process_rss()
-        if mem_usage and mem_usage > MAX_MEMORY_USAGE:
+        if mem_usage and mem_usage > memory_max_usage:
             worker.log.info(
                 "memory usage %s > %s, forcing gc",
-                _format_data_size(mem_usage), _format_data_size(MAX_MEMORY_USAGE))
+                _format_data_size(mem_usage), _format_data_size(memory_max_usage))
             # Try to clean it up by forcing a full collection.
             gc.collect()
             mem_usage = _get_process_rss()
-            if mem_usage > MEMORY_USAGE_RECOVERY_THRESHOLD:
+            if mem_usage > memory_usage_recovery_threshold:
                 # Didn't clean up enough, we'll have to terminate.
                 worker.log.warning(
                     "memory usage %s > %s after gc, quitting",
-                    _format_data_size(mem_usage), _format_data_size(MAX_MEMORY_USAGE))
+                    _format_data_size(mem_usage), _format_data_size(memory_max_usage))
                 # This will cause worker to auto-restart itself
                 worker.alive = False
         worker._last_memory_check_time = time.time()
 
 
 def worker_int(worker):
-    worker.log.info("[<%-10s>] worker received INT or QUIT signal", worker.pid)
+    worker.log.info("[%-10s] worker received INT or QUIT signal", worker.pid)
 
     # get traceback info, on worker crash
     id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
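_check_memory_usage() now reads its limits from the per-worker attributes set in post_fork(), so memory monitoring is driven entirely by the .ini. A worked example with made-up numbers: with memory_max_usage set to 1 GB and the default recovery threshold of 0.8, a worker whose RSS exceeds 1 GB gets a forced gc.collect() and is recycled only if RSS stays above roughly 819 MB afterwards:

    # Worked example (illustrative values, not a recommendation).
    memory_max_usage = 1 * 1024 * 1024 * 1024    # hypothetical 1 GB cap from the .ini
    memory_usage_recovery_threshold = 0.8        # .ini default

    recycle_if_rss_above = memory_max_usage * memory_usage_recovery_threshold
    print(recycle_if_rss_above)  # 858993459.2 bytes, roughly 819 MB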
@@ -214,15 +225,15 @@ def worker_int(worker):
 
 
 def worker_abort(worker):
-    worker.log.info("[<%-10s>] worker received SIGABRT signal", worker.pid)
+    worker.log.info("[%-10s] worker received SIGABRT signal", worker.pid)
 
 
 def worker_exit(server, worker):
-    worker.log.info("[<%-10s>] worker exit", worker.pid)
+    worker.log.info("[%-10s] worker exit", worker.pid)
 
 
 def child_exit(server, worker):
-    worker.log.info("[<%-10s>] worker child exit", worker.pid)
+    worker.log.info("[%-10s] worker child exit", worker.pid)
 
 
 def pre_request(worker, req):
@@ -8,29 +8,92 @@
 host = 127.0.0.1
 port = 9900
 
+###########################################################
+## WAITRESS WSGI SERVER - Recommended for Development ####
+###########################################################
+
+#use = egg:waitress#main
+## number of worker threads
+#threads = 5
+## MAX BODY SIZE 100GB
+#max_request_body_size = 107374182400
+## Use poll instead of select, fixes file descriptors limits problems.
+## May not work on old windows systems.
+#asyncore_use_poll = true
+
 
 ##########################
 ## GUNICORN WSGI SERVER ##
 ##########################
 ## run with gunicorn --log-config vcsserver.ini --paste vcsserver.ini
+
 use = egg:gunicorn#main
-## Sets the number of process workers. Recommended
-## value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers
+## Sets the number of process workers. More workers means more concurrent connections
+## RhodeCode can handle at the same time. Each additional worker also it increases
+## memory usage as each has it's own set of caches.
+## Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
+## than 8-10 unless for really big deployments .e.g 700-1000 users.
+## `instance_id = *` must be set in the [app:main] section below (which is the default)
+## when using more than 1 worker.
 workers = 2
-## process name
+
+## Gunicorn access log level
+loglevel = info
+
+## process name visible in process list
 proc_name = rhodecode_vcsserver
+
 ## type of worker class, currently `sync` is the only option allowed.
 worker_class = sync
+
 ## The maximum number of simultaneous clients. Valid only for Gevent
-#worker_connections = 10
+worker_connections = 10
+
 ## max number of requests that worker will handle before being gracefully
 ## restarted, could prevent memory leaks
 max_requests = 1000
 max_requests_jitter = 30
+
 ## amount of time a worker can spend with handling a request before it
 ## gets killed and restarted. Set to 6hrs
 timeout = 21600
 
+## The maximum size of HTTP request line in bytes.
+## 0 for unlimited
+limit_request_line = 0
+
+## Limit the number of HTTP headers fields in a request.
+## By default this value is 100 and can't be larger than 32768.
+limit_request_fields = 32768
+
+## Limit the allowed size of an HTTP request header field.
+## Value is a positive number or 0.
+## Setting it to 0 will allow unlimited header field sizes.
+limit_request_field_size = 0
+
+## Timeout for graceful workers restart.
+## After receiving a restart signal, workers have this much time to finish
+## serving requests. Workers still alive after the timeout (starting from the
+## receipt of the restart signal) are force killed.
+graceful_timeout = 3600
+
+# The number of seconds to wait for requests on a Keep-Alive connection.
+# Generally set in the 1-5 seconds range.
+keepalive = 2
+
+## Maximum memory usage that each worker can use before it will receive a
+## graceful restart signal, e.g 10MB = 10485760 (10 * 1024 * 1024)
+# 0 = memory monitoring is disabled
+memory_max_usage = 0
+
+## How often in seconds to check for memory usage for each gunicorn worker
+memory_usage_check_interval = 60
+
+## Threshold value for which we don't recycle worker if GarbageCollection
+## frees up enough resources. Before each restart we try to run GC on worker
+## in case we get enough free memory after that, restart will not happen.
+memory_usage_recovery_threshold = 0.8
+
 
 [app:main]
 use = egg:rhodecode-vcsserver
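In this second .ini the gunicorn settings are active rather than commented out. memory_max_usage is expressed in bytes, as the `10MB = 10485760 (10 * 1024 * 1024)` comment above indicates, and 0 keeps monitoring disabled. For instance, to cap each worker at 2 GB (an arbitrary example, not a recommended value):

    # 2 GB expressed in bytes for memory_max_usage (example value only).
    print(2 * 1024 * 1024 * 1024)  # 2147483648  ->  memory_max_usage = 2147483648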