gunicorn: updated gunicorn config based on release changes
marcink
r808:418e3613 default
@@ -1,273 +1,265 b''
1 1 """
2 gunicorn config extension and hooks. Sets additional configuration that is
3 available post the .ini config.
4
5 - workers = ${cpu_number}
6 - threads = 1
7 - proc_name = ${gunicorn_proc_name}
8 - worker_class = sync
9 - worker_connections = 10
10 - max_requests = 1000
11 - max_requests_jitter = 30
12 - timeout = 21600
13
2 Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
3 Gunicorn configuration should be managed by the .ini file entries of RhodeCode or VCSServer.
14 4 """
15 5
16 6 import gc
17 7 import os
18 8 import sys
19 9 import math
20 10 import time
21 11 import threading
22 12 import traceback
23 13 import random
24 14 from gunicorn.glogging import Logger
25 15
26 16
27 17 def get_workers():
28 18 import multiprocessing
29 19 return multiprocessing.cpu_count() * 2 + 1
30 20
31 21 # GLOBAL
32 22 errorlog = '-'
33 23 accesslog = '-'
34 24
35 25
36 26 # SERVER MECHANICS
37 27 # None == system temp dir
38 28 # worker_tmp_dir is recommended to be set to some tmpfs
39 29 worker_tmp_dir = None
40 30 tmp_upload_dir = None
41 31
42 32 # Custom log format
43 33 access_log_format = (
44 34 '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
45 35
46 36 # self adjust workers based on CPU count
47 37 # workers = get_workers()
48 38
49 39
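For context, the commented-out heuristic above follows the common 2*CPU+1 sizing rule. A minimal sketch of enabling it instead of a fixed value from the .ini (the 8-core host is only an assumption for the arithmetic):

    # illustrative only: on an 8-core host get_workers() returns 8 * 2 + 1 == 17
    workers = get_workers()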
50 40 def _get_process_rss(pid=None):
51 41 try:
52 42 import psutil
53 43 if pid:
54 44 proc = psutil.Process(pid)
55 45 else:
56 46 proc = psutil.Process()
57 47 return proc.memory_info().rss
58 48 except Exception:
59 49 return None
60 50
61 51
62 52 def _get_config(ini_path):
63 53
64 54 try:
65 55 import configparser
66 56 except ImportError:
67 57 import ConfigParser as configparser
68 58 try:
69 59 config = configparser.RawConfigParser()
70 60 config.read(ini_path)
71 61 return config
72 62 except Exception:
73 63 return None
74 64
75 65
76 66 def _time_with_offset(memory_usage_check_interval):
77 67 return time.time() - random.randint(0, int(memory_usage_check_interval/2))
78 68
79 69
80 70 def pre_fork(server, worker):
81 71 pass
82 72
83 73
84 74 def post_fork(server, worker):
85 75
86 76 # memory spec defaults
87 77 _memory_max_usage = 0
88 78 _memory_usage_check_interval = 60
89 79 _memory_usage_recovery_threshold = 0.8
90 80
91 81 ini_path = os.path.abspath(server.cfg.paste)
92 82 conf = _get_config(ini_path)
93 83
94 84 section = 'server:main'
95 85 if conf and conf.has_section(section):
96 86
97 87 if conf.has_option(section, 'memory_max_usage'):
98 88 _memory_max_usage = conf.getint(section, 'memory_max_usage')
99 89
100 90 if conf.has_option(section, 'memory_usage_check_interval'):
101 91 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
102 92
103 93 if conf.has_option(section, 'memory_usage_recovery_threshold'):
104 94 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
105 95
106 96 worker._memory_max_usage = _memory_max_usage
107 97 worker._memory_usage_check_interval = _memory_usage_check_interval
108 98 worker._memory_usage_recovery_threshold = _memory_usage_recovery_threshold
109 99
110 100 # register memory last check time, with some random offset so we don't recycle all
111 101 # at once
112 102 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
113 103
114 104 if _memory_max_usage:
115 105 server.log.info("[%-10s] WORKER spawned with max memory set at %s", worker.pid,
116 106 _format_data_size(_memory_max_usage))
117 107 else:
118 108 server.log.info("[%-10s] WORKER spawned", worker.pid)
119 109
120 110
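post_fork() reads its memory limits from the same .ini file gunicorn was started with (server.cfg.paste). An illustrative [server:main] fragment, with values chosen only as an example (bytes, seconds, and a fraction of the limit, matching how the code interprets them):

    [server:main]
    ; recycle a worker once its RSS exceeds ~1 GiB; 0 disables the check
    memory_max_usage = 1073741824
    ; how often, in seconds, each worker re-checks its own RSS
    memory_usage_check_interval = 60
    ; after a forced gc, keep the worker only if usage fell below 80% of the limit
    memory_usage_recovery_threshold = 0.8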
121 111 def pre_exec(server):
122 112 server.log.info("Forked child, re-executing.")
123 113
124 114
125 115 def on_starting(server):
126 116 server_lbl = '{} {}'.format(server.proc_name, server.address)
127 117 server.log.info("Server %s is starting.", server_lbl)
128 118
129 119
130 120 def when_ready(server):
131 121 server.log.info("Server %s is ready. Spawning workers", server)
132 122
133 123
134 124 def on_reload(server):
135 125 pass
136 126
137 127
138 128 def _format_data_size(size, unit="B", precision=1, binary=True):
139 129 """Format a number using SI units (kilo, mega, etc.).
140 130
141 131 ``size``: The number as a float or int.
142 132
143 133 ``unit``: The unit name in plural form. Examples: "bytes", "B".
144 134
145 135 ``precision``: How many digits to the right of the decimal point. Default
146 136 is 1. 0 suppresses the decimal point.
147 137
148 138 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
149 139 If true, use base-2 binary prefixes (kibi = Ki = 1024).
150 140
151 141 ``full_name``: If false (default), use the prefix abbreviation ("k" or
152 142 "Ki"). If true, use the full prefix ("kilo" or "kibi") instead of
153 143 the abbreviation.
154 144
155 145 """
156 146
157 147 if not binary:
158 148 base = 1000
159 149 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
160 150 else:
161 151 base = 1024
162 152 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
163 153
164 154 sign = ""
165 155 if size > 0:
166 156 m = int(math.log(size, base))
167 157 elif size < 0:
168 158 sign = "-"
169 159 size = -size
170 160 m = int(math.log(size, base))
171 161 else:
172 162 m = 0
173 163 if m > 8:
174 164 m = 8
175 165
176 166 if m == 0:
177 167 precision = '%.0f'
178 168 else:
179 169 precision = '%%.%df' % precision
180 170
181 171 size = precision % (size / math.pow(base, m))
182 172
183 173 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
184 174
185 175
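A few spot checks of _format_data_size(), derived from the logic above (binary prefixes are the default):

    _format_data_size(300)                     # '300 B'    (m == 0 drops decimals)
    _format_data_size(1572864)                 # '1.5 MiB'
    _format_data_size(1572864, binary=False)   # '1.6 MB'
    _format_data_size(-2048)                   # '-2.0 KiB'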
186 176 def _check_memory_usage(worker):
187 177 memory_max_usage = worker._memory_max_usage
188 178 if not memory_max_usage:
189 179 return
190 180
191 181 memory_usage_check_interval = worker._memory_usage_check_interval
192 182 memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
193 183
194 184 elapsed = time.time() - worker._last_memory_check_time
195 185 if elapsed > memory_usage_check_interval:
196 186 mem_usage = _get_process_rss()
197 187 if mem_usage and mem_usage > memory_max_usage:
198 188 worker.log.info(
199 189 "memory usage %s > %s, forcing gc",
200 190 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
201 191 # Try to clean it up by forcing a full collection.
202 192 gc.collect()
203 193 mem_usage = _get_process_rss()
204 194 if mem_usage and mem_usage > memory_usage_recovery_threshold:
205 195 # Didn't clean up enough, we'll have to terminate.
206 196 worker.log.warning(
207 197 "memory usage %s > %s after gc, quitting",
208 198 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
209 199 # This will cause worker to auto-restart itself
210 200 worker.alive = False
211 201 worker._last_memory_check_time = time.time()
212 202
213 203
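To make the recycle decision concrete, here is a hedged walk-through using the illustrative 1 GiB limit and 0.8 threshold from the fragment above:

    # recovery bar = 1073741824 * 0.8 = 858993459.2 bytes (~819 MiB)
    # RSS grows to 1.2 GiB  -> over the limit, gc.collect() is forced
    # RSS still at 0.9 GiB  -> above the bar, worker.alive = False (auto-restart)
    # RSS drops to 0.7 GiB  -> below the bar, the worker keeps serving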
214 204 def worker_int(worker):
215 205 worker.log.info("[%-10s] worker received INT or QUIT signal", worker.pid)
216 206
217 207 # get traceback info, on worker crash
218 208 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
219 209 code = []
220 210 for thread_id, stack in sys._current_frames().items():
221 211 code.append(
222 212 "\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
223 213 for fname, lineno, name, line in traceback.extract_stack(stack):
224 214 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
225 215 if line:
226 216 code.append(" %s" % (line.strip()))
227 217 worker.log.debug("\n".join(code))
228 218
229 219
230 220 def worker_abort(worker):
231 221 worker.log.info("[%-10s] worker received SIGABRT signal", worker.pid)
232 222
233 223
234 224 def worker_exit(server, worker):
235 225 worker.log.info("[%-10s] worker exit", worker.pid)
236 226
237 227
238 228 def child_exit(server, worker):
239 229 worker.log.info("[%-10s] worker child exit", worker.pid)
240 230
241 231
242 232 def pre_request(worker, req):
243 233 worker.start_time = time.time()
244 234 worker.log.debug(
245 235 "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
246 236
247 237
248 238 def post_request(worker, req, environ, resp):
249 239 total_time = time.time() - worker.start_time
240 # Gunicorn sometimes has problems with reading the status_code
241 status_code = getattr(resp, 'status_code', '')
250 242 worker.log.debug(
251 243 "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
252 worker.nr, req.method, req.path, resp.status_code, total_time)
244 worker.nr, req.method, req.path, status_code, total_time)
253 245 _check_memory_usage(worker)
254 246
255 247
256 248 class RhodeCodeLogger(Logger):
257 249 """
258 250 Custom Logger that allows some customization that gunicorn doesn't allow
259 251 """
260 252
261 253 datefmt = r"%Y-%m-%d %H:%M:%S"
262 254
263 255 def __init__(self, cfg):
264 256 Logger.__init__(self, cfg)
265 257
266 258 def now(self):
267 259 """ return date in RhodeCode Log format """
268 260 now = time.time()
269 261 msecs = int((now - int(now)) * 1000)
270 262 return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
271 263
272 264
273 265 logger_class = RhodeCodeLogger
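This module is consumed as a regular gunicorn config file. A hedged example of how it might be wired up (paths are placeholders), consistent with the server.cfg.paste lookup in post_fork():

    gunicorn --paste /path/to/rhodecode.ini -c /path/to/gunicorn_config.py

The custom logger_class and the hooks above are then picked up by gunicorn automatically; worker counts, timeouts, and the memory settings stay in the .ini, as the module docstring recommends.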