gunicorn: updated config template with better compatibility with configparser
marcink
r4167:a57c25da default
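
The substantive change in this revision is swapping configparser.ConfigParser for configparser.RawConfigParser and reading values through has_section/has_option/getint/getfloat, which behave the same under Python 2's ConfigParser module and Python 3's configparser (the mapping-style access used before, 'server:main' in conf and conf['server:main'], is only supported by Python 3). A likely secondary benefit is that RawConfigParser performs no %(...)s interpolation, so Paste-style .ini values containing literal logging format strings come back verbatim. A minimal illustrative sketch, not part of the commit; the logformat option is a made-up example (Python 3 shown):

import configparser

ini_text = u"[server:main]\nlogformat = %(asctime)s %(message)s\n"

# ConfigParser applies %(...)s interpolation at get() time and fails on values
# that merely contain logging-style placeholders.
interpolating = configparser.ConfigParser()
interpolating.read_string(ini_text)
try:
    interpolating.get('server:main', 'logformat')
except configparser.InterpolationMissingOptionError as exc:
    print('ConfigParser rejects the value:', exc)

# RawConfigParser returns the value verbatim; no interpolation is attempted.
raw = configparser.RawConfigParser()
raw.read_string(ini_text)
print(raw.get('server:main', 'logformat'))
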
@@ -1,270 +1,273 b''
1 1 """
2 2 gunicorn config extension and hooks. Sets additional configuration that is
3 3 available post the .ini config.
4 4
5 5 - workers = ${cpu_number}
6 6 - threads = 1
7 7 - proc_name = ${gunicorn_proc_name}
8 8 - worker_class = sync
9 9 - worker_connections = 10
10 10 - max_requests = 1000
11 11 - max_requests_jitter = 30
12 12 - timeout = 21600
13 13
14 14 """
15 15
16 16 import gc
17 17 import os
18 18 import sys
19 19 import math
20 20 import time
21 21 import threading
22 22 import traceback
23 23 import random
24 24 from gunicorn.glogging import Logger
25 25
26 26
27 27 def get_workers():
28 28 import multiprocessing
29 29 return multiprocessing.cpu_count() * 2 + 1
30 30
31 31 # GLOBAL
32 32 errorlog = '-'
33 33 accesslog = '-'
34 34
35 35
36 36 # SERVER MECHANICS
37 37 # None == system temp dir
38 38 # worker_tmp_dir is recommended to be set to some tmpfs
39 39 worker_tmp_dir = None
40 40 tmp_upload_dir = None
41 41
42 42 # Custom log format
43 43 access_log_format = (
44 44 '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
45 45
46 46 # self adjust workers based on CPU count
47 47 # workers = get_workers()
48 48
49 49
50 50 def _get_process_rss(pid=None):
51 51 try:
52 52 import psutil
53 53 if pid:
54 54 proc = psutil.Process(pid)
55 55 else:
56 56 proc = psutil.Process()
57 57 return proc.memory_info().rss
58 58 except Exception:
59 59 return None
60 60
61 61
62 62 def _get_config(ini_path):
63 63
64 64 try:
65 65 import configparser
66 66 except ImportError:
67 67 import ConfigParser as configparser
68 68 try:
69 config = configparser.ConfigParser()
69 config = configparser.RawConfigParser()
70 70 config.read(ini_path)
71 71 return config
72 72 except Exception:
73 73 return None
74 74
75 75
76 76 def _time_with_offset(memory_usage_check_interval):
77 77 return time.time() - random.randint(0, memory_usage_check_interval/2.0)
78 78
79 79
80 80 def pre_fork(server, worker):
81 81 pass
82 82
83 83
84 84 def post_fork(server, worker):
85 85
86 86 # memory spec defaults
87 87 _memory_max_usage = 0
88 88 _memory_usage_check_interval = 60
89 89 _memory_usage_recovery_threshold = 0.8
90 90
91 91 ini_path = os.path.abspath(server.cfg.paste)
92 92 conf = _get_config(ini_path)
93 if conf and 'server:main' in conf:
94 section = conf['server:main']
93
94 section = 'server:main'
95 if conf and conf.has_section(section):
95 96
96 if section.get('memory_max_usage'):
97 _memory_max_usage = int(section.get('memory_max_usage'))
98 if section.get('memory_usage_check_interval'):
99 _memory_usage_check_interval = int(section.get('memory_usage_check_interval'))
100 if section.get('memory_usage_recovery_threshold'):
101 _memory_usage_recovery_threshold = float(section.get('memory_usage_recovery_threshold'))
97 if conf.has_option(section, 'memory_max_usage'):
98 _memory_max_usage = conf.getint(section, 'memory_max_usage')
99
100 if conf.has_option(section, 'memory_usage_check_interval'):
101 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
102
103 if conf.has_option(section, 'memory_usage_recovery_threshold'):
104 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
102 105
103 106 worker._memory_max_usage = _memory_max_usage
104 107 worker._memory_usage_check_interval = _memory_usage_check_interval
105 108 worker._memory_usage_recovery_threshold = _memory_usage_recovery_threshold
106 109
107 110 # register memory last check time, with some random offset so we don't recycle all
108 111 # at once
109 112 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
110 113
111 114 if _memory_max_usage:
112 115 server.log.info("[%-10s] WORKER spawned with max memory set at %s", worker.pid,
113 116 _format_data_size(_memory_max_usage))
114 117 else:
115 118 server.log.info("[%-10s] WORKER spawned", worker.pid)
116 119
117 120
118 121 def pre_exec(server):
119 122 server.log.info("Forked child, re-executing.")
120 123
121 124
122 125 def on_starting(server):
123 126 server_lbl = '{} {}'.format(server.proc_name, server.address)
124 127 server.log.info("Server %s is starting.", server_lbl)
125 128
126 129
127 130 def when_ready(server):
128 131 server.log.info("Server %s is ready. Spawning workers", server)
129 132
130 133
131 134 def on_reload(server):
132 135 pass
133 136
134 137
135 138 def _format_data_size(size, unit="B", precision=1, binary=True):
136 139 """Format a number using SI units (kilo, mega, etc.).
137 140
138 141 ``size``: The number as a float or int.
139 142
140 143 ``unit``: The unit name in plural form. Examples: "bytes", "B".
141 144
142 145 ``precision``: How many digits to the right of the decimal point. Default
143 146 is 1. 0 suppresses the decimal point.
144 147
145 148 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
146 149 If true, use base-2 binary prefixes (kibi = Ki = 1024).
147 150
148 151     ``full_name``: If false (default), use the prefix abbreviation ("k" or
149 152       "Ki"). If true, use the full prefix ("kilo" or "kibi") instead of
150 153       the abbreviation.
151 154
152 155 """
153 156
154 157 if not binary:
155 158 base = 1000
156 159 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
157 160 else:
158 161 base = 1024
159 162 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
160 163
161 164 sign = ""
162 165 if size > 0:
163 166 m = int(math.log(size, base))
164 167 elif size < 0:
165 168 sign = "-"
166 169 size = -size
167 170 m = int(math.log(size, base))
168 171 else:
169 172 m = 0
170 173 if m > 8:
171 174 m = 8
172 175
173 176 if m == 0:
174 177 precision = '%.0f'
175 178 else:
176 179 precision = '%%.%df' % precision
177 180
178 181 size = precision % (size / math.pow(base, m))
179 182
180 183 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
181 184
182 185
183 186 def _check_memory_usage(worker):
184 187 memory_max_usage = worker._memory_max_usage
185 188 if not memory_max_usage:
186 189 return
187 190
188 191 memory_usage_check_interval = worker._memory_usage_check_interval
189 192 memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
190 193
191 194 elapsed = time.time() - worker._last_memory_check_time
192 195 if elapsed > memory_usage_check_interval:
193 196 mem_usage = _get_process_rss()
194 197 if mem_usage and mem_usage > memory_max_usage:
195 198 worker.log.info(
196 199 "memory usage %s > %s, forcing gc",
197 200 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
198 201 # Try to clean it up by forcing a full collection.
199 202 gc.collect()
200 203 mem_usage = _get_process_rss()
201 204 if mem_usage > memory_usage_recovery_threshold:
202 205 # Didn't clean up enough, we'll have to terminate.
203 206 worker.log.warning(
204 207 "memory usage %s > %s after gc, quitting",
205 208 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
206 209 # This will cause worker to auto-restart itself
207 210 worker.alive = False
208 211 worker._last_memory_check_time = time.time()
209 212
210 213
211 214 def worker_int(worker):
212 215 worker.log.info("[%-10s] worker received INT or QUIT signal", worker.pid)
213 216
214 217 # get traceback info, on worker crash
215 218 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
216 219 code = []
217 220 for thread_id, stack in sys._current_frames().items():
218 221 code.append(
219 222 "\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
220 223 for fname, lineno, name, line in traceback.extract_stack(stack):
221 224 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
222 225 if line:
223 226 code.append(" %s" % (line.strip()))
224 227 worker.log.debug("\n".join(code))
225 228
226 229
227 230 def worker_abort(worker):
228 231 worker.log.info("[%-10s] worker received SIGABRT signal", worker.pid)
229 232
230 233
231 234 def worker_exit(server, worker):
232 235 worker.log.info("[%-10s] worker exit", worker.pid)
233 236
234 237
235 238 def child_exit(server, worker):
236 239 worker.log.info("[%-10s] worker child exit", worker.pid)
237 240
238 241
239 242 def pre_request(worker, req):
240 243 worker.start_time = time.time()
241 244 worker.log.debug(
242 245 "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
243 246
244 247
245 248 def post_request(worker, req, environ, resp):
246 249 total_time = time.time() - worker.start_time
247 250 worker.log.debug(
248 251 "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
249 252 worker.nr, req.method, req.path, resp.status_code, total_time)
250 253 _check_memory_usage(worker)
251 254
252 255
253 256 class RhodeCodeLogger(Logger):
254 257 """
255 258 Custom Logger that allows some customization that gunicorn doesn't allow
256 259 """
257 260
258 261 datefmt = r"%Y-%m-%d %H:%M:%S"
259 262
260 263 def __init__(self, cfg):
261 264 Logger.__init__(self, cfg)
262 265
263 266 def now(self):
264 267 """ return date in RhodeCode Log format """
265 268 now = time.time()
266 269 msecs = int((now - long(now)) * 1000)
267 270 return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
268 271
269 272
270 273 logger_class = RhodeCodeLogger
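
For reference, the memory-management options that post_fork() looks up come from the [server:main] section of the .ini file handed to gunicorn through its paste option (server.cfg.paste above). Below is a minimal sketch of how such a section could look and how RawConfigParser exposes it; only the section name and option names come from the hook, and the memory_max_usage value of 1 GiB is an illustrative assumption (Python 3 shown; the hook itself also falls back to Python 2's ConfigParser):

import configparser

SAMPLE_INI = u"""
[server:main]
; per-worker RSS limit in bytes (0, the hook's default, disables the check)
memory_max_usage = 1073741824
; seconds between the checks performed in post_request()
memory_usage_check_interval = 60
; fraction of memory_max_usage a worker must drop below after gc,
; otherwise it is flagged not alive and gets restarted
memory_usage_recovery_threshold = 0.8
"""

config = configparser.RawConfigParser()
config.read_string(SAMPLE_INI)

section = 'server:main'
if config.has_section(section):
    print(config.getint(section, 'memory_max_usage'))
    print(config.getint(section, 'memory_usage_check_interval'))
    print(config.getfloat(section, 'memory_usage_recovery_threshold'))
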