@@ -1,154 +1,259 @@
 """
 gunicorn config extension and hooks. Sets additional configuration that is
 available post the .ini config.

 - workers = ${cpu_number}
 - threads = 1
 - proc_name = ${gunicorn_proc_name}
 - worker_class = sync
 - worker_connections = 10
 - max_requests = 1000
 - max_requests_jitter = 30
 - timeout = 21600

 """

-import multiprocessing
+import math
+import gc
 import sys
 import time
-import datetime
 import threading
 import traceback
+import random
 from gunicorn.glogging import Logger


+def get_workers():
+    import multiprocessing
+    return multiprocessing.cpu_count() * 2 + 1
+
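
Note on the added get_workers() helper: it encodes the common gunicorn sizing rule of two workers per CPU core plus one, but it is only referenced from a commented-out line further down, so the worker count still comes from the .ini file unless that line is enabled. A minimal standalone sketch of the rule (standard library only):

    import multiprocessing

    def get_workers():
        # same rule as the helper above: 2 workers per core, plus one
        return multiprocessing.cpu_count() * 2 + 1

    if __name__ == '__main__':
        # e.g. on a 4-core machine this prints 9
        print('suggested workers: %d' % get_workers())
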
 # GLOBAL
 errorlog = '-'
 accesslog = '-'
-loglevel = '
+loglevel = 'info'

 # SECURITY

 # The maximum size of HTTP request line in bytes.
 # 0 for unlimited
 limit_request_line = 0

 # Limit the number of HTTP headers fields in a request.
 # By default this value is 100 and can't be larger than 32768.
-limit_request_fields =
+limit_request_fields = 32768

 # Limit the allowed size of an HTTP request header field.
 # Value is a positive number or 0.
 # Setting it to 0 will allow unlimited header field sizes.
 limit_request_field_size = 0

-
 # Timeout for graceful workers restart.
 # After receiving a restart signal, workers have this much time to finish
 # serving requests. Workers still alive after the timeout (starting from the
 # receipt of the restart signal) are force killed.
-graceful_timeout =
-
+graceful_timeout = 60 * 60

 # The number of seconds to wait for requests on a Keep-Alive connection.
 # Generally set in the 1-5 seconds range.
 keepalive = 2

-
 # SERVER MECHANICS
 # None == system temp dir
 # worker_tmp_dir is recommended to be set to some tmpfs
 worker_tmp_dir = None
 tmp_upload_dir = None

 # Custom log format
 access_log_format = (
-    '%(t)s
+    '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
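
Note on the new access_log_format: the placeholders are gunicorn's documented access-log identifiers. An equivalent copy of the added line, split up and commented for reference (it builds the same string):

    access_log_format = (
        '%(t)s '                 # timestamp of the request
        '%(p)s '                 # process id of the worker
        'INFO [GNCRN] '          # literal marker text
        '%(h)-15s '              # remote address, left-aligned to 15 chars
        'rqt:%(L)s '             # request time in decimal seconds
        '%(s)s '                 # response status code
        '%(b)-6s '               # response length (or '-')
        '"%(m)s:%(U)s %(q)s" '   # method, URL path (no query), query string
        'usr:%(u)s '             # user name, '-' when not available
        '"%(f)s" '               # referer
        '"%(a)s"')               # user agent
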

 # self adjust workers based on CPU count
-# workers = multiprocessing.cpu_count() * 2 + 1
+# workers = get_workers()
+
+# n * 1024 * 1024 == n MBs, 0 = memory monitoring is disabled
+MAX_MEMORY_USAGE = 0 * 1024 * 1024
+
+# How often in seconds to check for memory usage
+MEMORY_USAGE_CHECK_INTERVAL = 30
+
+# If a gc brings us back below this threshold, we can avoid termination.
+MEMORY_USAGE_RECOVERY_THRESHOLD = MAX_MEMORY_USAGE * 0.8
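
Memory monitoring ships disabled here (MAX_MEMORY_USAGE is 0). Enabling it is only a matter of changing the multiplier; the values below are illustrative and not part of the change:

    # illustrative values only: cap each worker at 2 GiB of RSS
    MAX_MEMORY_USAGE = 2048 * 1024 * 1024          # 2048 MiB
    MEMORY_USAGE_CHECK_INTERVAL = 30               # check at most every 30s
    MEMORY_USAGE_RECOVERY_THRESHOLD = MAX_MEMORY_USAGE * 0.8   # ~1638 MiB

    print(MAX_MEMORY_USAGE, MEMORY_USAGE_RECOVERY_THRESHOLD)
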


-def post_fork(server, worker):
-    server.log.info("[<%-10s>] WORKER spawned", worker.pid)
+def _get_process_rss(pid=None):
+    try:
+        import psutil
+        if pid:
+            proc = psutil.Process(pid)
+        else:
+            proc = psutil.Process()
+        return proc.memory_info().rss
+    except Exception:
+        return None
+
+
+def _time_with_offset():
+    return time.time() - random.randint(0, MEMORY_USAGE_CHECK_INTERVAL/2.0)
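
_get_process_rss() measures the worker's resident set size via psutil and deliberately returns None when psutil is missing or the call fails, which keeps the feature optional. A standalone sketch of the underlying call, assuming psutil is installed:

    import psutil

    # RSS (resident set size) of the current process, in bytes
    rss = psutil.Process().memory_info().rss
    print('current RSS: %d bytes (%.1f MiB)' % (rss, rss / (1024.0 * 1024)))
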


 def pre_fork(server, worker):
     pass


+def post_fork(server, worker):
+    server.log.info("<%s> WORKER spawned", worker.pid)
+    # register memory last check time, with some random offset so we don't recycle all
+    # at once
+    worker._last_memory_check_time = _time_with_offset()
+
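
The random backdating in post_fork() spreads each worker's first memory check over up to half a check interval, so workers forked at the same moment do not all run the check (and potentially get recycled) in the same tick. A standalone sketch of the staggering effect:

    import time
    import random

    MEMORY_USAGE_CHECK_INTERVAL = 30

    def _time_with_offset():
        # pretend the last check happened 0..15 seconds ago
        return time.time() - random.randint(0, int(MEMORY_USAGE_CHECK_INTERVAL / 2))

    # five workers forked together still get spread-out first checks
    for worker_id in range(5):
        next_check = _time_with_offset() + MEMORY_USAGE_CHECK_INTERVAL
        print('worker %d: first check in ~%.0fs' % (worker_id, next_check - time.time()))
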
+
 def pre_exec(server):
     server.log.info("Forked child, re-executing.")


 def on_starting(server):
-    server.log.info("Server is starting.")
+    server_lbl = '{} {}'.format(server.proc_name, server.address)
+    server.log.info("Server %s is starting.", server_lbl)


 def when_ready(server):
-    server.log.info("Server is ready. Spawning workers")
+    server.log.info("Server %s is ready. Spawning workers", server)


 def on_reload(server):
     pass

+def _format_data_size(size, unit="B", precision=1, binary=True):
+    """Format a number using SI units (kilo, mega, etc.).
+
+    ``size``: The number as a float or int.
+
+    ``unit``: The unit name in plural form. Examples: "bytes", "B".
+
+    ``precision``: How many digits to the right of the decimal point. Default
+    is 1. 0 suppresses the decimal point.
+
+    ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
+    If true, use base-2 binary prefixes (kibi = Ki = 1024).
+
+    ``full_name``: If false (default), use the prefix abbreviation ("k" or
+    "Ki"). If true, use the full prefix ("kilo" or "kibi"). If false,
+    use abbreviation ("k" or "Ki").
+
+    """
+
+    if not binary:
+        base = 1000
+        multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
+    else:
+        base = 1024
+        multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
+
+    sign = ""
+    if size > 0:
+        m = int(math.log(size, base))
+    elif size < 0:
+        sign = "-"
+        size = -size
+        m = int(math.log(size, base))
+    else:
+        m = 0
+    if m > 8:
+        m = 8
+
+    if m == 0:
+        precision = '%.0f'
+    else:
+        precision = '%%.%df' % precision
+
+    size = precision % (size / math.pow(base, m))
+
+    return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
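
_format_data_size() is only used to render byte counts in log messages. A few example calls, assuming the function above is in scope, with the outputs it produces for the default binary (base-1024) prefixes:

    print(_format_data_size(0))                    # '0 B'
    print(_format_data_size(1572864))              # '1.5 MiB'
    print(_format_data_size(2048 * 1024 * 1024))   # '2.0 GiB'
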
+
+
+def _check_memory_usage(worker):
+
+    if not MAX_MEMORY_USAGE:
+        return
+
+    elapsed = time.time() - worker._last_memory_check_time
+    if elapsed > MEMORY_USAGE_CHECK_INTERVAL:
+        mem_usage = _get_process_rss()
+        if mem_usage and mem_usage > MAX_MEMORY_USAGE:
+            worker.log.info(
+                "memory usage %s > %s, forcing gc",
+                _format_data_size(mem_usage), _format_data_size(MAX_MEMORY_USAGE))
+            # Try to clean it up by forcing a full collection.
+            gc.collect()
+            mem_usage = _get_process_rss()
+            if mem_usage > MEMORY_USAGE_RECOVERY_THRESHOLD:
+                # Didn't clean up enough, we'll have to terminate.
+                worker.log.warning(
+                    "memory usage %s > %s after gc, quitting",
+                    _format_data_size(mem_usage), _format_data_size(MAX_MEMORY_USAGE))
+                # This will cause worker to auto-restart itself
+                worker.alive = False
+        worker._last_memory_check_time = time.time()
+
+
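
The recycle decision in _check_memory_usage() is: act only when RSS exceeds MAX_MEMORY_USAGE, force a full garbage collection, and terminate the worker (worker.alive = False, after which gunicorn spawns a replacement) only if RSS is still above the 80% recovery threshold. A standalone simulation of just that decision, with illustrative numbers:

    # illustrative numbers, not from the diff
    MAX_MEMORY_USAGE = 1024 * 1024 * 1024                     # 1 GiB cap
    MEMORY_USAGE_RECOVERY_THRESHOLD = MAX_MEMORY_USAGE * 0.8  # ~819 MiB

    def should_recycle(rss_before_gc, rss_after_gc):
        # mirrors _check_memory_usage: only act when over the cap, and only
        # recycle if gc did not bring us back under the recovery threshold
        if rss_before_gc <= MAX_MEMORY_USAGE:
            return False
        return rss_after_gc > MEMORY_USAGE_RECOVERY_THRESHOLD

    print(should_recycle(1100 * 1024 * 1024, 700 * 1024 * 1024))  # False: gc recovered
    print(should_recycle(1100 * 1024 * 1024, 900 * 1024 * 1024))  # True: still too big
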
 def worker_int(worker):
     worker.log.info("[<%-10s>] worker received INT or QUIT signal", worker.pid)

     # get traceback info, on worker crash
     id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
     code = []
     for thread_id, stack in sys._current_frames().items():
         code.append(
             "\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
         for fname, lineno, name, line in traceback.extract_stack(stack):
             code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
             if line:
                 code.append(" %s" % (line.strip()))
     worker.log.debug("\n".join(code))
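
worker_int() logs a stack trace for every live thread when a worker receives INT or QUIT, which is a handy way to see where a hung worker was stuck. The same technique works outside gunicorn; a minimal standalone version:

    import sys
    import threading
    import traceback

    def dump_all_threads():
        # map thread idents to names, then walk each thread's current frame
        id2name = dict((th.ident, th.name) for th in threading.enumerate())
        lines = []
        for thread_id, stack in sys._current_frames().items():
            lines.append("\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
            for fname, lineno, name, line in traceback.extract_stack(stack):
                lines.append('File: "%s", line %d, in %s' % (fname, lineno, name))
                if line:
                    lines.append("  %s" % line.strip())
        return "\n".join(lines)

    print(dump_all_threads())
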


 def worker_abort(worker):
     worker.log.info("[<%-10s>] worker received SIGABRT signal", worker.pid)


 def worker_exit(server, worker):
     worker.log.info("[<%-10s>] worker exit", worker.pid)


 def child_exit(server, worker):
     worker.log.info("[<%-10s>] worker child exit", worker.pid)


 def pre_request(worker, req):
     worker.start_time = time.time()
     worker.log.debug(
         "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)


 def post_request(worker, req, environ, resp):
     total_time = time.time() - worker.start_time
     worker.log.debug(
         "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
         worker.nr, req.method, req.path, resp.status_code, total_time)
+    _check_memory_usage(worker)


 class RhodeCodeLogger(Logger):
     """
     Custom Logger that allows some customization that gunicorn doesn't allow
     """

     datefmt = r"%Y-%m-%d %H:%M:%S"

     def __init__(self, cfg):
         Logger.__init__(self, cfg)

     def now(self):
         """ return date in RhodeCode Log format """
         now = time.time()
         msecs = int((now - long(now)) * 1000)
         return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)


 logger_class = RhodeCodeLogger
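
RhodeCodeLogger overrides Logger.now(), which gunicorn's glogging uses to fill the %(t)s timestamp, so log timestamps come out as %Y-%m-%d %H:%M:%S with millisecond precision appended (the long() call also shows this file targets Python 2). A standalone sketch of the same formatting, written with int() so it runs on Python 2 and 3:

    import time

    datefmt = "%Y-%m-%d %H:%M:%S"

    def now_with_msecs():
        # produces e.g. '2019-03-01 12:34:56.789'
        now = time.time()
        msecs = int((now - int(now)) * 1000)
        return time.strftime(datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)

    print(now_with_msecs())
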