@@ -1,96 +1,90 @@
-"""gunicorn config hooks"""
+"""
+gunicorn config extension and hooks. Sets additional configuration that is
+available post the .ini config.
+
+- workers = ${cpu_number}
+- threads = 1
+- proc_name = ${gunicorn_proc_name}
+- worker_class = sync
+- worker_connections = 10
+- max_requests = 1000
+- max_requests_jitter = 30
+- timeout = 21600
+
+"""
 
 import multiprocessing
 import sys
 import threading
 import traceback
 
 
 # GLOBAL
 errorlog = '-'
 accesslog = '-'
 loglevel = 'debug'
 
 # SECURITY
 limit_request_line = 4094
 limit_request_fields = 100
 limit_request_field_size = 8190
 
 # SERVER MECHANICS
 # None == system temp dir
 worker_tmp_dir = None
 tmp_upload_dir = None
-#proc_name =
-
-# self adjust workers based on CPU #
-#workers = multiprocessing.cpu_count() * 2 + 1
-
-access_log_format = '[%(p)s] %(h)15s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s" request_time:%(L)s'
-
-# For the gevent worker classes #
-# this limits the maximum number of simultaneous clients that #
-# a single process can handle. #
-#worker_connections = 10
 
-# Max requests to handle by each worker before restarting it, #
-# could prevent memory leaks #
-#max_requests = 1000
-#max_requests_jitter = 30
-
+# Custom log format
+access_log_format = (
+    '%(t)s GNCRN %(p)-8s %(h)-15s rqt:%(L)s %(s)s %(b)s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
 
-# If a worker does not notify the master process in this #
-# number of seconds it is killed and a new worker is spawned #
-# to replace it. #
-#timeout = 3600
-
-access_log_format = (
-    '[%(p)-10s] %(h)s time:%(L)s %(l)s %(u)s '
-    '%(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"')
+# self adjust workers based on CPU count
+# workers = multiprocessing.cpu_count() * 2 + 1
 
 
 def post_fork(server, worker):
     server.log.info("[<%-10s>] WORKER spawned", worker.pid)
 
 
 def pre_fork(server, worker):
     pass
 
 
 def pre_exec(server):
     server.log.info("Forked child, re-executing.")
 
 
 def when_ready(server):
     server.log.info("Server is ready. Spawning workers")
 
 
 def worker_int(worker):
     worker.log.info("[<%-10s>] worker received INT or QUIT signal", worker.pid)
 
-    # get traceback info
+    # get traceback info, on worker crash
     id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
     code = []
     for thread_id, stack in sys._current_frames().items():
         code.append(
             "\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
         for fname, lineno, name, line in traceback.extract_stack(stack):
             code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
             if line:
                 code.append("    %s" % (line.strip()))
     worker.log.debug("\n".join(code))
 
 
 def worker_abort(worker):
     worker.log.info("[<%-10s>] worker received SIGABRT signal", worker.pid)
 
 
 def pre_request(worker, req):
     return
     worker.log.debug("[<%-10s>] PRE WORKER: %s %s",
                      worker.pid, req.method, req.path)
 
 
 def post_request(worker, req, environ, resp):
     return
     worker.log.debug("[<%-10s>] POST WORKER: %s %s resp: %s", worker.pid,
                      req.method, req.path, resp.status_code)
\ No newline at end of file
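
For anyone trying the new config module locally: gunicorn loads a Python hooks file like this through its -c flag, and the hooks then fire for whatever WSGI application is being served. The sketch below is only an illustration under assumed file names (gunicorn_config.py for the module in this diff, demo_app.py for a throwaway app); it is not part of the change.

# demo_app.py -- throwaway WSGI app, used only to see the hooks above fire.
# The module and file names here are assumptions for illustration.

def app(environ, start_response):
    # Return a small plain-text body so the custom access_log_format has
    # a status, byte count and request line to record.
    body = b'ok\n'
    start_response('200 OK', [('Content-Type', 'text/plain'),
                              ('Content-Length', str(len(body)))])
    return [body]

# Assumed invocation (gunicorn's -c flag loads a Python config file):
#   gunicorn -c gunicorn_config.py demo_app:app
# post_fork/when_ready then log worker startup, and worker_int dumps the
# per-thread tracebacks if a worker receives INT or QUIT.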
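
The traceback dump inside worker_int is plain standard-library code, so the same technique can be checked outside gunicorn. A minimal sketch, assuming an ordinary script instead of a worker process (the function name dump_all_thread_stacks is made up for this example):

import sys
import threading
import traceback


def dump_all_thread_stacks():
    # Same idea as worker_int: map thread idents to names, then walk the
    # current frame of every live thread and format its stack.
    id2name = {th.ident: th.name for th in threading.enumerate()}
    lines = []
    for thread_id, stack in sys._current_frames().items():
        lines.append("\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
        for fname, lineno, name, line in traceback.extract_stack(stack):
            lines.append('File: "%s", line %d, in %s' % (fname, lineno, name))
            if line:
                lines.append("    %s" % line.strip())
    return "\n".join(lines)


if __name__ == '__main__':
    # Prints at least the main thread's stack, ending in this call.
    print(dump_all_thread_stacks())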