##// END OF EJS Templates
gunicorn: print config on startup
super-admin -
r1150:7050b122 default
parent child Browse files
Show More
@@ -1,112 +1,112 b''
1 # required for pushd to work..
1 # required for pushd to work..
2 #SHELL = /bin/bash
2 #SHELL = /bin/bash
3
3
4
4
5 # set by: PATH_TO_OUTDATED_PACKAGES=/some/path/outdated_packages.py
5 # set by: PATH_TO_OUTDATED_PACKAGES=/some/path/outdated_packages.py
6 OUTDATED_PACKAGES = ${PATH_TO_OUTDATED_PACKAGES}
6 OUTDATED_PACKAGES = ${PATH_TO_OUTDATED_PACKAGES}
7
7
8 .PHONY: clean
8 .PHONY: clean
9 ## Cleanup compiled and cache py files
9 ## Cleanup compiled and cache py files
10 clean:
10 clean:
11 make test-clean
11 make test-clean
12 find . -type f \( -iname '*.c' -o -iname '*.pyc' -o -iname '*.so' -o -iname '*.orig' \) -exec rm '{}' ';'
12 find . -type f \( -iname '*.c' -o -iname '*.pyc' -o -iname '*.so' -o -iname '*.orig' \) -exec rm '{}' ';'
13 find . -type d -name "build" -prune -exec rm -rf '{}' ';'
13 find . -type d -name "build" -prune -exec rm -rf '{}' ';'
14
14
15
15
16 .PHONY: test
16 .PHONY: test
17 ## run test-clean and tests
17 ## run test-clean and tests
18 test:
18 test:
19 make test-clean
19 make test-clean
20 make test-only
20 make test-only
21
21
22
22
23 .PHONY: test-clean
23 .PHONY: test-clean
24 ## run test-clean and tests
24 ## run test-clean and tests
25 test-clean:
25 test-clean:
26 rm -rf coverage.xml htmlcov junit.xml pylint.log result
26 rm -rf coverage.xml htmlcov junit.xml pylint.log result
27 find . -type d -name "__pycache__" -prune -exec rm -rf '{}' ';'
27 find . -type d -name "__pycache__" -prune -exec rm -rf '{}' ';'
28 find . -type f \( -iname '.coverage.*' \) -exec rm '{}' ';'
28 find . -type f \( -iname '.coverage.*' \) -exec rm '{}' ';'
29
29
30
30
31 .PHONY: test-only
31 .PHONY: test-only
32 ## Run tests only without cleanup
32 ## Run tests only without cleanup
33 test-only:
33 test-only:
34 PYTHONHASHSEED=random \
34 PYTHONHASHSEED=random \
35 py.test -x -vv -r xw -p no:sugar \
35 py.test -x -vv -r xw -p no:sugar \
36 --cov-report=term-missing --cov-report=html \
36 --cov-report=term-missing --cov-report=html \
37 --cov=vcsserver vcsserver
37 --cov=vcsserver vcsserver
38
38
39
39
40 .PHONY: pip-packages
40 .PHONY: pip-packages
41 ## Show outdated packages
41 ## Show outdated packages
42 pip-packages:
42 pip-packages:
43 python ${OUTDATED_PACKAGES}
43 python ${OUTDATED_PACKAGES}
44
44
45
45
46 .PHONY: build
46 .PHONY: build
47 ## Build sdist/egg
47 ## Build sdist/egg
48 build:
48 build:
49 python -m build
49 python -m build
50
50
51
51
52 .PHONY: dev-env
52 .PHONY: dev-env
53 ## make dev-env based on the requirements files and install develop of packages
53 ## make dev-env based on the requirements files and install develop of packages
54 dev-env:
54 dev-env:
55 pip install build virtualenv
55 pip install build virtualenv
56 pip wheel --wheel-dir=/home/rhodecode/.cache/pip/wheels -r requirements.txt -r requirements_test.txt -r requirements_debug.txt
56 pip wheel --wheel-dir=/home/rhodecode/.cache/pip/wheels -r requirements.txt -r requirements_test.txt -r requirements_debug.txt
57 pip install --no-index --find-links=/home/rhodecode/.cache/pip/wheels -r requirements.txt -r requirements_test.txt -r requirements_debug.txt
57 pip install --no-index --find-links=/home/rhodecode/.cache/pip/wheels -r requirements.txt -r requirements_test.txt -r requirements_debug.txt
58 pip install -e .
58 pip install -e .
59
59
60
60
61 .PHONY: dev-srv
61 .PHONY: dev-srv
62 ## run develop server instance
62 ## run develop server instance
63 dev-srv:
63 dev-srv:
64 pserve --reload .dev/dev.ini
64 pserve --reload .dev/dev.ini
65
65
66
66
67 .PHONY: dev-srv-g
67 .PHONY: dev-srv-g
68 ## run gunicorn multi process workers
68 ## run gunicorn multi process workers
69 dev-srv-g:
69 dev-srv-g:
70 gunicorn --workers=4 --paste .dev/dev.ini --bind=0.0.0.0:10010 --worker-class=sync --threads=1 --config=configs/gunicorn_config.py --timeout=120
70 gunicorn --workers=4 --paste .dev/dev.ini --bind=0.0.0.0:10010 --config=.dev/gunicorn_config.py
71
71
72 # Default command on calling make
72 # Default command on calling make
73 .DEFAULT_GOAL := show-help
73 .DEFAULT_GOAL := show-help
74
74
75 .PHONY: show-help
75 .PHONY: show-help
76 show-help:
76 show-help:
77 @echo "$$(tput bold)Available rules:$$(tput sgr0)"
77 @echo "$$(tput bold)Available rules:$$(tput sgr0)"
78 @echo
78 @echo
79 @sed -n -e "/^## / { \
79 @sed -n -e "/^## / { \
80 h; \
80 h; \
81 s/.*//; \
81 s/.*//; \
82 :doc" \
82 :doc" \
83 -e "H; \
83 -e "H; \
84 n; \
84 n; \
85 s/^## //; \
85 s/^## //; \
86 t doc" \
86 t doc" \
87 -e "s/:.*//; \
87 -e "s/:.*//; \
88 G; \
88 G; \
89 s/\\n## /---/; \
89 s/\\n## /---/; \
90 s/\\n/ /g; \
90 s/\\n/ /g; \
91 p; \
91 p; \
92 }" ${MAKEFILE_LIST} \
92 }" ${MAKEFILE_LIST} \
93 | LC_ALL='C' sort --ignore-case \
93 | LC_ALL='C' sort --ignore-case \
94 | awk -F '---' \
94 | awk -F '---' \
95 -v ncol=$$(tput cols) \
95 -v ncol=$$(tput cols) \
96 -v indent=19 \
96 -v indent=19 \
97 -v col_on="$$(tput setaf 6)" \
97 -v col_on="$$(tput setaf 6)" \
98 -v col_off="$$(tput sgr0)" \
98 -v col_off="$$(tput sgr0)" \
99 '{ \
99 '{ \
100 printf "%s%*s%s ", col_on, -indent, $$1, col_off; \
100 printf "%s%*s%s ", col_on, -indent, $$1, col_off; \
101 n = split($$2, words, " "); \
101 n = split($$2, words, " "); \
102 line_length = ncol - indent; \
102 line_length = ncol - indent; \
103 for (i = 1; i <= n; i++) { \
103 for (i = 1; i <= n; i++) { \
104 line_length -= length(words[i]) + 1; \
104 line_length -= length(words[i]) + 1; \
105 if (line_length <= 0) { \
105 if (line_length <= 0) { \
106 line_length = ncol - indent - length(words[i]) - 1; \
106 line_length = ncol - indent - length(words[i]) - 1; \
107 printf "\n%*s ", -indent, " "; \
107 printf "\n%*s ", -indent, " "; \
108 } \
108 } \
109 printf "%s ", words[i]; \
109 printf "%s ", words[i]; \
110 } \
110 } \
111 printf "\n"; \
111 printf "\n"; \
112 }'
112 }'
@@ -1,506 +1,510 b''
1 """
1 """
2 Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
2 Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
3 Gunicorn configuration should be managed by .ini files entries of RhodeCode or VCSServer
3 Gunicorn configuration should be managed by .ini files entries of RhodeCode or VCSServer
4 """
4 """
5
5
6 import gc
6 import gc
7 import os
7 import os
8 import sys
8 import sys
9 import math
9 import math
10 import time
10 import time
11 import threading
11 import threading
12 import traceback
12 import traceback
13 import random
13 import random
14 import socket
14 import socket
15 import dataclasses
15 import dataclasses
16 from gunicorn.glogging import Logger
16 from gunicorn.glogging import Logger
17
17
18
18
19 def get_workers():
19 def get_workers():
20 import multiprocessing
20 import multiprocessing
21 return multiprocessing.cpu_count() * 2 + 1
21 return multiprocessing.cpu_count() * 2 + 1
22
22
23
23
24 bind = "127.0.0.1:10010"
24 bind = "127.0.0.1:10010"
25
25
26
26
27 # Error logging output for gunicorn (-) is stdout
27 # Error logging output for gunicorn (-) is stdout
28 errorlog = '-'
28 errorlog = '-'
29
29
30 # Access logging output for gunicorn (-) is stdout
30 # Access logging output for gunicorn (-) is stdout
31 accesslog = '-'
31 accesslog = '-'
32
32
33
33
34 # SERVER MECHANICS
34 # SERVER MECHANICS
35 # None == system temp dir
35 # None == system temp dir
36 # worker_tmp_dir is recommended to be set to some tmpfs
36 # worker_tmp_dir is recommended to be set to some tmpfs
37 worker_tmp_dir = None
37 worker_tmp_dir = None
38 tmp_upload_dir = None
38 tmp_upload_dir = None
39
39
40 # use re-use port logic
40 # use re-use port logic
41 #reuse_port = True
41 #reuse_port = True
42
42
43 # Custom log format
43 # Custom log format
44 #access_log_format = (
44 #access_log_format = (
45 # '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
45 # '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
46
46
47 # loki format for easier parsing in grafana
47 # loki format for easier parsing in grafana
48 access_log_format = (
48 access_log_format = (
49 'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
49 'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
50
50
51
51
52 # Sets the number of process workers. More workers means more concurrent connections
52 # Sets the number of process workers. More workers means more concurrent connections
53 # RhodeCode can handle at the same time. Each additional worker also it increases
53 # RhodeCode can handle at the same time. Each additional worker also it increases
54 # memory usage as each has it's own set of caches.
54 # memory usage as each has it's own set of caches.
55 # Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
55 # Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
56 # than 8-10 unless for huge deployments .e.g 700-1000 users.
56 # than 8-10 unless for huge deployments .e.g 700-1000 users.
57 # `instance_id = *` must be set in the [app:main] section below (which is the default)
57 # `instance_id = *` must be set in the [app:main] section below (which is the default)
58 # when using more than 1 worker.
58 # when using more than 1 worker.
59 workers = 6
59 workers = 6
60
60
61 # self adjust workers based on CPU count, to use maximum of CPU and not overquota the resources
61 # self adjust workers based on CPU count, to use maximum of CPU and not overquota the resources
62 # workers = get_workers()
62 # workers = get_workers()
63
63
64 # Gunicorn access log level
64 # Gunicorn access log level
65 loglevel = 'info'
65 loglevel = 'info'
66
66
67 # Process name visible in process list
67 # Process name visible in process list
68 proc_name = 'rhodecode_vcsserver'
68 proc_name = 'rhodecode_vcsserver'
69
69
70 # Type of worker class, one of `sync`, `gevent`
70 # Type of worker class, one of `sync`, `gevent`
71 # currently `sync` is the only option allowed.
71 # currently `sync` is the only option allowed.
72 worker_class = 'sync'
72 worker_class = 'sync'
73
73
74 # The maximum number of simultaneous clients. Valid only for gevent
74 # The maximum number of simultaneous clients. Valid only for gevent
75 worker_connections = 10
75 worker_connections = 10
76
76
77 # Max number of requests that worker will handle before being gracefully restarted.
77 # Max number of requests that worker will handle before being gracefully restarted.
78 # Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
78 # Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
79 max_requests = 2000
79 max_requests = 2000
80 max_requests_jitter = 30
80 max_requests_jitter = 30
81
81
82 # The maximum number of pending connections.
82 # The maximum number of pending connections.
83 # Exceeding this number results in the client getting an error when attempting to connect.
83 # Exceeding this number results in the client getting an error when attempting to connect.
84 backlog = 64
84 backlog = 64
85
85
86 # Amount of time a worker can spend with handling a request before it
86 # Amount of time a worker can spend with handling a request before it
87 # gets killed and restarted. By default set to 21600 (6hrs)
87 # gets killed and restarted. By default set to 21600 (6hrs)
88 # Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
88 # Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
89 timeout = 21600
89 timeout = 21600
90
90
91 # The maximum size of HTTP request line in bytes.
91 # The maximum size of HTTP request line in bytes.
92 # 0 for unlimited
92 # 0 for unlimited
93 limit_request_line = 0
93 limit_request_line = 0
94
94
95 # Limit the number of HTTP headers fields in a request.
95 # Limit the number of HTTP headers fields in a request.
96 # By default this value is 100 and can't be larger than 32768.
96 # By default this value is 100 and can't be larger than 32768.
97 limit_request_fields = 32768
97 limit_request_fields = 32768
98
98
99 # Limit the allowed size of an HTTP request header field.
99 # Limit the allowed size of an HTTP request header field.
100 # Value is a positive number or 0.
100 # Value is a positive number or 0.
101 # Setting it to 0 will allow unlimited header field sizes.
101 # Setting it to 0 will allow unlimited header field sizes.
102 limit_request_field_size = 0
102 limit_request_field_size = 0
103
103
104 # Timeout for graceful workers restart.
104 # Timeout for graceful workers restart.
105 # After receiving a restart signal, workers have this much time to finish
105 # After receiving a restart signal, workers have this much time to finish
106 # serving requests. Workers still alive after the timeout (starting from the
106 # serving requests. Workers still alive after the timeout (starting from the
107 # receipt of the restart signal) are force killed.
107 # receipt of the restart signal) are force killed.
108 # Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
108 # Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
109 graceful_timeout = 21600
109 graceful_timeout = 21600
110
110
111 # The number of seconds to wait for requests on a Keep-Alive connection.
111 # The number of seconds to wait for requests on a Keep-Alive connection.
112 # Generally set in the 1-5 seconds range.
112 # Generally set in the 1-5 seconds range.
113 keepalive = 2
113 keepalive = 2
114
114
115 # Maximum memory usage that each worker can use before it will receive a
115 # Maximum memory usage that each worker can use before it will receive a
116 # graceful restart signal 0 = memory monitoring is disabled
116 # graceful restart signal 0 = memory monitoring is disabled
117 # Examples: 268435456 (256MB), 536870912 (512MB)
117 # Examples: 268435456 (256MB), 536870912 (512MB)
118 # 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
118 # 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
119 # Dynamic formula 1024 * 1024 * 256 == 256MBs
119 memory_max_usage = 0
120 memory_max_usage = 0
120
121
121 # How often in seconds to check for memory usage for each gunicorn worker
122 # How often in seconds to check for memory usage for each gunicorn worker
122 memory_usage_check_interval = 60
123 memory_usage_check_interval = 60
123
124
124 # Threshold value for which we don't recycle worker if GarbageCollection
125 # Threshold value for which we don't recycle worker if GarbageCollection
125 # frees up enough resources. Before each restart we try to run GC on worker
126 # frees up enough resources. Before each restart, we try to run GC on worker
126 # in case we get enough free memory after that, restart will not happen.
127 # in case we get enough free memory after that, restart will not happen.
127 memory_usage_recovery_threshold = 0.8
128 memory_usage_recovery_threshold = 0.8
128
129
129
130
130 @dataclasses.dataclass
131 @dataclasses.dataclass
131 class MemoryCheckConfig:
132 class MemoryCheckConfig:
132 max_usage: int
133 max_usage: int
133 check_interval: int
134 check_interval: int
134 recovery_threshold: float
135 recovery_threshold: float
135
136
136
137
137 def _get_process_rss(pid=None):
138 def _get_process_rss(pid=None):
138 try:
139 try:
139 import psutil
140 import psutil
140 if pid:
141 if pid:
141 proc = psutil.Process(pid)
142 proc = psutil.Process(pid)
142 else:
143 else:
143 proc = psutil.Process()
144 proc = psutil.Process()
144 return proc.memory_info().rss
145 return proc.memory_info().rss
145 except Exception:
146 except Exception:
146 return None
147 return None
147
148
148
149
149 def _get_config(ini_path):
150 def _get_config(ini_path):
150 import configparser
151 import configparser
151
152
152 try:
153 try:
153 config = configparser.RawConfigParser()
154 config = configparser.RawConfigParser()
154 config.read(ini_path)
155 config.read(ini_path)
155 return config
156 return config
156 except Exception:
157 except Exception:
157 return None
158 return None
158
159
159
160
160 def get_memory_usage_params(config=None):
161 def get_memory_usage_params(config=None):
161 # memory spec defaults
162 # memory spec defaults
162 _memory_max_usage = memory_max_usage
163 _memory_max_usage = memory_max_usage
163 _memory_usage_check_interval = memory_usage_check_interval
164 _memory_usage_check_interval = memory_usage_check_interval
164 _memory_usage_recovery_threshold = memory_usage_recovery_threshold
165 _memory_usage_recovery_threshold = memory_usage_recovery_threshold
165
166
166 if config:
167 if config:
167 ini_path = os.path.abspath(config)
168 ini_path = os.path.abspath(config)
168 conf = _get_config(ini_path)
169 conf = _get_config(ini_path)
169
170
170 section = 'server:main'
171 section = 'server:main'
171 if conf and conf.has_section(section):
172 if conf and conf.has_section(section):
172
173
173 if conf.has_option(section, 'memory_max_usage'):
174 if conf.has_option(section, 'memory_max_usage'):
174 _memory_max_usage = conf.getint(section, 'memory_max_usage')
175 _memory_max_usage = conf.getint(section, 'memory_max_usage')
175
176
176 if conf.has_option(section, 'memory_usage_check_interval'):
177 if conf.has_option(section, 'memory_usage_check_interval'):
177 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
178 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
178
179
179 if conf.has_option(section, 'memory_usage_recovery_threshold'):
180 if conf.has_option(section, 'memory_usage_recovery_threshold'):
180 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
181 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
181
182
182 _memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
183 _memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
183 or _memory_max_usage)
184 or _memory_max_usage)
184 _memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
185 _memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
185 or _memory_usage_check_interval)
186 or _memory_usage_check_interval)
186 _memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
187 _memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
187 or _memory_usage_recovery_threshold)
188 or _memory_usage_recovery_threshold)
188
189
189 return MemoryCheckConfig(_memory_max_usage, _memory_usage_check_interval, _memory_usage_recovery_threshold)
190 return MemoryCheckConfig(_memory_max_usage, _memory_usage_check_interval, _memory_usage_recovery_threshold)
190
191
191
192
192 def _time_with_offset(check_interval):
193 def _time_with_offset(check_interval):
193 return time.time() - random.randint(0, check_interval/2.0)
194 return time.time() - random.randint(0, check_interval/2.0)
194
195
195
196
196 def pre_fork(server, worker):
197 def pre_fork(server, worker):
197 pass
198 pass
198
199
199
200
200 def post_fork(server, worker):
201 def post_fork(server, worker):
201
202
202 memory_conf = get_memory_usage_params()
203 memory_conf = get_memory_usage_params()
203 _memory_max_usage = memory_conf.max_usage
204 _memory_max_usage = memory_conf.max_usage
204 _memory_usage_check_interval = memory_conf.check_interval
205 _memory_usage_check_interval = memory_conf.check_interval
205 _memory_usage_recovery_threshold = memory_conf.recovery_threshold
206 _memory_usage_recovery_threshold = memory_conf.recovery_threshold
206
207
207 worker._memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
208 worker._memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
208 or _memory_max_usage)
209 or _memory_max_usage)
209 worker._memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
210 worker._memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
210 or _memory_usage_check_interval)
211 or _memory_usage_check_interval)
211 worker._memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
212 worker._memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
212 or _memory_usage_recovery_threshold)
213 or _memory_usage_recovery_threshold)
213
214
214 # register memory last check time, with some random offset so we don't recycle all
215 # register memory last check time, with some random offset so we don't recycle all
215 # at once
216 # at once
216 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
217 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
217
218
218 if _memory_max_usage:
219 if _memory_max_usage:
219 server.log.info("pid=[%-10s] WORKER spawned with max memory set at %s", worker.pid,
220 server.log.info("pid=[%-10s] WORKER spawned with max memory set at %s", worker.pid,
220 _format_data_size(_memory_max_usage))
221 _format_data_size(_memory_max_usage))
221 else:
222 else:
222 server.log.info("pid=[%-10s] WORKER spawned", worker.pid)
223 server.log.info("pid=[%-10s] WORKER spawned", worker.pid)
223
224
224
225
225 def pre_exec(server):
226 def pre_exec(server):
226 server.log.info("Forked child, re-executing.")
227 server.log.info("Forked child, re-executing.")
227
228
228
229
229 def on_starting(server):
230 def on_starting(server):
230 server_lbl = '{} {}'.format(server.proc_name, server.address)
231 server_lbl = '{} {}'.format(server.proc_name, server.address)
231 server.log.info("Server %s is starting.", server_lbl)
232 server.log.info("Server %s is starting.", server_lbl)
233 server.log.info('Config:')
234 server.log.info(f"\n{server.cfg}")
235 server.log.info(get_memory_usage_params())
232
236
233
237
234 def when_ready(server):
238 def when_ready(server):
235 server.log.info("Server %s is ready. Spawning workers", server)
239 server.log.info("Server %s is ready. Spawning workers", server)
236
240
237
241
238 def on_reload(server):
242 def on_reload(server):
239 pass
243 pass
240
244
241
245
242 def _format_data_size(size, unit="B", precision=1, binary=True):
246 def _format_data_size(size, unit="B", precision=1, binary=True):
243 """Format a number using SI units (kilo, mega, etc.).
247 """Format a number using SI units (kilo, mega, etc.).
244
248
245 ``size``: The number as a float or int.
249 ``size``: The number as a float or int.
246
250
247 ``unit``: The unit name in plural form. Examples: "bytes", "B".
251 ``unit``: The unit name in plural form. Examples: "bytes", "B".
248
252
249 ``precision``: How many digits to the right of the decimal point. Default
253 ``precision``: How many digits to the right of the decimal point. Default
250 is 1. 0 suppresses the decimal point.
254 is 1. 0 suppresses the decimal point.
251
255
252 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
256 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
253 If true, use base-2 binary prefixes (kibi = Ki = 1024).
257 If true, use base-2 binary prefixes (kibi = Ki = 1024).
254
258
255 ``full_name``: If false (default), use the prefix abbreviation ("k" or
259 ``full_name``: If false (default), use the prefix abbreviation ("k" or
256 "Ki"). If true, use the full prefix ("kilo" or "kibi"). If false,
260 "Ki"). If true, use the full prefix ("kilo" or "kibi"). If false,
257 use abbreviation ("k" or "Ki").
261 use abbreviation ("k" or "Ki").
258
262
259 """
263 """
260
264
261 if not binary:
265 if not binary:
262 base = 1000
266 base = 1000
263 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
267 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
264 else:
268 else:
265 base = 1024
269 base = 1024
266 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
270 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
267
271
268 sign = ""
272 sign = ""
269 if size > 0:
273 if size > 0:
270 m = int(math.log(size, base))
274 m = int(math.log(size, base))
271 elif size < 0:
275 elif size < 0:
272 sign = "-"
276 sign = "-"
273 size = -size
277 size = -size
274 m = int(math.log(size, base))
278 m = int(math.log(size, base))
275 else:
279 else:
276 m = 0
280 m = 0
277 if m > 8:
281 if m > 8:
278 m = 8
282 m = 8
279
283
280 if m == 0:
284 if m == 0:
281 precision = '%.0f'
285 precision = '%.0f'
282 else:
286 else:
283 precision = '%%.%df' % precision
287 precision = '%%.%df' % precision
284
288
285 size = precision % (size / math.pow(base, m))
289 size = precision % (size / math.pow(base, m))
286
290
287 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
291 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
288
292
289
293
290 def _check_memory_usage(worker):
294 def _check_memory_usage(worker):
291 _memory_max_usage = worker._memory_max_usage
295 _memory_max_usage = worker._memory_max_usage
292 if not _memory_max_usage:
296 if not _memory_max_usage:
293 return
297 return
294
298
295 _memory_usage_check_interval = worker._memory_usage_check_interval
299 _memory_usage_check_interval = worker._memory_usage_check_interval
296 _memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
300 _memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
297
301
298 elapsed = time.time() - worker._last_memory_check_time
302 elapsed = time.time() - worker._last_memory_check_time
299 if elapsed > _memory_usage_check_interval:
303 if elapsed > _memory_usage_check_interval:
300 mem_usage = _get_process_rss()
304 mem_usage = _get_process_rss()
301 if mem_usage and mem_usage > _memory_max_usage:
305 if mem_usage and mem_usage > _memory_max_usage:
302 worker.log.info(
306 worker.log.info(
303 "memory usage %s > %s, forcing gc",
307 "memory usage %s > %s, forcing gc",
304 _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
308 _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
305 # Try to clean it up by forcing a full collection.
309 # Try to clean it up by forcing a full collection.
306 gc.collect()
310 gc.collect()
307 mem_usage = _get_process_rss()
311 mem_usage = _get_process_rss()
308 if mem_usage > _memory_usage_recovery_threshold:
312 if mem_usage > _memory_usage_recovery_threshold:
309 # Didn't clean up enough, we'll have to terminate.
313 # Didn't clean up enough, we'll have to terminate.
310 worker.log.warning(
314 worker.log.warning(
311 "memory usage %s > %s after gc, quitting",
315 "memory usage %s > %s after gc, quitting",
312 _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
316 _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
313 # This will cause worker to auto-restart itself
317 # This will cause worker to auto-restart itself
314 worker.alive = False
318 worker.alive = False
315 worker._last_memory_check_time = time.time()
319 worker._last_memory_check_time = time.time()
316
320
317
321
318 def worker_int(worker):
322 def worker_int(worker):
319 worker.log.info("pid=[%-10s] worker received INT or QUIT signal", worker.pid)
323 worker.log.info("pid=[%-10s] worker received INT or QUIT signal", worker.pid)
320
324
321 # get traceback info, on worker crash
325 # get traceback info, on worker crash
322 def get_thread_id(t_id):
326 def get_thread_id(t_id):
323 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
327 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
324 return id2name.get(t_id, "unknown_thread_id")
328 return id2name.get(t_id, "unknown_thread_id")
325
329
326 code = []
330 code = []
327 for thread_id, stack in sys._current_frames().items(): # noqa
331 for thread_id, stack in sys._current_frames().items(): # noqa
328 code.append(
332 code.append(
329 "\n# Thread: %s(%d)" % (get_thread_id(thread_id), thread_id))
333 "\n# Thread: %s(%d)" % (get_thread_id(thread_id), thread_id))
330 for fname, lineno, name, line in traceback.extract_stack(stack):
334 for fname, lineno, name, line in traceback.extract_stack(stack):
331 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
335 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
332 if line:
336 if line:
333 code.append(" %s" % (line.strip()))
337 code.append(" %s" % (line.strip()))
334 worker.log.debug("\n".join(code))
338 worker.log.debug("\n".join(code))
335
339
336
340
337 def worker_abort(worker):
341 def worker_abort(worker):
338 worker.log.info("pid=[%-10s] worker received SIGABRT signal", worker.pid)
342 worker.log.info("pid=[%-10s] worker received SIGABRT signal", worker.pid)
339
343
340
344
341 def worker_exit(server, worker):
345 def worker_exit(server, worker):
342 worker.log.info("pid=[%-10s] worker exit", worker.pid)
346 worker.log.info("pid=[%-10s] worker exit", worker.pid)
343
347
344
348
345 def child_exit(server, worker):
349 def child_exit(server, worker):
346 worker.log.info("pid=[%-10s] worker child exit", worker.pid)
350 worker.log.info("pid=[%-10s] worker child exit", worker.pid)
347
351
348
352
349 def pre_request(worker, req):
353 def pre_request(worker, req):
350 worker.start_time = time.time()
354 worker.start_time = time.time()
351 worker.log.debug(
355 worker.log.debug(
352 "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
356 "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
353
357
354
358
355 def post_request(worker, req, environ, resp):
359 def post_request(worker, req, environ, resp):
356 total_time = time.time() - worker.start_time
360 total_time = time.time() - worker.start_time
357 # Gunicorn sometimes has problems with reading the status_code
361 # Gunicorn sometimes has problems with reading the status_code
358 status_code = getattr(resp, 'status_code', '')
362 status_code = getattr(resp, 'status_code', '')
359 worker.log.debug(
363 worker.log.debug(
360 "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
364 "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
361 worker.nr, req.method, req.path, status_code, total_time)
365 worker.nr, req.method, req.path, status_code, total_time)
362 _check_memory_usage(worker)
366 _check_memory_usage(worker)
363
367
364
368
365 def _filter_proxy(ip):
369 def _filter_proxy(ip):
366 """
370 """
367 Passed in IP addresses in HEADERS can be in a special format of multiple
371 Passed in IP addresses in HEADERS can be in a special format of multiple
368 ips. Those comma separated IPs are passed from various proxies in the
372 ips. Those comma separated IPs are passed from various proxies in the
369 chain of request processing. The left-most being the original client.
373 chain of request processing. The left-most being the original client.
370 We only care about the first IP which came from the org. client.
374 We only care about the first IP which came from the org. client.
371
375
372 :param ip: ip string from headers
376 :param ip: ip string from headers
373 """
377 """
374 if ',' in ip:
378 if ',' in ip:
375 _ips = ip.split(',')
379 _ips = ip.split(',')
376 _first_ip = _ips[0].strip()
380 _first_ip = _ips[0].strip()
377 return _first_ip
381 return _first_ip
378 return ip
382 return ip
379
383
380
384
381 def _filter_port(ip):
385 def _filter_port(ip):
382 """
386 """
383 Removes a port from ip, there are 4 main cases to handle here.
387 Removes a port from ip, there are 4 main cases to handle here.
384 - ipv4 eg. 127.0.0.1
388 - ipv4 eg. 127.0.0.1
385 - ipv6 eg. ::1
389 - ipv6 eg. ::1
386 - ipv4+port eg. 127.0.0.1:8080
390 - ipv4+port eg. 127.0.0.1:8080
387 - ipv6+port eg. [::1]:8080
391 - ipv6+port eg. [::1]:8080
388
392
389 :param ip:
393 :param ip:
390 """
394 """
391 def is_ipv6(ip_addr):
395 def is_ipv6(ip_addr):
392 if hasattr(socket, 'inet_pton'):
396 if hasattr(socket, 'inet_pton'):
393 try:
397 try:
394 socket.inet_pton(socket.AF_INET6, ip_addr)
398 socket.inet_pton(socket.AF_INET6, ip_addr)
395 except socket.error:
399 except socket.error:
396 return False
400 return False
397 else:
401 else:
398 return False
402 return False
399 return True
403 return True
400
404
401 if ':' not in ip: # must be ipv4 pure ip
405 if ':' not in ip: # must be ipv4 pure ip
402 return ip
406 return ip
403
407
404 if '[' in ip and ']' in ip: # ipv6 with port
408 if '[' in ip and ']' in ip: # ipv6 with port
405 return ip.split(']')[0][1:].lower()
409 return ip.split(']')[0][1:].lower()
406
410
407 # must be ipv6 or ipv4 with port
411 # must be ipv6 or ipv4 with port
408 if is_ipv6(ip):
412 if is_ipv6(ip):
409 return ip
413 return ip
410 else:
414 else:
411 ip, _port = ip.split(':')[:2] # means ipv4+port
415 ip, _port = ip.split(':')[:2] # means ipv4+port
412 return ip
416 return ip
413
417
414
418
def get_ip_addr(environ):
    """
    Resolve the client IP address from a WSGI environ, preferring proxy
    headers (X-Real-IP first, then X-Forwarded-For) over REMOTE_ADDR,
    and stripping proxy-chain entries and port suffixes from the result.

    :param environ: WSGI environ dict
    """
    def _clean(raw_ip):
        # drop proxy chain first, then any trailing port
        return _filter_port(_filter_proxy(raw_ip))

    # header precedence: X-Real-IP, then X-Forwarded-For
    for header in ('HTTP_X_REAL_IP', 'HTTP_X_FORWARDED_FOR'):
        value = environ.get(header)
        if value:
            return _clean(value)

    # fall back to the direct peer address
    return _clean(environ.get('REMOTE_ADDR', '0.0.0.0'))
433
437
434
438
class RhodeCodeLogger(Logger):
    """
    Custom Logger that allows some customization that gunicorn doesn't allow
    """

    # timestamp format used in RhodeCode access-log lines
    datefmt = r"%Y-%m-%d %H:%M:%S"

    def __init__(self, cfg):
        Logger.__init__(self, cfg)

    def now(self):
        """ return date in RhodeCode Log format """
        ts = time.time()
        # millisecond suffix derived from the fractional part of the timestamp
        millis = int((ts - int(ts)) * 1000)
        return time.strftime(self.datefmt, time.localtime(ts)) + '.{0:03d}'.format(millis)

    def atoms(self, resp, req, environ, request_time):
        """ Gets atoms for log formatting.
        """
        status = resp.status
        if isinstance(status, str):
            # keep only the numeric part of e.g. "200 OK"
            status = status.split(None, 1)[0]

        atoms = {
            'h': get_ip_addr(environ),
            'l': '-',
            'u': self._get_user(environ) or '-',
            't': self.now(),
            'r': "%s %s %s" % (environ['REQUEST_METHOD'],
                               environ['RAW_URI'],
                               environ["SERVER_PROTOCOL"]),
            's': status,
            'm': environ.get('REQUEST_METHOD'),
            'U': environ.get('PATH_INFO'),
            'q': environ.get('QUERY_STRING'),
            'H': environ.get('SERVER_PROTOCOL'),
            'b': getattr(resp, 'sent', None) is not None and str(resp.sent) or '-',
            'B': getattr(resp, 'sent', None),
            'f': environ.get('HTTP_REFERER', '-'),
            'a': environ.get('HTTP_USER_AGENT', '-'),
            'T': request_time.seconds,
            'D': (request_time.seconds * 1000000) + request_time.microseconds,
            'M': (request_time.seconds * 1000) + int(request_time.microseconds/1000),
            'L': "%d.%06d" % (request_time.seconds, request_time.microseconds),
            'p': "<%s>" % os.getpid()
        }

        # request headers: req may carry a .headers attribute or itself be
        # a mapping/iterable of (name, value) pairs
        in_headers = req.headers if hasattr(req, 'headers') else req
        if hasattr(in_headers, "items"):
            in_headers = in_headers.items()
        atoms.update({"{%s}i" % k.lower(): v for k, v in in_headers})

        # response headers
        out_headers = resp.headers
        if hasattr(out_headers, "items"):
            out_headers = out_headers.items()
        atoms.update({"{%s}o" % k.lower(): v for k, v in out_headers})

        # expose every environ variable as a {name}e atom
        atoms.update({"{%s}e" % k.lower(): v for k, v in environ.items()})

        return atoms
504
508
505
509
# gunicorn config setting: install the custom logger defined above
logger_class = RhodeCodeLogger
General Comments 0
You need to be logged in to leave comments. Login now