# configs/gunicorn_config.py
"""
Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
Gunicorn configuration should be managed by .ini files entries of RhodeCode or VCSServer
"""
r1 | ||||
r4098 | import gc | |||
import os | ||||
import sys | ||||
r3875 | import math | |||
r2075 | import time | |||
r1 | import threading | |||
import traceback | ||||
r3875 | import random | |||
r4829 | import socket | |||
r5125 | import dataclasses | |||
r2075 | from gunicorn.glogging import Logger | |||
r1 | ||||
def get_workers():
    """Return the canonical gunicorn worker count: (2 * CPU cores) + 1."""
    from multiprocessing import cpu_count
    return 2 * cpu_count() + 1
r5125 | ||||
bind = "127.0.0.1:10020"

# Error logging output for gunicorn (-) is stdout
errorlog = '-'

# Access logging output for gunicorn (-) is stdout
accesslog = '-'

# SERVER MECHANICS
# None == system temp dir
# worker_tmp_dir is recommended to be set to some tmpfs
worker_tmp_dir = None
tmp_upload_dir = None

# use re-use port logic
#reuse_port = True

# Custom log format
#access_log_format = (
#    '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')

# loki format for easier parsing in grafana
access_log_format = (
    'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')


# Sets the number of process workers. More workers means more concurrent connections
# RhodeCode can handle at the same time. Each additional worker also increases
# memory usage, as each worker has its own set of caches.
# Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2CPU = 5 workers, but no more
# than 8-10 unless for huge deployments, e.g. 700-1000 users.
# `instance_id = *` must be set in the [app:main] section below (which is the default)
# when using more than 1 worker.
workers = 4

# self adjust workers based on CPU count, to use maximum of CPU and not overquota the resources
# workers = get_workers()

# Gunicorn access log level
loglevel = 'info'

# Process name visible in process list
proc_name = 'rhodecode_enterprise'

# Type of worker class, one of `sync`, `gevent`
# NOTE(review): an earlier comment here claimed `sync` is the only option
# allowed, which contradicts the `gevent` value below — confirm against the
# deployment docs for this RhodeCode version.
worker_class = 'gevent'

# The maximum number of simultaneous clients. Valid only for gevent
worker_connections = 10

# Max number of requests that worker will handle before being gracefully restarted.
# Prevents memory leaks; jitter adds variability so not all workers are restarted at once.
max_requests = 2000
max_requests_jitter = 30

# The maximum number of pending connections.
# Exceeding this number results in the client getting an error when attempting to connect.
backlog = 64

# Amount of time a worker can spend handling a request before it
# gets killed and restarted. By default set to 21600 (6hrs)
# Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
timeout = 21600

# The maximum size of HTTP request line in bytes.
# 0 for unlimited
limit_request_line = 0

# Limit the number of HTTP header fields in a request.
# By default this value is 100 and can't be larger than 32768.
limit_request_fields = 32768

# Limit the allowed size of an HTTP request header field.
# Value is a positive number or 0.
# Setting it to 0 will allow unlimited header field sizes.
limit_request_field_size = 0

# Timeout for graceful workers restart.
# After receiving a restart signal, workers have this much time to finish
# serving requests. Workers still alive after the timeout (starting from the
# receipt of the restart signal) are force killed.
# Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
graceful_timeout = 21600

# The number of seconds to wait for requests on a Keep-Alive connection.
# Generally set in the 1-5 seconds range.
keepalive = 2

# Maximum memory usage that each worker can use before it will receive a
# graceful restart signal, 0 = memory monitoring is disabled
# Examples: 268435456 (256MB), 536870912 (512MB)
# 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
# Dynamic formula 1024 * 1024 * 256 == 256MBs
memory_max_usage = 0

# How often in seconds to check for memory usage for each gunicorn worker
memory_usage_check_interval = 60

# Threshold value for which we don't recycle worker if GarbageCollection
# frees up enough resources. Before each restart, we try to run GC on worker
# in case we get enough free memory after that, restart will not happen.
memory_usage_recovery_threshold = 0.8
@dataclasses.dataclass
class MemoryCheckConfig:
    """Resolved memory-monitoring settings for a gunicorn worker."""
    # max RSS in bytes before a recycle is considered; 0 disables monitoring
    max_usage: int
    # seconds between memory-usage checks
    check_interval: int
    # fraction of max_usage; post-GC usage above max_usage * this forces restart
    recovery_threshold: float
r1 | ||||
r3875 | def _get_process_rss(pid=None): | |||
try: | ||||
import psutil | ||||
if pid: | ||||
proc = psutil.Process(pid) | ||||
else: | ||||
proc = psutil.Process() | ||||
return proc.memory_info().rss | ||||
except Exception: | ||||
return None | ||||
r4098 | def _get_config(ini_path): | |||
r4925 | import configparser | |||
r4098 | ||||
try: | ||||
r4167 | config = configparser.RawConfigParser() | |||
r4098 | config.read(ini_path) | |||
return config | ||||
except Exception: | ||||
return None | ||||
def get_memory_usage_params(config=None):
    """Resolve memory-monitoring settings into a MemoryCheckConfig.

    Resolution order (later wins): module-level defaults, then the
    [server:main] section of an optional ini file at `config`, then the
    RC_GUNICORN_* environment variables.
    """
    # start from the module-level defaults
    max_usage = memory_max_usage
    check_interval = memory_usage_check_interval
    recovery_threshold = memory_usage_recovery_threshold

    if config:
        conf = _get_config(os.path.abspath(config))
        section = 'server:main'
        if conf and conf.has_section(section):
            if conf.has_option(section, 'memory_max_usage'):
                max_usage = conf.getint(section, 'memory_max_usage')
            if conf.has_option(section, 'memory_usage_check_interval'):
                check_interval = conf.getint(section, 'memory_usage_check_interval')
            if conf.has_option(section, 'memory_usage_recovery_threshold'):
                recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')

    # environment variables take precedence over ini/default values
    max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
                    or max_usage)
    check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
                         or check_interval)
    recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
                               or recovery_threshold)

    return MemoryCheckConfig(max_usage, check_interval, recovery_threshold)
def _time_with_offset(check_interval): | ||||
return time.time() - random.randint(0, check_interval/2.0) | ||||
r1 | ||||
def pre_fork(server, worker):
    """Gunicorn hook called in the master just before a worker forks (no-op)."""
def post_fork(server, worker):
    """Gunicorn hook run in each freshly forked worker.

    Stores the resolved memory-monitoring settings as attributes on the
    worker object (later read by _check_memory_usage) and seeds the
    last-check timestamp with a random offset.
    """
    conf = get_memory_usage_params()
    max_usage = conf.max_usage
    check_interval = conf.check_interval
    recovery_threshold = conf.recovery_threshold

    env = os.environ
    worker._memory_max_usage = int(
        env.get('RC_GUNICORN_MEMORY_MAX_USAGE', '') or max_usage)
    worker._memory_usage_check_interval = int(
        env.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '') or check_interval)
    worker._memory_usage_recovery_threshold = float(
        env.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '') or recovery_threshold)

    # register memory last check time, with some random offset so we don't
    # recycle all workers at once
    worker._last_memory_check_time = _time_with_offset(check_interval)

    if max_usage:
        server.log.info("pid=[%-10s] WORKER spawned with max memory set at %s",
                        worker.pid, _format_data_size(max_usage))
    else:
        server.log.info("pid=[%-10s] WORKER spawned", worker.pid)
r3875 | ||||
def pre_exec(server):
    """Gunicorn hook: log just before the master re-executes itself."""
    server.log.info("Forked child, re-executing.")
def on_starting(server):
    """Gunicorn hook: log server identity, full config and memory params at startup."""
    server_lbl = f'{server.proc_name} {server.address}'
    server.log.info("Server %s is starting.", server_lbl)
    server.log.info('Config:')
    server.log.info(f"\n{server.cfg}")
    server.log.info(get_memory_usage_params())
r2451 | ||||
def when_ready(server):
    """Gunicorn hook: log once the arbiter is ready to spawn workers."""
    server.log.info("Server %s is ready. Spawning workers", server)
r1 | ||||
def on_reload(server):
    """Gunicorn reload hook — intentionally a no-op."""
r3875 | def _format_data_size(size, unit="B", precision=1, binary=True): | |||
"""Format a number using SI units (kilo, mega, etc.). | ||||
``size``: The number as a float or int. | ||||
``unit``: The unit name in plural form. Examples: "bytes", "B". | ||||
``precision``: How many digits to the right of the decimal point. Default | ||||
is 1. 0 suppresses the decimal point. | ||||
``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000). | ||||
If true, use base-2 binary prefixes (kibi = Ki = 1024). | ||||
``full_name``: If false (default), use the prefix abbreviation ("k" or | ||||
"Ki"). If true, use the full prefix ("kilo" or "kibi"). If false, | ||||
use abbreviation ("k" or "Ki"). | ||||
""" | ||||
if not binary: | ||||
base = 1000 | ||||
multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') | ||||
else: | ||||
base = 1024 | ||||
multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi') | ||||
sign = "" | ||||
if size > 0: | ||||
m = int(math.log(size, base)) | ||||
elif size < 0: | ||||
sign = "-" | ||||
size = -size | ||||
m = int(math.log(size, base)) | ||||
else: | ||||
m = 0 | ||||
if m > 8: | ||||
m = 8 | ||||
if m == 0: | ||||
precision = '%.0f' | ||||
else: | ||||
precision = '%%.%df' % precision | ||||
size = precision % (size / math.pow(base, m)) | ||||
return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit) | ||||
def _check_memory_usage(worker):
    """Periodic per-request memory check for a gunicorn worker.

    Reads the settings that post_fork() stored on the worker. When RSS
    exceeds the configured maximum, forces a full GC pass; if usage still
    exceeds the recovery threshold afterwards, marks the worker dead so
    gunicorn restarts it.
    """
    _memory_max_usage = worker._memory_max_usage
    if not _memory_max_usage:
        # monitoring disabled (max usage == 0)
        return

    _memory_usage_check_interval = worker._memory_usage_check_interval
    # BUGFIX: this was computed from the module-level `memory_max_usage`
    # default (0), which made the post-GC threshold always 0 and killed any
    # worker that ever crossed max usage, even after GC recovered the memory.
    _memory_usage_recovery_threshold = _memory_max_usage * worker._memory_usage_recovery_threshold

    elapsed = time.time() - worker._last_memory_check_time
    if elapsed > _memory_usage_check_interval:
        mem_usage = _get_process_rss()
        if mem_usage and mem_usage > _memory_max_usage:
            worker.log.info(
                "memory usage %s > %s, forcing gc",
                _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
            # Try to clean it up by forcing a full collection.
            gc.collect()
            mem_usage = _get_process_rss()
            # guard against None in case the RSS probe fails after the GC pass
            if mem_usage and mem_usage > _memory_usage_recovery_threshold:
                # Didn't clean up enough, we'll have to terminate.
                worker.log.warning(
                    "memory usage %s > %s after gc, quitting",
                    _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
                # This will cause worker to auto-restart itself
                worker.alive = False
        worker._last_memory_check_time = time.time()
def worker_int(worker):
    """Gunicorn hook for INT/QUIT: log the signal, then dump a stack trace
    for every live thread at debug level to aid crash diagnosis."""
    worker.log.info("pid=[%-10s] worker received INT or QUIT signal", worker.pid)

    # get traceback info, on worker crash
    thread_names = {th.ident: th.name for th in threading.enumerate()}
    lines = []
    for t_ident, frame in sys._current_frames().items():  # noqa
        lines.append(
            "\n# Thread: %s(%d)" % (thread_names.get(t_ident, "unknown_thread_id"), t_ident))
        for fname, lineno, func_name, src_line in traceback.extract_stack(frame):
            lines.append('File: "%s", line %d, in %s' % (fname, lineno, func_name))
            if src_line:
                lines.append(" %s" % (src_line.strip()))
    worker.log.debug("\n".join(lines))
def worker_abort(worker):
    """Gunicorn hook: log that the worker received SIGABRT."""
    worker.log.info("pid=[%-10s] worker received SIGABRT signal", worker.pid)
r1 | ||||
def worker_exit(server, worker):
    """Gunicorn hook: log that the worker is exiting."""
    worker.log.info("pid=[%-10s] worker exit", worker.pid)
r2451 | ||||
def child_exit(server, worker):
    """Gunicorn hook: log that a worker child process has exited."""
    worker.log.info("pid=[%-10s] worker child exit", worker.pid)
r2451 | ||||
def pre_request(worker, req):
    """Gunicorn hook: stamp the request start time and debug-log the request."""
    worker.start_time = time.time()
    worker.log.debug("GNCRN PRE WORKER [cnt:%s]: %s %s",
                     worker.nr, req.method, req.path)
r1 | ||||
def post_request(worker, req, environ, resp):
    """Gunicorn hook: log request timing/status, then run the memory check."""
    elapsed = time.time() - worker.start_time
    # Gunicorn sometimes has problems with reading the status_code
    status_code = getattr(resp, 'status_code', '')
    worker.log.debug("GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
                     worker.nr, req.method, req.path, status_code, elapsed)
    _check_memory_usage(worker)
r2075 | ||||
r2451 | ||||
r4829 | def _filter_proxy(ip): | |||
""" | ||||
Passed in IP addresses in HEADERS can be in a special format of multiple | ||||
ips. Those comma separated IPs are passed from various proxies in the | ||||
chain of request processing. The left-most being the original client. | ||||
We only care about the first IP which came from the org. client. | ||||
:param ip: ip string from headers | ||||
""" | ||||
if ',' in ip: | ||||
_ips = ip.split(',') | ||||
_first_ip = _ips[0].strip() | ||||
return _first_ip | ||||
return ip | ||||
def _filter_port(ip): | ||||
""" | ||||
Removes a port from ip, there are 4 main cases to handle here. | ||||
- ipv4 eg. 127.0.0.1 | ||||
- ipv6 eg. ::1 | ||||
- ipv4+port eg. 127.0.0.1:8080 | ||||
- ipv6+port eg. [::1]:8080 | ||||
:param ip: | ||||
""" | ||||
def is_ipv6(ip_addr): | ||||
if hasattr(socket, 'inet_pton'): | ||||
try: | ||||
socket.inet_pton(socket.AF_INET6, ip_addr) | ||||
except socket.error: | ||||
return False | ||||
else: | ||||
return False | ||||
return True | ||||
if ':' not in ip: # must be ipv4 pure ip | ||||
return ip | ||||
if '[' in ip and ']' in ip: # ipv6 with port | ||||
return ip.split(']')[0][1:].lower() | ||||
# must be ipv6 or ipv4 with port | ||||
if is_ipv6(ip): | ||||
return ip | ||||
else: | ||||
ip, _port = ip.split(':')[:2] # means ipv4+port | ||||
return ip | ||||
def get_ip_addr(environ):
    """Extract the client IP from a WSGI environ, preferring proxy headers
    (X-Real-IP, then X-Forwarded-For) over REMOTE_ADDR; strips ports and
    keeps only the left-most proxied address."""
    def _clean(raw_ip):
        return _filter_port(_filter_proxy(raw_ip))

    for header in ('HTTP_X_REAL_IP', 'HTTP_X_FORWARDED_FOR'):
        candidate = environ.get(header)
        if candidate:
            return _clean(candidate)
    return _clean(environ.get('REMOTE_ADDR', '0.0.0.0'))
class RhodeCodeLogger(Logger):
    """
    Custom Logger that allows some customization that gunicorn doesn't allow
    """

    # RhodeCode-style timestamp format used by now()
    datefmt = r"%Y-%m-%d %H:%M:%S"

    def __init__(self, cfg):
        Logger.__init__(self, cfg)

    def now(self):
        """ return date in RhodeCode Log format """
        now = time.time()
        # append millisecond precision, which strftime cannot produce itself
        msecs = int((now - int(now)) * 1000)
        return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)

    def atoms(self, resp, req, environ, request_time):
        """ Gets atoms for log formatting.

        Builds the dict of `%(X)s` substitutions consumed by
        `access_log_format`; overrides gunicorn's version to resolve the
        client IP through proxy headers and to use the RhodeCode timestamp.
        """
        status = resp.status
        # status may be the full "200 OK" line; keep only the numeric code
        if isinstance(status, str):
            status = status.split(None, 1)[0]

        atoms = {
            'h': get_ip_addr(environ),
            'l': '-',
            'u': self._get_user(environ) or '-',
            't': self.now(),
            'r': "%s %s %s" % (environ['REQUEST_METHOD'],
                               environ['RAW_URI'],
                               environ["SERVER_PROTOCOL"]),
            's': status,
            'm': environ.get('REQUEST_METHOD'),
            'U': environ.get('PATH_INFO'),
            'q': environ.get('QUERY_STRING'),
            'H': environ.get('SERVER_PROTOCOL'),
            # '-' when the byte count is unknown (resp.sent missing)
            'b': getattr(resp, 'sent', None) is not None and str(resp.sent) or '-',
            'B': getattr(resp, 'sent', None),
            'f': environ.get('HTTP_REFERER', '-'),
            'a': environ.get('HTTP_USER_AGENT', '-'),
            'T': request_time.seconds,
            'D': (request_time.seconds * 1000000) + request_time.microseconds,
            'M': (request_time.seconds * 1000) + int(request_time.microseconds/1000),
            'L': "%d.%06d" % (request_time.seconds, request_time.microseconds),
            'p': "<%s>" % os.getpid()
        }

        # add request headers
        if hasattr(req, 'headers'):
            req_headers = req.headers
        else:
            req_headers = req

        if hasattr(req_headers, "items"):
            req_headers = req_headers.items()

        atoms.update({"{%s}i" % k.lower(): v for k, v in req_headers})

        resp_headers = resp.headers
        if hasattr(resp_headers, "items"):
            resp_headers = resp_headers.items()

        # add response headers
        atoms.update({"{%s}o" % k.lower(): v for k, v in resp_headers})

        # add environ variables
        environ_variables = environ.items()
        atoms.update({"{%s}e" % k.lower(): v for k, v in environ_variables})

        return atoms
r2075 | ||||
r5125 | ||||
# Tell gunicorn to use the custom logger class defined in this module
logger_class = RhodeCodeLogger