r1365 """
gunicorn config extension and hooks. Sets additional configuration that is
available post the .ini config.
- workers = ${cpu_number}
- threads = 1
- proc_name = ${gunicorn_proc_name}
- worker_class = sync
- worker_connections = 10
- max_requests = 1000
- max_requests_jitter = 30
- timeout = 21600
"""

import math
import gc
import sys
import time
import threading
import traceback
import random

from gunicorn.glogging import Logger


def get_workers():
    import multiprocessing
    return multiprocessing.cpu_count() * 2 + 1
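
# For illustration: on a 4-core machine get_workers() evaluates to 4 * 2 + 1 = 9,
# the usual "2 * CPUs + 1" rule of thumb for sync gunicorn workers.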


# GLOBAL
errorlog = '-'
accesslog = '-'
loglevel = 'info'


# SECURITY

# The maximum size of HTTP request line in bytes.
# 0 for unlimited
limit_request_line = 0

# Limit the number of HTTP headers fields in a request.
# By default this value is 100 and can't be larger than 32768.
limit_request_fields = 32768

# Limit the allowed size of an HTTP request header field.
# Value is a positive number or 0.
# Setting it to 0 will allow unlimited header field sizes.
limit_request_field_size = 0

# Timeout for graceful workers restart.
# After receiving a restart signal, workers have this much time to finish
# serving requests. Workers still alive after the timeout (starting from the
# receipt of the restart signal) are force killed.
graceful_timeout = 60 * 60

# The number of seconds to wait for requests on a Keep-Alive connection.
# Generally set in the 1-5 seconds range.
keepalive = 2


# SERVER MECHANICS
# None == system temp dir
# worker_tmp_dir is recommended to be set to some tmpfs
worker_tmp_dir = None
tmp_upload_dir = None

# Custom log format
access_log_format = (
    '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')

# self adjust workers based on CPU count
# workers = get_workers()

# n * 1024 * 1024 == n MBs, 0 = memory monitoring is disabled
MAX_MEMORY_USAGE = 0 * 1024 * 1024

# How often in seconds to check for memory usage
MEMORY_USAGE_CHECK_INTERVAL = 30

# If a gc brings us back below this threshold, we can avoid termination.
MEMORY_USAGE_RECOVERY_THRESHOLD = MAX_MEMORY_USAGE * 0.8
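
# Example (placeholder value, not a recommendation): to recycle workers that grow
# past roughly 1 GB of RSS, set
#   MAX_MEMORY_USAGE = 1024 * 1024 * 1024
# which also puts MEMORY_USAGE_RECOVERY_THRESHOLD at about 0.8 GB.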


def _get_process_rss(pid=None):
    try:
        import psutil
        if pid:
            proc = psutil.Process(pid)
        else:
            proc = psutil.Process()
        return proc.memory_info().rss
    except Exception:
        return None


def _time_with_offset():
    # use an integer offset; randint() expects integer bounds
    return time.time() - random.randint(0, MEMORY_USAGE_CHECK_INTERVAL // 2)


def pre_fork(server, worker):
    pass


def post_fork(server, worker):
    server.log.info("<%s> WORKER spawned", worker.pid)
    # register memory last check time, with some random offset so we don't recycle all
    # at once
    worker._last_memory_check_time = _time_with_offset()


def pre_exec(server):
    server.log.info("Forked child, re-executing.")


def on_starting(server):
    server_lbl = '{} {}'.format(server.proc_name, server.address)
    server.log.info("Server %s is starting.", server_lbl)


def when_ready(server):
    server.log.info("Server %s is ready. Spawning workers", server)


def on_reload(server):
    pass


def _format_data_size(size, unit="B", precision=1, binary=True):
"""Format a number using SI units (kilo, mega, etc.).
``size``: The number as a float or int.
``unit``: The unit name in plural form. Examples: "bytes", "B".
``precision``: How many digits to the right of the decimal point. Default
is 1. 0 suppresses the decimal point.
``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
If true, use base-2 binary prefixes (kibi = Ki = 1024).
``full_name``: If false (default), use the prefix abbreviation ("k" or
"Ki"). If true, use the full prefix ("kilo" or "kibi"). If false,
use abbreviation ("k" or "Ki").
"""

    if not binary:
        base = 1000
        multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    else:
        base = 1024
        multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')

    sign = ""
    if size > 0:
        m = int(math.log(size, base))
    elif size < 0:
        sign = "-"
        size = -size
        m = int(math.log(size, base))
    else:
        m = 0

    if m > 8:
        m = 8

    if m == 0:
        precision = '%.0f'
    else:
        precision = '%%.%df' % precision

    size = precision % (size / math.pow(base, m))

    return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
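
# For illustration, sample values run through the helper above:
#   _format_data_size(0)                  -> '0 B'
#   _format_data_size(2048)               -> '2.0 KiB'
#   _format_data_size(3 * 1024 ** 3)      -> '3.0 GiB'
#   _format_data_size(1500, binary=False) -> '1.5 kB'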


def _check_memory_usage(worker):
    if not MAX_MEMORY_USAGE:
        return

    elapsed = time.time() - worker._last_memory_check_time
    if elapsed > MEMORY_USAGE_CHECK_INTERVAL:
        mem_usage = _get_process_rss()
        if mem_usage and mem_usage > MAX_MEMORY_USAGE:
            worker.log.info(
                "memory usage %s > %s, forcing gc",
                _format_data_size(mem_usage), _format_data_size(MAX_MEMORY_USAGE))
            # Try to clean it up by forcing a full collection.
            gc.collect()
            mem_usage = _get_process_rss()
            # guard against a failed RSS read (None) before comparing
            if mem_usage and mem_usage > MEMORY_USAGE_RECOVERY_THRESHOLD:
                # Didn't clean up enough, we'll have to terminate.
                worker.log.warning(
                    "memory usage %s > %s after gc, quitting",
                    _format_data_size(mem_usage), _format_data_size(MAX_MEMORY_USAGE))
                # This will cause worker to auto-restart itself
                worker.alive = False
        worker._last_memory_check_time = time.time()


def worker_int(worker):
    worker.log.info("[<%-10s>] worker received INT or QUIT signal", worker.pid)

    # get traceback info, on worker crash
    id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
    code = []
    for thread_id, stack in sys._current_frames().items():
        code.append(
            "\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
        for fname, lineno, name, line in traceback.extract_stack(stack):
            code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
            if line:
                code.append("  %s" % (line.strip()))
    worker.log.debug("\n".join(code))


def worker_abort(worker):
    worker.log.info("[<%-10s>] worker received SIGABRT signal", worker.pid)


def worker_exit(server, worker):
    worker.log.info("[<%-10s>] worker exit", worker.pid)


def child_exit(server, worker):
    worker.log.info("[<%-10s>] worker child exit", worker.pid)


def pre_request(worker, req):
    worker.start_time = time.time()
    worker.log.debug(
        "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)


def post_request(worker, req, environ, resp):
    total_time = time.time() - worker.start_time
    worker.log.debug(
        "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
        worker.nr, req.method, req.path, resp.status_code, total_time)
    _check_memory_usage(worker)


class RhodeCodeLogger(Logger):
"""
Custom Logger that allows some customization that gunicorn doesn't allow
"""
datefmt = r"%Y-%m-%d %H:%M:%S"
def __init__(self, cfg):
Logger.__init__(self, cfg)

    def now(self):
        """ return date in RhodeCode Log format """
        now = time.time()
        msecs = int((now - int(now)) * 1000)
        return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
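
# For illustration only: now() yields timestamps like '2019-05-10 12:34:56.123',
# i.e. the configured datefmt extended with milliseconds, so gunicorn log lines
# align with RhodeCode's own logging format.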


logger_class = RhodeCodeLogger