gunicorn_config.py
"""
gunicorn config extension and hooks. Sets additional configuration that is
available post the .ini config.
- workers = ${cpu_number}
- threads = 1
- proc_name = ${gunicorn_proc_name}
- worker_class = sync
- worker_connections = 10
- max_requests = 1000
- max_requests_jitter = 30
- timeout = 21600
"""
import multiprocessing
import sys
import time
import datetime
import threading
import traceback

from gunicorn.glogging import Logger


# GLOBAL
errorlog = '-'
accesslog = '-'
loglevel = 'debug'
# SECURITY

# The maximum size of an HTTP request line in bytes.
limit_request_line = 4094

# Limit the number of HTTP header fields in a request.
limit_request_fields = 1024

# Limit the allowed size of an HTTP request header field.
# Value is a positive number or 0.
# Setting it to 0 will allow unlimited header field sizes.
limit_request_field_size = 0

# Timeout for graceful workers restart.
# After receiving a restart signal, workers have this much time to finish
# serving requests. Workers still alive after the timeout (starting from the
# receipt of the restart signal) are force killed.
graceful_timeout = 30

# The number of seconds to wait for requests on a Keep-Alive connection.
# Generally set in the 1-5 seconds range.
keepalive = 2
# SERVER MECHANICS
# None == system temp dir
worker_tmp_dir = None
tmp_upload_dir = None

# Custom log format
access_log_format = (
    '%(t)s GNCRN %(p)-8s %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
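# Placeholders used above, per gunicorn's standard access-log format variables
# (added here as a reference): %(t)s request timestamp, %(p)s worker process
# id, %(h)s remote address, %(L)s request time in decimal seconds,
# %(s)s response status, %(b)s response length, %(m)s request method,
# %(U)s URL path, %(q)s query string, %(u)s remote user, %(f)s referer,
# %(a)s user agent.
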
# self adjust workers based on CPU count
# workers = multiprocessing.cpu_count() * 2 + 1


def post_fork(server, worker):
    server.log.info("[<%-10s>] WORKER spawned", worker.pid)


def pre_fork(server, worker):
    pass


def pre_exec(server):
    server.log.info("Forked child, re-executing.")


def on_starting(server):
    server.log.info("Server is starting.")


def when_ready(server):
    server.log.info("Server is ready. Spawning workers")


def on_reload(server):
    pass


def worker_int(worker):
    worker.log.info("[<%-10s>] worker received INT or QUIT signal", worker.pid)

    # get traceback info, on worker crash
    id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
    code = []
    for thread_id, stack in sys._current_frames().items():
        code.append(
            "\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
        for fname, lineno, name, line in traceback.extract_stack(stack):
            code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
            if line:
                code.append(" %s" % (line.strip()))
    worker.log.debug("\n".join(code))
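# Note (added for reference): gunicorn calls worker_int when a worker process
# receives SIGINT or SIGQUIT, so the per-thread traceback dump above can be
# triggered manually with e.g. `kill -QUIT <worker_pid>`.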


def worker_abort(worker):
    worker.log.info("[<%-10s>] worker received SIGABRT signal", worker.pid)


def worker_exit(server, worker):
    worker.log.info("[<%-10s>] worker exit", worker.pid)


def child_exit(server, worker):
    worker.log.info("[<%-10s>] worker child exit", worker.pid)


def pre_request(worker, req):
    # The early return below disables the verbose per-request logging;
    # remove it to log every incoming request.
    return
    worker.log.debug("[<%-10s>] PRE WORKER: %s %s",
                     worker.pid, req.method, req.path)


def post_request(worker, req, environ, resp):
    # The early return below disables the verbose per-request logging;
    # remove it to log every completed request with its response status.
    return
    worker.log.debug("[<%-10s>] POST WORKER: %s %s resp: %s", worker.pid,
                     req.method, req.path, resp.status_code)


class RhodeCodeLogger(Logger):
    """
    Custom Logger that allows customization the default gunicorn logger
    doesn't provide.
    """

    datefmt = r"%Y-%m-%d %H:%M:%S"

    def __init__(self, cfg):
        Logger.__init__(self, cfg)

    def now(self):
        """ return date in RhodeCode Log format, e.g. 2018-01-01 10:00:00.123 """
        now = time.time()
        # int() truncates the same way the Python 2-only long() did
        msecs = int((now - int(now)) * 1000)
        return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
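

# Use the custom logger defined above for all gunicorn logging.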
logger_class = RhodeCodeLogger