; #################################
; RHODECODE VCSSERVER CONFIGURATION
; #################################

[server:main]
; COMMON HOST/IP CONFIG
host = 0.0.0.0
port = 9900

; ##################################################
; WAITRESS WSGI SERVER - Recommended for Development
; ##################################################

; use server type
use = egg:waitress#main

; number of worker threads
threads = 5

; MAX BODY SIZE 100GB
max_request_body_size = 107374182400
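; (Worked out: 100GB = 100 * 1024 * 1024 * 1024 bytes = 107374182400.)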

; Use poll instead of select, fixes file descriptors limits problems.
; May not work on old windows systems.
asyncore_use_poll = true

; ###########################
; GUNICORN APPLICATION SERVER
; ###########################

; run with gunicorn --paste rhodecode.ini

; Module to use, this setting shouldn't be changed
#use = egg:gunicorn#main

; Sets the number of process workers. More workers means more concurrent connections
; RhodeCode can handle at the same time. Each additional worker also increases
; memory usage, as each has its own set of caches.
; The recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
; than 8-10 unless for really big deployments, e.g. 700-1000 users.
; `instance_id = *` must be set in the [app:main] section below (which is the default)
; when using more than 1 worker.
#workers = 2
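; For example, applying the formula above on a 4-CPU host: (2 * 4) + 1 = 9 workers.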

; Gunicorn access log level
#loglevel = info

; Process name visible in the process list
#proc_name = rhodecode_vcsserver

; Type of worker class, one of `sync`, `gevent`
; currently `sync` is the only option allowed.
#worker_class = sync

; The maximum number of simultaneous clients. Valid only for gevent
#worker_connections = 10

; Max number of requests that a worker will handle before being gracefully restarted.
; Prevents memory leaks; jitter adds variability so not all workers are restarted at once.
#max_requests = 1000
#max_requests_jitter = 30

; Amount of time a worker can spend handling a request before it
; gets killed and restarted. By default set to 21600 (6hrs)
; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
#timeout = 21600

; The maximum size of HTTP request line in bytes.
; 0 for unlimited
#limit_request_line = 0

; Limit the number of HTTP header fields in a request.
; By default this value is 100 and can't be larger than 32768.
#limit_request_fields = 32768

; Limit the allowed size of an HTTP request header field.
; Value is a positive number or 0.
; Setting it to 0 will allow unlimited header field sizes.
#limit_request_field_size = 0

; Timeout for graceful workers restart.
; After receiving a restart signal, workers have this much time to finish
; serving requests. Workers still alive after the timeout (starting from the
; receipt of the restart signal) are force killed.
; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
#graceful_timeout = 21600

# The number of seconds to wait for requests on a Keep-Alive connection.
# Generally set in the 1-5 seconds range.
#keepalive = 2

; Maximum memory usage that each worker can use before it will receive a
; graceful restart signal, 0 = memory monitoring is disabled
; Examples: 268435456 (256MB), 536870912 (512MB)
; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
#memory_max_usage = 0

; How often in seconds to check memory usage for each gunicorn worker
#memory_usage_check_interval = 60

; Threshold value under which a worker is not recycled if GarbageCollection
; frees up enough resources. Before each restart we try to run GC on the worker;
; if that frees up enough memory, the restart will not happen.
#memory_usage_recovery_threshold = 0.8
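; Illustrative sketch (assuming the threshold is taken as a fraction of memory_max_usage):
; with memory_max_usage = 1073741824 (1GB) and the default 0.8, a worker whose GC run
; drops usage below ~859MB (0.8 * 1GB) keeps running instead of being restarted.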

[app:main]
; The %(here)s variable will be replaced with the absolute path of the parent directory
; of this file
;
; Each option in the app:main section can be overridden by an environment variable
;
; To override an option:
;
; RC_<KeyName>
; Everything should be uppercase, . and - should be replaced by _.
; For example, if you have these configuration settings:
; rc_cache.repo_object.backend = foo
; can be overridden by
; export RC_CACHE_REPO_OBJECT_BACKEND=foo
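; Applying the same rule to another option found in this file, e.g.:
; statsd.enabled = false
; could be overridden with
; export RC_STATSD_ENABLED=true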

use = egg:rhodecode-vcsserver


; #############
; DEBUG OPTIONS
; #############

# During development we want to have the debug toolbar enabled
pyramid.includes =
    pyramid_debugtoolbar

debugtoolbar.hosts = 0.0.0.0/0
debugtoolbar.exclude_prefixes =
    /css
    /fonts
    /images
    /js

; #################
; END DEBUG OPTIONS
; #################

; Pyramid default locales, we need this to be set
#pyramid.default_locale_name = en

; default locale used by VCS systems
#locale = en_US.UTF-8

; path to binaries for vcsserver, it should be set by the installer
; at installation time, e.g /home/user/.rccontrol/vcsserver-1/profile/bin
; it can also be a path to nix-build output in case of development
core.binary_dir = ""

; Custom exception store path, defaults to TMPDIR
; This is used to store exceptions from RhodeCode in a shared directory
#exception_tracker.store_path =

; #############
; DOGPILE CACHE
; #############

; Default cache dir for caches. Putting this into a ramdisk can boost performance.
; eg. /tmpfs/data_ramdisk, however this directory might require a large amount of space
#cache_dir = %(here)s/data

; ***************************************
; `repo_object` cache, default file based
; ***************************************

; `repo_object` cache settings for vcs methods for repositories
#rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace

; cache auto-expires after N seconds
; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
#rc_cache.repo_object.expiration_time = 2592000

; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
#rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache_repo_object.db

; ***********************************************************
; `repo_object` cache with redis backend
; recommended for larger instances, and for better performance
; ***********************************************************

; `repo_object` cache settings for vcs methods for repositories
#rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack

; cache auto-expires after N seconds
; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
#rc_cache.repo_object.expiration_time = 2592000

; redis_expiration_time needs to be greater than expiration_time
#rc_cache.repo_object.arguments.redis_expiration_time = 3592000

#rc_cache.repo_object.arguments.host = localhost
#rc_cache.repo_object.arguments.port = 6379
#rc_cache.repo_object.arguments.db = 5
#rc_cache.repo_object.arguments.socket_timeout = 30
; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
#rc_cache.repo_object.arguments.distributed_lock = true

; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
#rc_cache.repo_object.arguments.lock_auto_renewal = true

; Statsd client config, this is used to send metrics to statsd
; We recommend setting up statsd_exporter and scraping the metrics with Prometheus
#statsd.enabled = false
#statsd.statsd_host = 0.0.0.0
#statsd.statsd_port = 8125
#statsd.statsd_prefix =
#statsd.statsd_ipv6 = false

; Configure logging automatically at server startup; set to false
; to use the below custom logging config.
; The RC_LOGGING_FORMATTER and RC_LOGGING_LEVEL
; env variables can control the settings for logging in case of autoconfigure.
#logging.autoconfigure = true
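; For example (assuming the standard Python logging level names apply), the level
; used by autoconfigure could be adjusted via:
; export RC_LOGGING_LEVEL=INFO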

; specify your own custom logging config file to configure logging
#logging.logging_conf_file = /path/to/custom_logging.ini

; #####################
; LOGGING CONFIGURATION
; #####################

[loggers]
keys = root, vcsserver

[handlers]
keys = console

[formatters]
keys = generic, json

; #######
; LOGGERS
; #######
[logger_root]
level = NOTSET
handlers = console

[logger_vcsserver]
level = DEBUG
handlers =
qualname = vcsserver
propagate = 1

; ########
; HANDLERS
; ########

[handler_console]
class = StreamHandler
args = (sys.stderr, )
level = DEBUG
; To enable JSON formatted logs replace 'generic' with 'json'
; This allows sending properly formatted logs to grafana loki or elasticsearch
formatter = generic
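; For example, switching this handler to the `json` formatter defined below:
#formatter = json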

; ##########
; FORMATTERS
; ##########

[formatter_generic]
format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %Y-%m-%d %H:%M:%S

[formatter_json]
format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
class = vcsserver.lib._vendor.jsonlogger.JsonFormatter