diff --git a/configs/development.ini b/configs/development.ini
--- a/configs/development.ini
+++ b/configs/development.ini
@@ -1,138 +1,200 @@
-################################################################################
-# RhodeCode VCSServer with HTTP Backend - configuration                       #
-################################################################################
+## -*- coding: utf-8 -*-
+; #################################
+; RHODECODE VCSSERVER CONFIGURATION
+; #################################
 [server:main]
-## COMMON ##
+; COMMON HOST/IP CONFIG
 host = 0.0.0.0
 port = 9900
-###########################################################
-## WAITRESS WSGI SERVER - Recommended for Development ####
-###########################################################
+; ##################################################
+; WAITRESS WSGI SERVER - Recommended for Development
+; ##################################################
+; use server type
 use = egg:waitress#main
-## number of worker threads
+
+; number of worker threads
 threads = 5
-## MAX BODY SIZE 100GB
+
+; MAX BODY SIZE 100GB
 max_request_body_size = 107374182400
-## Use poll instead of select, fixes file descriptors limits problems.
-## May not work on old windows systems.
+
+; Use poll instead of select, fixes file descriptors limits problems.
+; May not work on old windows systems.
 asyncore_use_poll = true
-##########################
-## GUNICORN WSGI SERVER ##
-##########################
-## run with gunicorn --log-config vcsserver.ini --paste vcsserver.ini
+; ###########################
+; GUNICORN APPLICATION SERVER
+; ###########################
+; run with gunicorn --log-config vcsserver.ini --paste vcsserver.ini
+
+; Module to use, this setting shouldn't be changed
 #use = egg:gunicorn#main
-## Sets the number of process workers. More workers means more concurrent connections
-## RhodeCode can handle at the same time. Each additional worker also it increases
-## memory usage as each has it's own set of caches.
-## Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
-## than 8-10 unless for really big deployments .e.g 700-1000 users.
-## `instance_id = *` must be set in the [app:main] section below (which is the default)
-## when using more than 1 worker.
+
+; Sets the number of process workers. More workers means more concurrent connections
+; RhodeCode can handle at the same time. Each additional worker also increases
+; memory usage, as each has its own set of caches.
+; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
+; than 8-10 unless for really big deployments, e.g. 700-1000 users.
+; `instance_id = *` must be set in the [app:main] section below (which is the default)
+; when using more than 1 worker.
 #workers = 2
-## Gunicorn access log level
+; Gunicorn access log level
 #loglevel = info
-## process name visible in process list
+; Process name visible in process list
 #proc_name = rhodecode_vcsserver
-## type of worker class, currently `sync` is the only option allowed.
+; Type of worker class, one of sync, gevent;
+; currently `sync` is the only option allowed.
 #worker_class = sync
-## The maximum number of simultaneous clients. Valid only for Gevent
+; The maximum number of simultaneous clients. Valid only for gevent
 #worker_connections = 10
-## max number of requests that worker will handle before being gracefully
-## restarted, could prevent memory leaks
+; Max number of requests that a worker will handle before being gracefully restarted.
+; Prevents memory leaks; jitter adds variability so that not all workers are restarted at once.
 #max_requests = 1000
 #max_requests_jitter = 30
-## amount of time a worker can spend with handling a request before it
-## gets killed and restarted. Set to 6hrs
+; Amount of time a worker can spend handling a request before it
+; gets killed and restarted. By default set to 21600 (6hrs)
+; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
 #timeout = 21600
-## The maximum size of HTTP request line in bytes.
-## 0 for unlimited
+; The maximum size of HTTP request line in bytes.
+; 0 for unlimited
 #limit_request_line = 0
-## Limit the number of HTTP headers fields in a request.
-## By default this value is 100 and can't be larger than 32768.
+; Limit the number of HTTP header fields in a request.
+; By default this value is 100 and can't be larger than 32768.
 #limit_request_fields = 32768
-## Limit the allowed size of an HTTP request header field.
-## Value is a positive number or 0.
-## Setting it to 0 will allow unlimited header field sizes.
+; Limit the allowed size of an HTTP request header field.
+; Value is a positive number or 0.
+; Setting it to 0 will allow unlimited header field sizes.
 #limit_request_field_size = 0
-## Timeout for graceful workers restart.
-## After receiving a restart signal, workers have this much time to finish
-## serving requests. Workers still alive after the timeout (starting from the
-## receipt of the restart signal) are force killed.
+; Timeout for graceful workers restart.
+; After receiving a restart signal, workers have this much time to finish
+; serving requests. Workers still alive after the timeout (starting from the
+; receipt of the restart signal) are force killed.
+; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
 #graceful_timeout = 3600
 # The number of seconds to wait for requests on a Keep-Alive connection.
 # Generally set in the 1-5 seconds range.
 #keepalive = 2
-## Maximum memory usage that each worker can use before it will receive a
-## graceful restart signal, e.g 10MB = 10485760 (10 * 1024 * 1024)
-# 0 = memory monitoring is disabled
+; Maximum memory usage that each worker can use before it will receive a
+; graceful restart signal. 0 = memory monitoring is disabled
+; Examples: 268435456 (256MB), 536870912 (512MB)
+; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
 #memory_max_usage = 0
-## How often in seconds to check for memory usage for each gunicorn worker
+; How often in seconds to check for memory usage for each gunicorn worker
 #memory_usage_check_interval = 60
-## Threshold value for which we don't recycle worker if GarbageCollection
-## frees up enough resources. Before each restart we try to run GC on worker
-## in case we get enough free memory after that, restart will not happen.
+; Threshold value for which we don't recycle a worker if GarbageCollection
+; frees up enough resources. Before each restart we try to run GC on the worker;
+; if we get enough free memory after that, the restart will not happen.
 #memory_usage_recovery_threshold = 0.8
 [app:main]
+; The %(here)s variable will be replaced with the absolute path of the parent directory
+; of this file
 use = egg:rhodecode-vcsserver
-pyramid.default_locale_name = en
+
+; #############
+; DEBUG OPTIONS
+; #############
+
+# During development we want to have the debug toolbar enabled
 pyramid.includes =
+    pyramid_debugtoolbar
-## default locale used by VCS systems
+debugtoolbar.hosts = 0.0.0.0/0
+debugtoolbar.exclude_prefixes =
+    /css
+    /fonts
+    /images
+    /js
+
+; #################
+; END DEBUG OPTIONS
+; #################
+
+; Pyramid default locales, we need this to be set
+pyramid.default_locale_name = en
+
+; default locale used by VCS systems
 locale = en_US.UTF-8
-
-## path to binaries for vcsserver, it should be set by the installer
-## at installation time, e.g /home/user/vcsserver-1/profile/bin
+; path to binaries for vcsserver, it should be set by the installer
+; at installation time, e.g. /home/user/vcsserver-1/profile/bin
+; it can also be a path to nix-build output in case of development
 core.binary_dir = ""
-## Custom exception store path, defaults to TMPDIR
-## This is used to store exception from RhodeCode in shared directory
+; Custom exception store path, defaults to TMPDIR
+; This is used to store exceptions from RhodeCode in a shared directory
 #exception_tracker.store_path =
-## Default cache dir for caches. Putting this into a ramdisk
-## can boost performance, eg. /tmpfs/data_ramdisk, however this directory might require
-## large amount of space
-cache_dir = %(here)s/rcdev/data
+; #############
+; DOGPILE CACHE
+; #############
+
+; Default cache dir for caches. Putting this into a ramdisk can boost performance,
+; e.g. /tmpfs/data_ramdisk, however this directory might require a large amount of space
+cache_dir = %(here)s/data
-## cache region for storing repo_objects cache
+; ***************************************
+; `repo_object` cache, default file based
+; ***************************************
+
+; `repo_object` cache settings for vcs methods for repositories
 rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
-## cache auto-expires after N seconds (2592000 == 30 days)
+
+; cache auto-expires after N seconds
+; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
 rc_cache.repo_object.expiration_time = 2592000
-## cache file store path, if empty set automatically to tmp dir location
+; cache file store path, defaults to temporary directory if not set
 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
-## max size of LRU, old values will be discarded if the size of cache reaches max_size
-rc_cache.repo_object.max_size = 100
+; **********************************************************
+; `repo_object` cache with redis backend
+; recommended for larger instances, or for better performance
+; **********************************************************
+
+; `repo_object` cache settings for vcs methods for repositories
+#rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
+
+; cache auto-expires after N seconds
+; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
+#rc_cache.repo_object.expiration_time = 2592000
+
+; redis_expiration_time needs to be greater than expiration_time
+#rc_cache.repo_object.arguments.redis_expiration_time = 3592000
+
+#rc_cache.repo_object.arguments.host = localhost
+#rc_cache.repo_object.arguments.port = 6379
+#rc_cache.repo_object.arguments.db = 5
+#rc_cache.repo_object.arguments.socket_timeout = 30
+; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
+#rc_cache.repo_object.arguments.distributed_lock = true
-################################
-### LOGGING CONFIGURATION ####
-################################
+; #####################
+; LOGGING CONFIGURATION
+; #####################
 [loggers]
 keys = root, vcsserver
@@ -142,9 +204,9 @@ keys = console
 [formatters]
 keys = generic
-#############
-## LOGGERS ##
-#############
+; #######
+; LOGGERS
+; #######
 [logger_root]
 level = NOTSET
 handlers = console
@@ -156,19 +218,19 @@ qualname = vcsserver
 propagate = 1
-##############
-## HANDLERS ##
-##############
+; ########
+; HANDLERS
+; ########
 [handler_console]
 class = StreamHandler
-args = (sys.stderr,)
+args = (sys.stderr, )
 level = DEBUG
 formatter = generic
-################
-## FORMATTERS ##
-################
+; ##########
+; FORMATTERS
+; ##########
 [formatter_generic]
 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
diff --git a/configs/production.ini b/configs/production.ini
--- a/configs/production.ini
+++ b/configs/production.ini
@@ -1,138 +1,163 @@
-################################################################################
-# RhodeCode VCSServer with HTTP Backend - configuration                       #
-################################################################################
+## -*- coding: utf-8 -*-
+; #################################
+; RHODECODE VCSSERVER CONFIGURATION
+; #################################
 [server:main]
-## COMMON ##
+; COMMON HOST/IP CONFIG
 host = 127.0.0.1
 port = 9900
-###########################################################
-## WAITRESS WSGI SERVER - Recommended for Development ####
-###########################################################
-#use = egg:waitress#main
-## number of worker threads
-#threads = 5
-## MAX BODY SIZE 100GB
-#max_request_body_size = 107374182400
-## Use poll instead of select, fixes file descriptors limits problems.
-## May not work on old windows systems.
-#asyncore_use_poll = true
+; ###########################
+; GUNICORN APPLICATION SERVER
+; ###########################
+; run with gunicorn --log-config vcsserver.ini --paste vcsserver.ini
-##########################
-## GUNICORN WSGI SERVER ##
-##########################
-## run with gunicorn --log-config vcsserver.ini --paste vcsserver.ini
-
+; Module to use, this setting shouldn't be changed
 use = egg:gunicorn#main
-## Sets the number of process workers. More workers means more concurrent connections
-## RhodeCode can handle at the same time. Each additional worker also it increases
-## memory usage as each has it's own set of caches.
-## Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
-## than 8-10 unless for really big deployments .e.g 700-1000 users.
-## `instance_id = *` must be set in the [app:main] section below (which is the default)
-## when using more than 1 worker.
+
+; Sets the number of process workers. More workers means more concurrent connections
+; RhodeCode can handle at the same time. Each additional worker also increases
+; memory usage, as each has its own set of caches.
+; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
+; than 8-10 unless for really big deployments, e.g. 700-1000 users.
+; `instance_id = *` must be set in the [app:main] section below (which is the default)
+; when using more than 1 worker.
 workers = 2
-## Gunicorn access log level
+; Gunicorn access log level
 loglevel = info
-## process name visible in process list
+; Process name visible in process list
 proc_name = rhodecode_vcsserver
-## type of worker class, currently `sync` is the only option allowed.
+; Type of worker class, one of sync, gevent;
+; currently `sync` is the only option allowed.
 worker_class = sync
-## The maximum number of simultaneous clients. Valid only for Gevent
+; The maximum number of simultaneous clients. Valid only for gevent
 worker_connections = 10
-## max number of requests that worker will handle before being gracefully
-## restarted, could prevent memory leaks
+; Max number of requests that a worker will handle before being gracefully restarted.
+; Prevents memory leaks; jitter adds variability so that not all workers are restarted at once.
 max_requests = 1000
 max_requests_jitter = 30
-## amount of time a worker can spend with handling a request before it
-## gets killed and restarted. Set to 6hrs
+; Amount of time a worker can spend handling a request before it
+; gets killed and restarted. By default set to 21600 (6hrs)
+; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
 timeout = 21600
-## The maximum size of HTTP request line in bytes.
-## 0 for unlimited
+; The maximum size of HTTP request line in bytes.
+; 0 for unlimited
 limit_request_line = 0
-## Limit the number of HTTP headers fields in a request.
-## By default this value is 100 and can't be larger than 32768.
+; Limit the number of HTTP header fields in a request.
+; By default this value is 100 and can't be larger than 32768.
 limit_request_fields = 32768
-## Limit the allowed size of an HTTP request header field.
-## Value is a positive number or 0.
-## Setting it to 0 will allow unlimited header field sizes.
+; Limit the allowed size of an HTTP request header field.
+; Value is a positive number or 0.
+; Setting it to 0 will allow unlimited header field sizes.
 limit_request_field_size = 0
-## Timeout for graceful workers restart.
-## After receiving a restart signal, workers have this much time to finish
-## serving requests. Workers still alive after the timeout (starting from the
-## receipt of the restart signal) are force killed.
+; Timeout for graceful workers restart.
+; After receiving a restart signal, workers have this much time to finish
+; serving requests. Workers still alive after the timeout (starting from the
+; receipt of the restart signal) are force killed.
+; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
 graceful_timeout = 3600
 # The number of seconds to wait for requests on a Keep-Alive connection.
 # Generally set in the 1-5 seconds range.
 keepalive = 2
-## Maximum memory usage that each worker can use before it will receive a
-## graceful restart signal, e.g 10MB = 10485760 (10 * 1024 * 1024)
-# 0 = memory monitoring is disabled
+; Maximum memory usage that each worker can use before it will receive a
+; graceful restart signal. 0 = memory monitoring is disabled
+; Examples: 268435456 (256MB), 536870912 (512MB)
+; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
 memory_max_usage = 0
-## How often in seconds to check for memory usage for each gunicorn worker
+; How often in seconds to check for memory usage for each gunicorn worker
 memory_usage_check_interval = 60
-## Threshold value for which we don't recycle worker if GarbageCollection
-## frees up enough resources. Before each restart we try to run GC on worker
-## in case we get enough free memory after that, restart will not happen.
+; Threshold value for which we don't recycle a worker if GarbageCollection
+; frees up enough resources. Before each restart we try to run GC on the worker;
+; if we get enough free memory after that, the restart will not happen.
 memory_usage_recovery_threshold = 0.8
 [app:main]
+; The %(here)s variable will be replaced with the absolute path of the parent directory
+; of this file
 use = egg:rhodecode-vcsserver
+; Pyramid default locales, we need this to be set
 pyramid.default_locale_name = en
-pyramid.includes =
-## default locale used by VCS systems
+; default locale used by VCS systems
 locale = en_US.UTF-8
-
-## path to binaries for vcsserver, it should be set by the installer
-## at installation time, e.g /home/user/vcsserver-1/profile/bin
+; path to binaries for vcsserver, it should be set by the installer
+; at installation time, e.g. /home/user/vcsserver-1/profile/bin
+; it can also be a path to nix-build output in case of development
 core.binary_dir = ""
-## Custom exception store path, defaults to TMPDIR
-## This is used to store exception from RhodeCode in shared directory
+; Custom exception store path, defaults to TMPDIR
+; This is used to store exceptions from RhodeCode in a shared directory
 #exception_tracker.store_path =
-## Default cache dir for caches. Putting this into a ramdisk
-## can boost performance, eg. /tmpfs/data_ramdisk, however this directory might require
-## large amount of space
-cache_dir = %(here)s/rcdev/data
+; #############
+; DOGPILE CACHE
+; #############
+
+; Default cache dir for caches. Putting this into a ramdisk can boost performance,
+; e.g. /tmpfs/data_ramdisk, however this directory might require a large amount of space
+cache_dir = %(here)s/data
-## cache region for storing repo_objects cache
+; ***************************************
+; `repo_object` cache, default file based
+; ***************************************
+
+; `repo_object` cache settings for vcs methods for repositories
 rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
-## cache auto-expires after N seconds (2592000 == 30 days)
+
+; cache auto-expires after N seconds
+; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
 rc_cache.repo_object.expiration_time = 2592000
-## cache file store path, if empty set automatically to tmp dir location
+; cache file store path, defaults to temporary directory if not set
 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
-## max size of LRU, old values will be discarded if the size of cache reaches max_size
-rc_cache.repo_object.max_size = 100
+; **********************************************************
+; `repo_object` cache with redis backend
+; recommended for larger instances, or for better performance
+; **********************************************************
+
+; `repo_object` cache settings for vcs methods for repositories
+#rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
+
+; cache auto-expires after N seconds
+; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
+#rc_cache.repo_object.expiration_time = 2592000
+
+; redis_expiration_time needs to be greater than expiration_time
+#rc_cache.repo_object.arguments.redis_expiration_time = 3592000
+
+#rc_cache.repo_object.arguments.host = localhost
+#rc_cache.repo_object.arguments.port = 6379
+#rc_cache.repo_object.arguments.db = 5
+#rc_cache.repo_object.arguments.socket_timeout = 30
+; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
+#rc_cache.repo_object.arguments.distributed_lock = true
-################################
-### LOGGING CONFIGURATION ####
-################################
+; #####################
+; LOGGING CONFIGURATION
+; #####################
 [loggers]
 keys = root, vcsserver
@@ -142,9 +167,9 @@ keys = console
 [formatters]
 keys = generic
-#############
-## LOGGERS ##
-#############
+; #######
+; LOGGERS
+; #######
 [logger_root]
 level = NOTSET
 handlers = console
@@ -156,19 +181,19 @@ qualname = vcsserver
 propagate = 1
-##############
-## HANDLERS ##
-##############
+; ########
+; HANDLERS
+; ########
 [handler_console]
 class = StreamHandler
-args = (sys.stderr,)
-level = DEBUG
+args = (sys.stderr, )
+level = INFO
 formatter = generic
-################
-## FORMATTERS ##
-################
+; ##########
+; FORMATTERS
+; ##########
 [formatter_generic]
 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
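
Both files leave the Redis-backed `repo_object` cache commented out. For reference only, a minimal sketch of what enabling it could look like in [app:main], assuming a Redis server on localhost:6379 and database 5 as in the commented defaults above, and used in place of the active `dogpile.cache.rc.file_namespace` backend lines:

; sketch: Redis msgpack backend for the `repo_object` cache (values mirror the
; commented-out defaults introduced in this change; adjust host/port/db to the actual Redis setup)
rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
rc_cache.repo_object.expiration_time = 2592000
; redis_expiration_time needs to be greater than expiration_time
rc_cache.repo_object.arguments.redis_expiration_time = 3592000
rc_cache.repo_object.arguments.host = localhost
rc_cache.repo_object.arguments.port = 6379
rc_cache.repo_object.arguments.db = 5
rc_cache.repo_object.arguments.socket_timeout = 30
rc_cache.repo_object.arguments.distributed_lock = true

With the gunicorn server block in production.ini, the service is started as the in-file comment describes, e.g. `gunicorn --log-config production.ini --paste production.ini` (the actual file name depends on how the config is deployed).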