# HG changeset patch
# User RhodeCode Admin
# Date 2023-07-18 07:51:33
# Node ID 5e04f327c51ca15138752d27c75d1518948047d8
# Parent e30cd90ef80dff331dbf7e5f6bace84f6bddd747
configs: updated with new options and settings for python3

diff --git a/configs/development.ini b/configs/development.ini
--- a/configs/development.ini
+++ b/configs/development.ini
@@ -1,4 +1,4 @@
-## -*- coding: utf-8 -*-
+

 ; #########################################
 ; RHODECODE COMMUNITY EDITION CONFIGURATION
@@ -77,7 +77,7 @@ asyncore_use_poll = true
 ; Recommended type is `gevent`
 #worker_class = gevent

-; The maximum number of simultaneous clients. Valid only for gevent
+; The maximum number of simultaneous clients per worker. Valid only for gevent
 #worker_connections = 10

 ; The maximum number of pending connections worker will queue to handle
@@ -222,12 +222,6 @@ lang = en
 ; Settings this to true could lead to very long startup time.
 startup.import_repos = false

-; Uncomment and set this path to use archive download cache.
-; Once enabled, generated archives will be cached at this location
-; and served from the cache during subsequent requests for the same archive of
-; the repository.
-#archive_cache_dir = /tmp/tarballcache
-
 ; URL at which the application is running. This is used for Bootstrapping
 ; requests in context when no web request is available. Used in ishell, or
 ; SSH calls. Set this for events to receive proper url for SSH calls.
@@ -381,6 +375,20 @@ file_store.backend = local
 ; path to store the uploaded binaries
 file_store.storage_path = %(here)s/data/file_store

+; Set this path to control settings for the archive download cache.
+; Generated repo archives will be cached at this location
+; and served from the cache during subsequent requests for the same archive of
+; the repository. It is important that this path is shared between RhodeCode
+; and vcsserver, e.g. on a shared filesystem.
+
+; Defaults to $cache_dir/archive_cache if not set
+archive_cache.store_dir = %(here)s/data/archive_cache
+
+; The limit in GB sets how much data we cache before recycling the least recently used entries. Defaults to 10 GB.
+archive_cache.cache_size_gb = 10
+
+; By default the cache uses a sharding technique; this sets the number of shards.
+archive_cache.cache_shards = 10

 ; #############
 ; CELERY CONFIG
@@ -440,7 +448,7 @@ rc_cache.cache_repo_longterm.max_size =
 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_general.expiration_time = 43200
 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
-#rc_cache.cache_general.arguments.filename = /tmp/cache_general.db
+#rc_cache.cache_general.arguments.filename = /tmp/cache_general_db

 ; alternative `cache_general` redis backend with distributed lock
 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
@@ -467,7 +475,7 @@ rc_cache.cache_general.expiration_time =
 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_perms.expiration_time = 3600
 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
-#rc_cache.cache_perms.arguments.filename = /tmp/cache_perms.db
+#rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db

 ; alternative `cache_perms` redis backend with distributed lock
 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
@@ -494,7 +502,7 @@ rc_cache.cache_perms.expiration_time = 3
 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_repo.expiration_time = 2592000
 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
-#rc_cache.cache_repo.arguments.filename = /tmp/cache_repo.db
+#rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db

 ; alternative `cache_repo` redis backend with distributed lock
 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
@@ -805,7 +813,7 @@ ssh.enable_ui_key_generator = true
 #appenlight.log_namespace_blacklist =

 ; Statsd client config, this is used to send metrics to statsd
-; We recommend setting statsd_exported and scrape them using Promethues
+; We recommend setting up statsd_exporter and scraping the metrics with Prometheus
 #statsd.enabled = false
 #statsd.statsd_host = 0.0.0.0
 #statsd.statsd_port = 8125
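The new archive_cache options (added to development.ini above and, identically, to production.ini below) work together: store_dir places the cache on disk, cache_size_gb caps its total size, and cache_shards splits the store into shards, presumably to spread archives across subdirectories. A minimal sketch for a multi-node install; the mount point below is illustrative, not part of this patch:

archive_cache.store_dir = /mnt/shared/rhodecode/archive_cache
archive_cache.cache_size_gb = 40
archive_cache.cache_shards = 10

With these values the cache holds about 40 GB in total (roughly 4 GB per shard, if archives hash evenly across shards) before the least recently used archives are recycled.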
diff --git a/configs/production.ini b/configs/production.ini
--- a/configs/production.ini
+++ b/configs/production.ini
@@ -1,4 +1,4 @@
-## -*- coding: utf-8 -*-
+

 ; #########################################
 ; RHODECODE COMMUNITY EDITION CONFIGURATION
@@ -173,12 +173,6 @@ lang = en
 ; Settings this to true could lead to very long startup time.
 startup.import_repos = false

-; Uncomment and set this path to use archive download cache.
-; Once enabled, generated archives will be cached at this location
-; and served from the cache during subsequent requests for the same archive of
-; the repository.
-#archive_cache_dir = /tmp/tarballcache
-
 ; URL at which the application is running. This is used for Bootstrapping
 ; requests in context when no web request is available. Used in ishell, or
 ; SSH calls. Set this for events to receive proper url for SSH calls.
@@ -332,6 +326,20 @@ file_store.backend = local
 ; path to store the uploaded binaries
 file_store.storage_path = %(here)s/data/file_store

+; Set this path to control settings for the archive download cache.
+; Generated repo archives will be cached at this location
+; and served from the cache during subsequent requests for the same archive of
+; the repository. It is important that this path is shared between RhodeCode
+; and vcsserver, e.g. on a shared filesystem.
+
+; Defaults to $cache_dir/archive_cache if not set
+archive_cache.store_dir = %(here)s/data/archive_cache
+
+; The limit in GB sets how much data we cache before recycling the least recently used entries. Defaults to 10 GB.
+archive_cache.cache_size_gb = 10
+
+; By default the cache uses a sharding technique; this sets the number of shards.
+archive_cache.cache_shards = 10

 ; #############
 ; CELERY CONFIG
@@ -391,7 +399,7 @@ rc_cache.cache_repo_longterm.max_size =
 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_general.expiration_time = 43200
 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
-#rc_cache.cache_general.arguments.filename = /tmp/cache_general.db
+#rc_cache.cache_general.arguments.filename = /tmp/cache_general_db

 ; alternative `cache_general` redis backend with distributed lock
 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
@@ -418,7 +426,7 @@ rc_cache.cache_general.expiration_time =
 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_perms.expiration_time = 3600
 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
-#rc_cache.cache_perms.arguments.filename = /tmp/cache_perms.db
+#rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db

 ; alternative `cache_perms` redis backend with distributed lock
 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
@@ -445,7 +453,7 @@ rc_cache.cache_perms.expiration_time = 3
 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_repo.expiration_time = 2592000
 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
-#rc_cache.cache_repo.arguments.filename = /tmp/cache_repo.db
+#rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db

 ; alternative `cache_repo` redis backend with distributed lock
 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
@@ -568,6 +576,9 @@ sqlalchemy.db1.pool_recycle = 3600
 ; the number of connections to keep open inside the connection pool.
 ; 0 indicates no limit
+; The general rule with gevent is: if your system allows 500 concurrent
+; greenlets (max_connections) that all do database access, then increase
+; pool_size + max_overflow so that they add up to 500.
 #sqlalchemy.db1.pool_size = 5

 ; The number of connections to allow in connection pool "overflow", that is
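To make the gevent sizing rule above concrete (the numbers are illustrative, not shipped defaults, and assume a sqlalchemy.db1.max_overflow option corresponding to the "overflow" comment that follows pool_size): with 500 concurrent greenlets all doing database access, one valid split is

sqlalchemy.db1.pool_size = 400
sqlalchemy.db1.max_overflow = 100

so that pool_size + max_overflow = 400 + 100 = 500, and every greenlet can obtain a connection without blocking on an exhausted pool.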