diff --git a/configs/development.ini b/configs/development.ini
--- a/configs/development.ini
+++ b/configs/development.ini
@@ -312,43 +312,53 @@ celery.task_always_eager = false
 ### DOGPILE CACHE ####
 #####################################
 ## Default cache dir for caches. Putting this into a ramdisk
-## can boost performance, eg. /tmpfs/data_ramdisk, however this might require lots
-## of space
+## can boost performance, e.g. /tmpfs/data_ramdisk, however this directory might require
+## a large amount of space
 cache_dir = /tmp/rcdev/data
-## cache settings for permission tree, auth TTL.
+## `cache_perms` cache settings for permission tree, auth TTL.
 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_perms.expiration_time = 300
-rc_cache.cache_perms.arguments.filename = /tmp/rc_cache_1
-## redis backend with distributed locks
+## alternative `cache_perms` redis backend with distributed lock
 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
 #rc_cache.cache_perms.expiration_time = 300
+## redis_expiration_time needs to be greater than expiration_time
+#rc_cache.cache_perms.arguments.redis_expiration_time = 7200
+#rc_cache.cache_perms.arguments.socket_timeout = 30
 #rc_cache.cache_perms.arguments.host = localhost
 #rc_cache.cache_perms.arguments.port = 6379
 #rc_cache.cache_perms.arguments.db = 0
-#rc_cache.cache_perms.arguments.redis_expiration_time = 7200
 #rc_cache.cache_perms.arguments.distributed_lock = true
-
+## `cache_repo` cache settings for FileTree, Readme, RSS FEEDS
 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_repo.expiration_time = 2592000
-rc_cache.cache_repo.arguments.filename = /tmp/rc_cache_2
-## redis backend with distributed locks
+## alternative `cache_repo` redis backend with distributed lock
 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
 #rc_cache.cache_repo.expiration_time = 2592000
-## this needs to be greater then expiration_time
+## redis_expiration_time needs to be greater than expiration_time
 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
+#rc_cache.cache_repo.arguments.socket_timeout = 30
 #rc_cache.cache_repo.arguments.host = localhost
 #rc_cache.cache_repo.arguments.port = 6379
 #rc_cache.cache_repo.arguments.db = 1
 #rc_cache.cache_repo.arguments.distributed_lock = true
-## cache settings for SQL queries
+## cache settings for SQL queries; this needs to use a memory type backend
 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
 rc_cache.sql_cache_short.expiration_time = 30
+## `cache_repo_longterm` cache for repo object instances; this needs to use a memory
+## type backend as the objects kept are not pickle serializable
+rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
+# by default we use 96H, as this cache is invalidated on push anyway
+rc_cache.cache_repo_longterm.expiration_time = 345600
+# max items in LRU cache; reduce this number to save memory and evict least
+# recently used cached objects sooner
+rc_cache.cache_repo_longterm.max_size = 10000
+
 ####################################
 ### BEAKER SESSION ####
@@ -505,6 +515,7 @@ debug_style = true
 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode
+# pymysql is an alternative driver for MySQL; use it in case of problems with the default one
 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
 sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
@@ -543,7 +554,6 @@ vcs.server.protocol = http
 ## Push/Pull operations protocol, available options are:
 ##     `http` - use http-rpc backend (default)
-## vcs.scm_app_implementation = http
 ## Push/Pull operations hooks protocol, available options are:
diff --git a/configs/production.ini b/configs/production.ini
--- a/configs/production.ini
+++ b/configs/production.ini
@@ -81,7 +81,7 @@ proc_name = rhodecode
 ## recommended for bigger setup is using of of other than sync one
 worker_class = gevent
 ## The maximum number of simultaneous clients. Valid only for Gevent
-#worker_connections = 10
+worker_connections = 10
 ## max number of requests that worker will handle before being gracefully
 ## restarted, could prevent memory leaks
 max_requests = 1000
@@ -287,43 +287,53 @@ celery.task_always_eager = false
 ### DOGPILE CACHE ####
 #####################################
 ## Default cache dir for caches. Putting this into a ramdisk
-## can boost performance, eg. /tmpfs/data_ramdisk, however this might require lots
-## of space
-cache_dir = /tmp/rcdev/data
+## can boost performance, e.g. /tmpfs/data_ramdisk, however this directory might require
+## a large amount of space
+cache_dir = /%(here)s/rcdev/data
-## cache settings for permission tree, auth TTL.
+## `cache_perms` cache settings for permission tree, auth TTL.
 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_perms.expiration_time = 300
-rc_cache.cache_perms.arguments.filename = /tmp/rc_cache_1
-## redis backend with distributed locks
+## alternative `cache_perms` redis backend with distributed lock
 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
 #rc_cache.cache_perms.expiration_time = 300
+## redis_expiration_time needs to be greater than expiration_time
+#rc_cache.cache_perms.arguments.redis_expiration_time = 7200
+#rc_cache.cache_perms.arguments.socket_timeout = 30
 #rc_cache.cache_perms.arguments.host = localhost
 #rc_cache.cache_perms.arguments.port = 6379
 #rc_cache.cache_perms.arguments.db = 0
-#rc_cache.cache_perms.arguments.redis_expiration_time = 7200
 #rc_cache.cache_perms.arguments.distributed_lock = true
-
+## `cache_repo` cache settings for FileTree, Readme, RSS FEEDS
 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_repo.expiration_time = 2592000
-rc_cache.cache_repo.arguments.filename = /tmp/rc_cache_2
-## redis backend with distributed locks
+## alternative `cache_repo` redis backend with distributed lock
 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
 #rc_cache.cache_repo.expiration_time = 2592000
-## this needs to be greater then expiration_time
+## redis_expiration_time needs to be greater than expiration_time
 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
+#rc_cache.cache_repo.arguments.socket_timeout = 30
 #rc_cache.cache_repo.arguments.host = localhost
 #rc_cache.cache_repo.arguments.port = 6379
 #rc_cache.cache_repo.arguments.db = 1
 #rc_cache.cache_repo.arguments.distributed_lock = true
-## cache settings for SQL queries
+## cache settings for SQL queries; this needs to use a memory type backend
 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
 rc_cache.sql_cache_short.expiration_time = 30
+## `cache_repo_longterm` cache for repo object instances; this needs to use a memory
+## type backend as the objects kept are not pickle serializable
+rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
+# by default we use 96H, as this cache is invalidated on push anyway
+rc_cache.cache_repo_longterm.expiration_time = 345600
+# max items in LRU cache; reduce this number to save memory and evict least
+# recently used cached objects sooner
+rc_cache.cache_repo_longterm.max_size = 10000
+
 ####################################
 ### BEAKER SESSION ####
@@ -475,6 +485,7 @@ set debug = false
 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode
+# pymysql is an alternative driver for MySQL; use it in case of problems with the default one
 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
 sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
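For reference, a minimal sketch of the Redis-backed `cache_perms` configuration that the commented-out lines above describe, with the alternative backend enabled instead of the default file_namespace one. The host, port, and db values are the sample defaults from this diff and would need adjusting for a real deployment; the only hard constraint noted in the comments is that redis_expiration_time stays greater than expiration_time.

rc_cache.cache_perms.backend = dogpile.cache.rc.redis
rc_cache.cache_perms.expiration_time = 300
## redis_expiration_time needs to be greater than expiration_time
rc_cache.cache_perms.arguments.redis_expiration_time = 7200
rc_cache.cache_perms.arguments.socket_timeout = 30
rc_cache.cache_perms.arguments.host = localhost
rc_cache.cache_perms.arguments.port = 6379
rc_cache.cache_perms.arguments.db = 0
rc_cache.cache_perms.arguments.distributed_lock = true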