From 2932384446430628adb459114235324f70e6db82 2021-02-16 13:51:20
From: Marcin Kuzminski
Date: 2021-02-16 13:51:20
Subject: [PATCH] docker: added all the initial docker files

compose: added initial setup - fix db password secret, fix SVN as a service
4.24.0 changes
Docker: fixes for 4.24.X deployment
Setup: added final fixes
Config files cleanups
License fix

---

diff --git a/.bootstrap/download-artifacts.sh b/.bootstrap/download-artifacts.sh
new file mode 100755
index 0000000..c624d8e
--- /dev/null
+++ b/.bootstrap/download-artifacts.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+set -euo pipefail
+IFS=$'\n\t'
+# e.g. 4.24.1
+source ../.env
+VER=$RC_VERSION
+INSTALLER_URL=https://dls.rhodecode.com/dls/N2E2ZTY1NzA3NjYxNDA2NTc1NjI3MTcyNzA2MjcxNzIyZTcwNjI3YQ==/rhodecode-control/latest-linux-ee
+
+echo "Downloading Artifacts for version: $VER"
+
+MANIFEST=https://dls.rhodecode.com/linux/MANIFEST
+CACHE_DIR=../.cache
+VER_REGEX="$VER+x86_64"
+
+echo "Downloading locale-archive"
+curl -L https://dls.rhodecode.com/assets/locale-archive -J -O
+mv -v locale-archive $CACHE_DIR
+
+ARTS=$(curl -s $MANIFEST | grep --ignore-case "$VER_REGEX" | cut -d ' ' -f 2)
+
+# vcsserver/ce/ee
+echo "Found the following artifacts: $ARTS"
+
+for url in $ARTS; do
+    echo "Downloading $url"
+    curl -L ${url} -J -O
+done
+
+## rhodecode control
+#for url in $(curl -s $MANIFEST | grep --ignore-case -E 'control.+\+x86_64' | cut -d ' ' -f 2); do
+#    echo "Downloading $url"
+#    curl -L ${url} -J -O
+#done
+
+## installer
+echo "Downloading installer from $INSTALLER_URL"
+curl -L $INSTALLER_URL -J -O
+
+INSTALLER=$(ls -Art RhodeCode-installer-* | tail -n 1)
+if [[ -n $INSTALLER ]]; then
+    chmod +x "${INSTALLER}"
+fi
+
+mv -v "${INSTALLER}" $CACHE_DIR
+mv -v *.bz2 $CACHE_DIR
diff --git a/.cache/.dirkeep b/.cache/.dirkeep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/.cache/.dirkeep
diff --git a/.env b/.env
new file mode 100644
index 0000000..ce1f2f3
--- /dev/null
+++ b/.env
@@ -0,0 +1,24 @@
+COMPOSE_PROJECT_NAME=rc_cluster
+TZ="UTC"
+
+# Version to deploy and run
+RC_VERSION="4.24.1"
+
+# Database access credentials
+POSTGRES_DB=rhodecode
+POSTGRES_PASSWORD=hUc1adS7oDd6Oj3in3
+
+# Base URL for the running app
+RHODECODE_BASE_URL=http://localhost:8888
+
+# HTTP and HTTPS ports for the running app
+RC_HTTP_PORT=8888
+RC_HTTPS_PORT=8443
+
+# Exposed SSH port; for increased security, do not use the default port 22
+RC_SSH_PORT=9022
+
+# User/password for the first admin user created for access
+RHODECODE_USER_EMAIL=admin@rhodecode.com
+RHODECODE_USER_NAME=admin
+RHODECODE_USER_PASS=qweqwe
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..59c8ff5
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,12 @@
+# CACHE FILES
+.cache/RhodeCode-installer*
+.cache/*.tar.bz2
+.cache/locale-archive
+
+# LOGS
+logs/*.log
+logs/nginx/*.log
+logs/svn/*.log
+
+.idea
+config/compose/.rcmetadata.json
diff --git a/README.md b/README.md
index 4fcf5ca..676c29d 100644
--- a/README.md
+++ b/README.md
@@ -1,28 +1,64 @@
-# RhodeCode Docker
+# RhodeCode Cluster
+
+RhodeCode Cluster is a multi-node, highly scalable setup to run RhodeCode and
+all its additional components in a single environment using Docker.
+
+Using docker-compose, this setup creates the following services for RhodeCode:
+
+- Nginx HTTP Server for load balancing and reverse proxy
+- RhodeCode HTTP
+- VCSServer for GIT/SVN/HG support
+- SSH Server for cloning over SSH
+- SVN webserver for HTTP support over SVN
+- Celery workers for asynchronous tasks
+- Celery beat for automation tasks
+- Redis Database for caching
+- Postgres database for persistent storage
+- Channelstream websocket server for live components
+
+
+## Prerequisites
+
+Visit the Docker site and install Docker (min version 20.10) and docker-compose:
+
+- https://docs.docker.com/engine/install/
+- https://docs.docker.com/compose/install/
+
+
+# Installation steps
+Follow these steps to build and run the RhodeCode Cluster via docker-compose.
 
 ## Download installer binaries
 
 First start by fetching required installer binaries. This is required to create both
 simple build and full compose setup.
 
-Download needed installer files, version can be adjusted in the download script
-Currently this is version 4.23.2, version can be adjusted in `.env` file
+Please check the `.env` file and adjust the version if needed.
 
 `cd .bootstrap/; ./download-artifacts.sh; cd ../`
 
-## docker compose:
+This will download the required files and put them into the `.cache` directory.
+After the downloads have finished, this directory should look similar to this:
+
+```shell
+drwxr-xr-x   8 rcdev  rcdev   256B Feb  8 13:35 .
+drwxr-xr-x  14 rcdev  rcdev   448B Feb  8 10:40 ..
+-rw-r--r--   1 rcdev  rcdev     0B Feb  8 20:44 .dirkeep
+-rwxr-xr-x   1 rcdev  rcdev   241M Feb  8 13:35 RhodeCode-installer-linux-build20210208_0800
+-rw-r--r--   1 rcdev  rcdev   156M Feb  8 13:35 RhodeCodeCommunity-4.24.1+x86_64-linux_build20210208_0800.tar.bz2
+-rw-r--r--   1 rcdev  rcdev   171M Feb  8 13:35 RhodeCodeEnterprise-4.24.1+x86_64-linux_build20210208_0800.tar.bz2
+-rw-r--r--   1 rcdev  rcdev   145M Feb  8 13:35 RhodeCodeVCSServer-4.24.1+x86_64-linux_build20210208_0800.tar.bz2
+-rw-r--r--   1 rcdev  rcdev   109M Feb  8 13:35 locale-archive
+```
 
-There's a more advanced high-performance setup using docker-compose.
-It bootstraps additional services for RhodeCode:
+## Set License for EE version
 
-- RhodeCode
-- VCSServer
-- SSH Server
-- Redis Database
-- Postgres database
-- Channelstream websocket server
-- Celery workers, and automation scheduler
-- SVN webserver for HTTP support
-- Nginx HTTP Server
+This setup uses the license provided in the file
+`config/compose/rhodecode_enterprise.license`. If you have a full or a trial license,
+please save the license data inside this file so it will be applied at creation.
+This file can also be left empty, and the license can then be applied via the web interface.
+
+
+## Run Docker compose build
 
 To create a full stack we need to run the database container, so it's ready to
 build the docker image.
 
@@ -37,7 +73,8 @@ build the docker image.
 
 We can now run the full installation. Database needs to be running for the next build command.
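+
+If the database container is not already up, it can be started on its own in
+detached mode first (a sketch; `database` is the service name defined in
+`docker-compose.yaml`):
+
+```shell
+   docker-compose up -d database
+```
+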
 ```shell
-  docker-compose build rhodecode && docker-compose build
+  docker-compose build rhodecode
+  docker-compose build
 ```
 
 Once we build the rhodecode app, we can run the whole stack using `docker-compose up`
 
@@ -56,6 +93,39 @@ In case for bigger setups docker-compose can scale more rhodecode/vcsserver work
    docker-compose up --scale vcsserver=3 rhodecode=3
 ```
 
+## Data structure
+
+There are 4 volumes defined:
+
+- `/var/log/rhodecode` # all logs from RhodeCode are saved in this volume
+- `/etc/rhodecode/conf` # configuration files for rhodecode, vcsserver and supervisord, and some cache data
+- `/var/opt/rhodecode_repo_store` # main repository storage where repositories will be stored
+- `/var/opt/rhodecode_data` # data dir for rhodecode cache/lock files, or user sessions (for file backend)
+
+
+## Upgrade
+
+To upgrade the stack to a newer version:
+
+- pull the latest repo
+- check the `.env` file for the correct update version
+- re-build rhodecode: `docker-compose build rhodecode`
+- restart the services: `docker-compose stop && docker-compose up`
+
 
 Logging is pushed to stdout from all services.
 
@@ -68,12 +138,12 @@ This is a fully running instance good for small use with 3-5 users.
 
 ```shell
 docker build -t rhodecode:4.23.2 -f rhodecode.dockerfile \
---build-arg RHODECODE_TYPE=Community \
---build-arg RHODECODE_VERSION=4.23.2 \
---build-arg RHODECODE_DB=sqlite \
---build-arg RHODECODE_USER=admin \
---build-arg RHODECODE_USER_PASS=secret4 \
---build-arg RHODECODE_USER_EMAIL=support@rhodecode.com \
+--build-arg RHODECODE_TYPE=Community \
+--build-arg RHODECODE_VERSION=4.23.2 \
+--build-arg RHODECODE_DB=sqlite \
+--build-arg RHODECODE_USER_NAME=admin \
+--build-arg RHODECODE_USER_PASS=secret4 \
+--build-arg RHODECODE_USER_EMAIL=support@rhodecode.com \
 .
 ```
 
 To build against an existing running Postgres or MySQL database you can specify:
 
 --build-arg RHODECODE_DB=postgresql://postgres:secret@database/rhodecode
 --build-arg RHODECODE_DB=mysql://root:secret@localhost/rhodecode?charset=utf8
 
-There are 4 volumes defined:
-
-- `/var/log/rhodecode` # all logs from RhodeCode are saved in this volume
-- `/etc/rhodecode/conf` # storing configuration files for rhodecode, vcsserver and supervisord, and some cache data
-- `/var/opt/rhodecode_repo_store` # main repository storage where repositories would be stored
-- `/var/opt/rhodecode_data` # data dir for rhodecode cache/lock files, or user sessions (for file backend)
-
 To copy the data over into the volumes, use the following command:
 
 ```shell
diff --git a/config/compose/channelstream.ini b/config/compose/channelstream.ini
new file mode 100644
index 0000000..d07436d
--- /dev/null
+++ b/config/compose/channelstream.ini
@@ -0,0 +1,22 @@
+[channelstream]
+host = 0.0.0.0
+port = 9800
+
+admin_user = admin
+# admin panel password
+admin_secret = b39acb28b2304a27a6a0e911500bf7d1
+# auth cookie secret, leave empty for a random string generated at server start
+# fill in if you need to have multiple servers and want to keep the admin session between them
+cookie_secret =
+
+# API password
+secret = b39acb28b2304a27a6a0e911500bf7d1
+
+demo = false
+allow_posting_from = 127.0.0.1,
+                     0.0.0.0
+log_level = INFO
+# should require SSL connections?
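+# an illustrative example (an assumption, not part of the default setup): when
+# SSL is terminated at the nginx service, one might set:
+#enforce_https = true
+#http_scheme = https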
+enforce_https = +# enforce https:// in links +http_scheme = diff --git a/config/compose/rhodecode.optimized.ini b/config/compose/rhodecode.optimized.ini new file mode 100644 index 0000000..5f2edee --- /dev/null +++ b/config/compose/rhodecode.optimized.ini @@ -0,0 +1,736 @@ + +; ########################################## +; RHODECODE ENTERPRISE EDITION CONFIGURATION +; ########################################## + +[DEFAULT] +; Debug flag sets all loggers to debug, and enables request tracking +debug = false + +; ######################################################################## +; EMAIL CONFIGURATION +; These settings will be used by the RhodeCode mailing system +; ######################################################################## + +; prefix all emails subjects with given prefix, helps filtering out emails +#email_prefix = [RhodeCode] + +; email FROM address all mails will be sent +#app_email_from = rhodecode-noreply@localhost + +#smtp_server = mail.server.com +#smtp_username = +#smtp_password = +#smtp_port = +#smtp_use_tls = false +#smtp_use_ssl = true + + +[server:main] +; COMMON HOST/IP CONFIG +host = 0.0.0.0 +port = 10020 + + +; ########################### +; GUNICORN APPLICATION SERVER +; ########################### + +; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini + +; Module to use, this setting shouldn't be changed +use = egg:gunicorn#main + +; Sets the number of process workers. More workers means more concurrent connections +; RhodeCode can handle at the same time. Each additional worker also it increases +; memory usage as each has it's own set of caches. +; Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more +; than 8-10 unless for really big deployments .e.g 700-1000 users. +; `instance_id = *` must be set in the [app:main] section below (which is the default) +; when using more than 1 worker. +workers = 2 + +; Gunicorn access log level +loglevel = info + +; Process name visible in process list +proc_name = gunicorn-web-1 + +; Type of worker class, one of `sync`, `gevent` +; Recommended type is `gevent` +worker_class = gevent + +; The maximum number of simultaneous clients per worker. Valid only for gevent +worker_connections = 10 + +; Max number of requests that worker will handle before being gracefully restarted. +; Prevents memory leaks, jitter adds variability so not all workers are restarted at once. +max_requests = 2000 +max_requests_jitter = 100 + +; Amount of time a worker can spend with handling a request before it +; gets killed and restarted. By default set to 21600 (6hrs) +; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h) +timeout = 21600 + +; The maximum size of HTTP request line in bytes. +; 0 for unlimited +limit_request_line = 0 + +; Limit the number of HTTP headers fields in a request. +; By default this value is 100 and can't be larger than 32768. +limit_request_fields = 32768 + +; Limit the allowed size of an HTTP request header field. +; Value is a positive number or 0. +; Setting it to 0 will allow unlimited header field sizes. +limit_request_field_size = 0 + +; Timeout for graceful workers restart. +; After receiving a restart signal, workers have this much time to finish +; serving requests. Workers still alive after the timeout (starting from the +; receipt of the restart signal) are force killed. +; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h) +graceful_timeout = 3600 + +# The number of seconds to wait for requests on a Keep-Alive connection. 
+# Generally set in the 1-5 seconds range. +keepalive = 2 + +; Maximum memory usage that each worker can use before it will receive a +; graceful restart signal 0 = memory monitoring is disabled +; Examples: 268435456 (256MB), 536870912 (512MB) +; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB) +memory_max_usage = 2147483648 + +; How often in seconds to check for memory usage for each gunicorn worker +memory_usage_check_interval = 60 + +; Threshold value for which we don't recycle worker if GarbageCollection +; frees up enough resources. Before each restart we try to run GC on worker +; in case we get enough free memory after that, restart will not happen. +memory_usage_recovery_threshold = 0.8 + + +; Prefix middleware for RhodeCode. +; recommended when using proxy setup. +; allows to set RhodeCode under a prefix in server. +; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well. +; And set your prefix like: `prefix = /custom_prefix` +; be sure to also set beaker.session.cookie_path = /custom_prefix if you need +; to make your cookies only work on prefix url +[filter:proxy-prefix] +use = egg:PasteDeploy#prefix +prefix = / + +[app:main] +; The %(here)s variable will be replaced with the absolute path of parent directory +; of this file +; In addition ENVIRONMENT variables usage is possible, e.g +; sqlalchemy.db1.url = {ENV_RC_DB_URL} + +use = egg:rhodecode-enterprise-ee + +; enable proxy prefix middleware, defined above +#filter-with = proxy-prefix + +; encryption key used to encrypt social plugin tokens, +; remote_urls with credentials etc, if not set it defaults to +; `beaker.session.secret` +#rhodecode.encrypted_values.secret = + +; decryption strict mode (enabled by default). It controls if decryption raises +; `SignatureVerificationError` in case of wrong key, or damaged encryption data. +#rhodecode.encrypted_values.strict = false + +; Pick algorithm for encryption. Either fernet (more secure) or aes (default) +; fernet is safer, and we strongly recommend switching to it. +; Due to backward compatibility aes is used as default. +#rhodecode.encrypted_values.algorithm = fernet + +; Return gzipped responses from RhodeCode (static files/application) +gzip_responses = false + +; Auto-generate javascript routes file on startup +generate_js_files = false + +; System global default language. +; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh +lang = en + +; Perform a full repository scan and import on each server start. +; Settings this to true could lead to very long startup time. +startup.import_repos = false + +; Uncomment and set this path to use archive download cache. +; Once enabled, generated archives will be cached at this location +; and served from the cache during subsequent requests for the same archive of +; the repository. +archive_cache_dir = /etc/rhodecode/conf/data/tarballcache + +; URL at which the application is running. This is used for Bootstrapping +; requests in context when no web request is available. Used in ishell, or +; SSH calls. Set this for events to receive proper url for SSH calls. +app.base_url = {ENV_RC_BASE_URL} + +; Unique application ID. Should be a random unique string for security. +app_instance_uuid = 4442f2dac4dc4fb982f781546735bb99 + +; Cut off limit for large diffs (size in bytes). If overall diff size on +; commit, or pull request exceeds this limit this diff will be displayed +; partially. 
E.g 512000 == 512Kb +cut_off_limit_diff = 512000 + +; Cut off limit for large files inside diffs (size in bytes). Each individual +; file inside diff which exceeds this limit will be displayed partially. +; E.g 128000 == 128Kb +cut_off_limit_file = 128000 + +; Use cached version of vcs repositories everywhere. Recommended to be `true` +vcs_full_cache = true + +; Force https in RhodeCode, fixes https redirects, assumes it's always https. +; Normally this is controlled by proper flags sent from http server such as Nginx or Apache +force_https = false + +; use Strict-Transport-Security headers +use_htsts = false + +; Set to true if your repos are exposed using the dumb protocol +git_update_server_info = false + +; RSS/ATOM feed options +rss_cut_off_limit = 256000 +rss_items_per_page = 10 +rss_include_diff = false + +; gist URL alias, used to create nicer urls for gist. This should be an +; url that does rewrites to _admin/gists/{gistid}. +; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal +; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid} +gist_alias_url = + +; List of views (using glob pattern syntax) that AUTH TOKENS could be +; used for access. +; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it +; came from the the logged in user who own this authentication token. +; Additionally @TOKEN syntax can be used to bound the view to specific +; authentication token. Such view would be only accessible when used together +; with this authentication token +; list of all views can be found under `/_admin/permissions/auth_token_access` +; The list should be "," separated and on a single line. +; Most common views to enable: + +# RepoCommitsView:repo_commit_download +# RepoCommitsView:repo_commit_patch +# RepoCommitsView:repo_commit_raw +# RepoCommitsView:repo_commit_raw@TOKEN +# RepoFilesView:repo_files_diff +# RepoFilesView:repo_archivefile +# RepoFilesView:repo_file_raw +# GistView:* +api_access_controllers_whitelist = + +; Default encoding used to convert from and to unicode +; can be also a comma separated list of encoding in case of mixed encodings +default_encoding = UTF-8 + +; instance-id prefix +; a prefix key for this instance used for cache invalidation when running +; multiple instances of RhodeCode, make sure it's globally unique for +; all running RhodeCode instances. Leave empty if you don't use it +instance_id = + +; Fallback authentication plugin. Set this to a plugin ID to force the usage +; of an authentication plugin also if it is disabled by it's settings. +; This could be useful if you are unable to log in to the system due to broken +; authentication settings. Then you can enable e.g. the internal RhodeCode auth +; module to log in again and fix the settings. +; Available builtin plugin IDs (hash is part of the ID): +; egg:rhodecode-enterprise-ce#rhodecode +; egg:rhodecode-enterprise-ce#pam +; egg:rhodecode-enterprise-ce#ldap +; egg:rhodecode-enterprise-ce#jasig_cas +; egg:rhodecode-enterprise-ce#headers +; egg:rhodecode-enterprise-ce#crowd + +#rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode + +; Flag to control loading of legacy plugins in py:/path format +auth_plugin.import_legacy_plugins = true + +; alternative return HTTP header for failed authentication. Default HTTP +; response is 401 HTTPUnauthorized. Currently HG clients have troubles with +; handling that causing a series of failed authentication calls. 
+; Set this variable to 403 to return HTTPForbidden, or any other HTTP code +; This will be served instead of default 401 on bad authentication +auth_ret_code = + +; use special detection method when serving auth_ret_code, instead of serving +; ret_code directly, use 401 initially (Which triggers credentials prompt) +; and then serve auth_ret_code to clients +auth_ret_code_detection = false + +; locking return code. When repository is locked return this HTTP code. 2XX +; codes don't break the transactions while 4XX codes do +lock_ret_code = 423 + +; allows to change the repository location in settings page +allow_repo_location_change = true + +; allows to setup custom hooks in settings page +allow_custom_hooks_settings = true + +; Generated license token required for EE edition license. +; New generated token value can be found in Admin > settings > license page. +license_token = abra-cada-bra1-rce3 + +; This flag hides sensitive information on the license page such as token, and license data +license.hide_license_info = false + +; supervisor connection uri, for managing supervisor and logs. +supervisor.uri = 127.0.0.1:10001 + +; supervisord group name/id we only want this RC instance to handle +supervisor.group_id = web-1 + +; Display extended labs settings +labs_settings_active = true + +; Custom exception store path, defaults to TMPDIR +; This is used to store exception from RhodeCode in shared directory +#exception_tracker.store_path = + +; File store configuration. This is used to store and serve uploaded files +file_store.enabled = true + +; Storage backend, available options are: local +file_store.backend = local + +; path to store the uploaded binaries +file_store.storage_path = /var/opt/rhodecode_data/file_store + + +; ############# +; CELERY CONFIG +; ############# + +; manually run celery: /path/to/celery worker -E --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini + +use_celery = true + +; connection url to the message broker (default redis) +celery.broker_url = redis://redis:6379/8 + +; rabbitmq example +#celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost + +; maximum tasks to execute before worker restart +celery.max_tasks_per_child = 20 + +; tasks will never be sent to the queue, but executed locally instead. +celery.task_always_eager = false + +; ############# +; DOGPILE CACHE +; ############# + +; Default cache dir for caches. Putting this into a ramdisk can boost performance. +; eg. 
/tmpfs/data_ramdisk, however this directory might require large amount of space +cache_dir = /var/opt/rhodecode_data + +; ********************************************* +; `sql_cache_short` cache for heavy SQL queries +; Only supported backend is `memory_lru` +; ********************************************* +rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru +rc_cache.sql_cache_short.expiration_time = 30 + + +; ***************************************************** +; `cache_repo_longterm` cache for repo object instances +; Only supported backend is `memory_lru` +; ***************************************************** +rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru +; by default we use 30 Days, cache is still invalidated on push +rc_cache.cache_repo_longterm.expiration_time = 2592000 +; max items in LRU cache, set to smaller number to save memory, and expire last used caches +rc_cache.cache_repo_longterm.max_size = 10000 + + +; ************************************************* +; `cache_perms` cache for permission tree, auth TTL +; ************************************************* +#rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace +#rc_cache.cache_perms.expiration_time = 300 +; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set +#rc_cache.cache_perms.arguments.filename = /tmp/cache_perms.db + +; alternative `cache_perms` redis backend with distributed lock +rc_cache.cache_perms.backend = dogpile.cache.rc.redis +rc_cache.cache_perms.expiration_time = 300 + +; redis_expiration_time needs to be greater then expiration_time +rc_cache.cache_perms.arguments.redis_expiration_time = 7200 + +rc_cache.cache_perms.arguments.host = redis +rc_cache.cache_perms.arguments.port = 6379 +rc_cache.cache_perms.arguments.db = 0 +rc_cache.cache_perms.arguments.socket_timeout = 30 +; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends +#rc_cache.cache_perms.arguments.distributed_lock = true + + +; *************************************************** +; `cache_repo` cache for file tree, Readme, RSS FEEDS +; *************************************************** +#rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace +#rc_cache.cache_repo.expiration_time = 2592000 +; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set +#rc_cache.cache_repo.arguments.filename = /tmp/cache_repo.db + +; alternative `cache_repo` redis backend with distributed lock +rc_cache.cache_repo.backend = dogpile.cache.rc.redis +rc_cache.cache_repo.expiration_time = 2592000 + +; redis_expiration_time needs to be greater then expiration_time +#rc_cache.cache_repo.arguments.redis_expiration_time = 2678400 + +rc_cache.cache_repo.arguments.host = redis +rc_cache.cache_repo.arguments.port = 6379 +rc_cache.cache_repo.arguments.db = 1 +rc_cache.cache_repo.arguments.socket_timeout = 30 +; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends +#rc_cache.cache_repo.arguments.distributed_lock = true + + +; ############## +; BEAKER SESSION +; ############## + +; beaker.session.type is type of storage options for the logged users sessions. Current allowed +; types are file, ext:redis, ext:database, ext:memcached, and memory (default if not specified). 
+; Fastest ones are Redis and ext:database
+#beaker.session.type = file
+#beaker.session.data_dir = /var/opt/rhodecode_data/sessions
+
+; Redis based sessions
+beaker.session.type = ext:redis
+beaker.session.url = redis://redis:6379/2
+
+; DB based session, fast, and allows easy management over logged in users
+#beaker.session.type = ext:database
+#beaker.session.table_name = db_session
+#beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
+#beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
+#beaker.session.sa.pool_recycle = 3600
+#beaker.session.sa.echo = false
+
+beaker.session.key = http_app
+beaker.session.secret = b39acb28b2304a27a6a0e911500bf7d1
+beaker.session.lock_dir = /data_ramdisk/lock
+
+; Secure encrypted cookie. Requires AES and AES python libraries
+; you must disable beaker.session.secret to use this
+#beaker.session.encrypt_key = key_for_encryption
+#beaker.session.validate_key = validation_key
+
+; Sets session as invalid (also logging out user) if it has not been
+; accessed for a given amount of time in seconds
+beaker.session.timeout = 2592000
+beaker.session.httponly = true
+
+; Path to use for the cookie. Set to prefix if you use prefix middleware
+#beaker.session.cookie_path = /custom_prefix
+
+; Set https secure cookie
+beaker.session.secure = false
+
+; default cookie expiration time in seconds, set to `true` to set expire
+; at browser close
+#beaker.session.cookie_expires = 3600
+
+; #############################
+; SEARCH INDEXING CONFIGURATION
+; #############################
+
+; Full text search indexer is available in rhodecode-tools under
+; `rhodecode-tools index` command
+
+; WHOOSH Backend, doesn't require additional services to run
+; it works well with a few dozen repos
+#search.module = rhodecode.lib.index.whoosh
+#search.location = /var/opt/rhodecode_data/index
+
+; ElasticSearch (EE edition only). Requires an Elastic Search cluster
+; to be installed and running. Recommended for a large number of repositories
+search.module = rc_elasticsearch
+search.location = http://elasticsearch:9200
+; specify Elastic Search version, 6 for latest or 2 for legacy
+search.es_version = 6
+
+; ####################
+; CHANNELSTREAM CONFIG
+; ####################
+
+; channelstream enables persistent connections and live notifications
+; in the system. It's also used by the chat system
+
+channelstream.enabled = true
+
+; server address for channelstream server on the backend
+channelstream.server = channelstream:9800
+
+; location of the channelstream server from the outside world
+; use ws:// for http or wss:// for https. This address needs to be handled
+; by an external HTTP server such as Nginx or Apache
+; see Nginx/Apache configuration examples in our docs
+channelstream.ws_url = ws://localhost:8888/_channelstream
+channelstream.secret = b39acb28b2304a27a6a0e911500bf7d1
+channelstream.history.location = /var/opt/rhodecode_data/channelstream_history
+
+; Internal application path that Javascript uses to connect into.
+; If you use proxy-prefix the prefix should be added before /_channelstream
+channelstream.proxy_path = /_channelstream
+
+; Live chat for commits/pull requests. Requires CHANNELSTREAM to be enabled
+; and configured.
(EE edition only) +chat.enabled = false + + +; ############################## +; MAIN RHODECODE DATABASE CONFIG +; ############################## + +#sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30 +#sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode +#sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8 +; pymysql is an alternative driver for MySQL, use in case of problems with default one +#sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode + +sqlalchemy.db1.url = postgresql://rhodecode:hUc1adS7oDd6Oj3in3@database/rhodecode + +; see sqlalchemy docs for other advanced settings +; print the sql statements to output +sqlalchemy.db1.echo = false + +; recycle the connections after this amount of seconds +sqlalchemy.db1.pool_recycle = 3600 +sqlalchemy.db1.convert_unicode = true + +; the number of connections to keep open inside the connection pool. +; 0 indicates no limit +#sqlalchemy.db1.pool_size = 5 + +; The number of connections to allow in connection pool "overflow", that is +; connections that can be opened above and beyond the pool_size setting, +; which defaults to five. +#sqlalchemy.db1.max_overflow = 10 + +; Connection check ping, used to detect broken database connections +; could be enabled to better handle cases if MySQL has gone away errors +#sqlalchemy.db1.ping_connection = true + +; ########## +; VCS CONFIG +; ########## +vcs.server.enable = true +vcs.server = vcsserver:10010 + +; Web server connectivity protocol, responsible for web based VCS operations +; Available protocols are: +; `http` - use http-rpc backend (default) +vcs.server.protocol = http + +; Push/Pull operations protocol, available options are: +; `http` - use http-rpc backend (default) +vcs.scm_app_implementation = http + +; Push/Pull operations hooks protocol, available options are: +; `http` - use http-rpc backend (default) +vcs.hooks.protocol = http + +; Host on which this instance is listening for hooks. If vcsserver is in other location +; this should be adjusted. +vcs.hooks.host = rhodecode + +; Start VCSServer with this instance as a subprocess, useful for development +vcs.start_server = false + +; List of enabled VCS backends, available options are: +; `hg` - mercurial +; `git` - git +; `svn` - subversion +vcs.backends = hg, git, svn + +; Wait this number of seconds before killing connection to the vcsserver +vcs.connection_timeout = 3600 + +; Compatibility version when creating SVN repositories. Defaults to newest version when commented out. +; Available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible +#vcs.svn.compatible_version = pre-1.8-compatible + + +; #################################################### +; Subversion proxy support (mod_dav_svn) +; Maps RhodeCode repo groups into SVN paths for Apache +; #################################################### + +; Enable or disable the config file generation. +svn.proxy.generate_config = true + +; Generate config file with `SVNListParentPath` set to `On`. +svn.proxy.list_parent_path = true + +; Set location and file name of generated config file. +svn.proxy.config_file_path = /etc/rhodecode/conf/svn/mod_dav_svn.conf + +; alternative mod_dav config template. 
This needs to be a valid mako template +; Example template can be found in the source code: +; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako +#svn.proxy.config_template = ~/.rccontrol/community-1/custom_svn_conf.mako + +; Used as a prefix to the `Location` block in the generated config file. +; In most cases it should be set to `/`. +svn.proxy.location_root = / + +; Command to reload the mod dav svn configuration on change. +; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh +; Make sure user who runs RhodeCode process is allowed to reload Apache +#svn.proxy.reload_cmd = /etc/init.d/apache2 reload + +; If the timeout expires before the reload command finishes, the command will +; be killed. Setting it to zero means no timeout. Defaults to 10 seconds. +#svn.proxy.reload_timeout = 10 + +; #################### +; SSH Support Settings +; #################### + +; Defines if a custom authorized_keys file should be created and written on +; any change user ssh keys. Setting this to false also disables possibility +; of adding SSH keys by users from web interface. Super admins can still +; manage SSH Keys. +ssh.generate_authorized_keyfile = true + +; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding` +# ssh.authorized_keys_ssh_opts = + +; Path to the authorized_keys file where the generate entries are placed. +; It is possible to have multiple key files specified in `sshd_config` e.g. +; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode +ssh.authorized_keys_file_path = /etc/rhodecode/conf/ssh/authorized_keys_rhodecode + +; Command to execute the SSH wrapper. The binary is available in the +; RhodeCode installation directory. +; e.g ~/.rccontrol/community-1/profile/bin/rc-ssh-wrapper +ssh.wrapper_cmd = /var/opt/rhodecode_bin/bin/rc-ssh-wrapper + +; Allow shell when executing the ssh-wrapper command +ssh.wrapper_cmd_allow_shell = false + +; Enables logging, and detailed output send back to the client during SSH +; operations. Useful for debugging, shouldn't be used in production. +ssh.enable_debug_logging = false + +; Paths to binary executable, by default they are the names, but we can +; override them if we want to use a custom one +ssh.executable.hg = ~/.rccontrol/vcsserver-1/profile/bin/hg +ssh.executable.git = ~/.rccontrol/vcsserver-1/profile/bin/git +ssh.executable.svn = ~/.rccontrol/vcsserver-1/profile/bin/svnserve + +; Enables SSH key generator web interface. Disabling this still allows users +; to add their own keys. +ssh.enable_ui_key_generator = true + +; Dummy marker to add new entries after. +; Add any custom entries below. Please don't remove this marker. 
+custom.conf = 1 + + +; ##################### +; LOGGING CONFIGURATION +; ##################### +[loggers] +keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper + +[handlers] +keys = console, console_sql + +[formatters] +keys = generic, color_formatter, color_formatter_sql + +; ####### +; LOGGERS +; ####### +[logger_root] +level = NOTSET +handlers = console + +[logger_sqlalchemy] +level = INFO +handlers = console_sql +qualname = sqlalchemy.engine +propagate = 0 + +[logger_beaker] +level = DEBUG +handlers = +qualname = beaker.container +propagate = 1 + +[logger_rhodecode] +level = DEBUG +handlers = +qualname = rhodecode +propagate = 1 + +[logger_ssh_wrapper] +level = DEBUG +handlers = +qualname = ssh_wrapper +propagate = 1 + +[logger_celery] +level = DEBUG +handlers = +qualname = celery + + +; ######## +; HANDLERS +; ######## + +[handler_console] +class = StreamHandler +args = (sys.stderr, ) +level = INFO +formatter = generic + +[handler_console_sql] +; "level = DEBUG" logs SQL queries and results. +; "level = INFO" logs SQL queries. +; "level = WARN" logs neither. (Recommended for production systems.) +class = StreamHandler +args = (sys.stderr, ) +level = WARN +formatter = generic + +; ########## +; FORMATTERS +; ########## + +[formatter_generic] +class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter +format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %Y-%m-%d %H:%M:%S + +[formatter_color_formatter] +class = rhodecode.lib.logging_formatter.ColorFormatter +format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %Y-%m-%d %H:%M:%S + +[formatter_color_formatter_sql] +class = rhodecode.lib.logging_formatter.ColorFormatterSql +format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %Y-%m-%d %H:%M:%S diff --git a/config/compose/rhodecode_enterprise.license b/config/compose/rhodecode_enterprise.license new file mode 100644 index 0000000..7b59cfa --- /dev/null +++ b/config/compose/rhodecode_enterprise.license @@ -0,0 +1 @@ +--- LICENSE DATA SHOULD BE PASTED HERE --- \ No newline at end of file diff --git a/config/compose/vcsserver.optimized.ini b/config/compose/vcsserver.optimized.ini new file mode 100644 index 0000000..6b4b04c --- /dev/null +++ b/config/compose/vcsserver.optimized.ini @@ -0,0 +1,199 @@ + +; ################################# +; RHODECODE VCSSERVER CONFIGURATION +; ################################# + +[server:main] +; COMMON HOST/IP CONFIG +host = 0.0.0.0 +port = 10010 + + +; ########################### +; GUNICORN APPLICATION SERVER +; ########################### + +; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini + +; Module to use, this setting shouldn't be changed +use = egg:gunicorn#main + +; Sets the number of process workers. More workers means more concurrent connections +; RhodeCode can handle at the same time. Each additional worker also it increases +; memory usage as each has it's own set of caches. +; Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more +; than 8-10 unless for really big deployments .e.g 700-1000 users. +; `instance_id = *` must be set in the [app:main] section below (which is the default) +; when using more than 1 worker. +workers = 3 + +; Gunicorn access log level +loglevel = info + +; Process name visible in process list +proc_name = gunicorn-vcsserver-1 + +; Type of worker class, one of `sync`, `gevent` +; currently `sync` is the only option allowed. 
+worker_class = sync + +; The maximum number of simultaneous clients. Valid only for gevent +worker_connections = 10 + +; Max number of requests that worker will handle before being gracefully restarted. +; Prevents memory leaks, jitter adds variability so not all workers are restarted at once. +max_requests = 3000 +max_requests_jitter = 100 + +; Amount of time a worker can spend with handling a request before it +; gets killed and restarted. By default set to 21600 (6hrs) +; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h) +timeout = 21600 + +; The maximum size of HTTP request line in bytes. +; 0 for unlimited +limit_request_line = 0 + +; Limit the number of HTTP headers fields in a request. +; By default this value is 100 and can't be larger than 32768. +limit_request_fields = 32768 + +; Limit the allowed size of an HTTP request header field. +; Value is a positive number or 0. +; Setting it to 0 will allow unlimited header field sizes. +limit_request_field_size = 0 + +; Timeout for graceful workers restart. +; After receiving a restart signal, workers have this much time to finish +; serving requests. Workers still alive after the timeout (starting from the +; receipt of the restart signal) are force killed. +; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h) +graceful_timeout = 3600 + +# The number of seconds to wait for requests on a Keep-Alive connection. +# Generally set in the 1-5 seconds range. +keepalive = 2 + +; Maximum memory usage that each worker can use before it will receive a +; graceful restart signal 0 = memory monitoring is disabled +; Examples: 268435456 (256MB), 536870912 (512MB) +; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB) +memory_max_usage = 2147483648 + +; How often in seconds to check for memory usage for each gunicorn worker +memory_usage_check_interval = 60 + +; Threshold value for which we don't recycle worker if GarbageCollection +; frees up enough resources. Before each restart we try to run GC on worker +; in case we get enough free memory after that, restart will not happen. +memory_usage_recovery_threshold = 0.8 + + +[app:main] +; The %(here)s variable will be replaced with the absolute path of parent directory +; of this file +use = egg:rhodecode-vcsserver + +; Pyramid default locales, we need this to be set +pyramid.default_locale_name = en + +; default locale used by VCS systems +locale = en_US.UTF-8 + +; path to binaries for vcsserver, it should be set by the installer +; at installation time, e.g /home/user/vcsserver-1/profile/bin +; it can also be a path to nix-build output in case of development +core.binary_dir = /home/rhodecode/.rccontrol/vcsserver-1/profile/bin + +; Custom exception store path, defaults to TMPDIR +; This is used to store exception from RhodeCode in shared directory +#exception_tracker.store_path = + +; ############# +; DOGPILE CACHE +; ############# + +; Default cache dir for caches. Putting this into a ramdisk can boost performance. +; eg. 
/tmpfs/data_ramdisk, however this directory might require large amount of space +cache_dir = /var/opt/rhodecode_data + +; *************************************** +; `repo_object` cache, default file based +; *************************************** + +; `repo_object` cache settings for vcs methods for repositories +#rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace + +; cache auto-expires after N seconds +; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days) +#rc_cache.repo_object.expiration_time = 2592000 + +; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set +#rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db + +; *********************************************************** +; `repo_object` cache with redis backend +; recommended for larger instance, and for better performance +; *********************************************************** + +; `repo_object` cache settings for vcs methods for repositories +rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack + +; cache auto-expires after N seconds +; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days) +rc_cache.repo_object.expiration_time = 2592000 + +; redis_expiration_time needs to be greater then expiration_time +rc_cache.repo_object.arguments.redis_expiration_time = 3592000 + +rc_cache.repo_object.arguments.host = redis +rc_cache.repo_object.arguments.port = 6379 +rc_cache.repo_object.arguments.db = 5 +rc_cache.repo_object.arguments.socket_timeout = 30 +; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends +#rc_cache.repo_object.arguments.distributed_lock = true + + +; ##################### +; LOGGING CONFIGURATION +; ##################### +[loggers] +keys = root, vcsserver + +[handlers] +keys = console + +[formatters] +keys = generic + +; ####### +; LOGGERS +; ####### +[logger_root] +level = NOTSET +handlers = console + +[logger_vcsserver] +level = DEBUG +handlers = +qualname = vcsserver +propagate = 1 + + +; ######## +; HANDLERS +; ######## + +[handler_console] +class = StreamHandler +args = (sys.stderr, ) +level = INFO +formatter = generic + +; ########## +; FORMATTERS +; ########## + +[formatter_generic] +format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %Y-%m-%d %H:%M:%S diff --git a/config/ssh/.dirkeep b/config/ssh/.dirkeep new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/config/ssh/.dirkeep diff --git a/config/ssh/authorized_keys b/config/ssh/authorized_keys new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/config/ssh/authorized_keys diff --git a/config/svn/mod_dav_svn.conf b/config/svn/mod_dav_svn.conf new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/config/svn/mod_dav_svn.conf diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 0000000..0717c08 --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,392 @@ +version: '3.9' + +volumes: + + # bind volume with logs + logvolume: + driver: local + driver_opts: + type: none + o: bind + device: $PWD/logs + + # bind-mount with configs + confvolume: + driver: local + driver_opts: + type: none + o: bind + device: $PWD/config + + # volume for rhodecode caches, archive caches, elasticsearch etc + datavolume: {} + + # postgres store + pg_data: {} + + # volume for rhodecode elasticsearch + es_data: {} + + # RhodeCode repo-store, it's where the repositories will be stored + 
rhodecode_repos: {} + +networks: + rhodecode_network: + name: rhodecode_network + +services: + + rhodecode: + networks: + - rhodecode_network + image: rhodecode/app:${RC_VERSION:?specify-RC_VERSION-env-var} + stdin_open: true + tty: true + restart: unless-stopped + command: [ + "/var/opt/rhodecode_bin/bin/gunicorn", + "--name", + "gunicorn-rhodecode-1", + "--error-logfile=-", + "--paster=/etc/rhodecode/conf/compose/rhodecode.optimized.ini", + "--config=/etc/rhodecode/conf/gunicorn_conf.py" + ] + ports: + - "127.0.0.1::10020" + + build: + context: . + dockerfile: service/rhodecode/rhodecode.dockerfile + network: rhodecode_network + args: + TZ: ${TZ} + RHODECODE_VERSION: ${RC_VERSION:?specify-RC_VERSION-env-var} + RHODECODE_DB: postgresql://rhodecode:${POSTGRES_PASSWORD:?must-specify-db-password}@database/${POSTGRES_DB:?must-specify-db-name} + RHODECODE_USER_NAME: ${RHODECODE_USER_NAME} + RHODECODE_USER_PASS: ${RHODECODE_USER_PASS} + RHODECODE_USER_EMAIL: ${RHODECODE_USER_EMAIL} + + environment: + RC_APP_TYPE: rhodecode_http + RC_APP_PROC: 1 + ENV_RC_BASE_URL: ${RHODECODE_BASE_URL} + SSL_CERT_FILE: "/etc/rhodecode/conf/ca-bundle.crt" + REQUESTS_CA_BUNDLE: "/etc/rhodecode/conf/ca-bundle.crt" + GIT_SSL_CAINFO: "/etc/rhodecode/conf/ca-bundle.crt" + GEVENT_RESOLVER: "ares" + + DB_UPGRADE: 1 # run the DB upgrade + SETUP_APP: 1 # run the application default settings setup, can be turned off after initial run + MAIN_INI_PATH: /etc/rhodecode/conf/compose/rhodecode.optimized.ini + + # SVN Specific + MOD_DAV_SVN_PORT: 8090 + APACHE_LOG_DIR: /var/log/rhodecode/svn + MOD_DAV_SVN_CONF_FILE: /etc/rhodecode/conf/svn/mod_dav_svn.conf + + + healthcheck: + test: [ "CMD", "curl", "-A", "RhodeCode-Healthcheck", "-s", "-o", "/dev/null", "-w", "'%{http_code}'", "http://127.0.0.1:10020/_admin/ops/ping" ] + timeout: 30s + interval: 60s + retries: 10 + + depends_on: + - database + - redis + + volumes: + - confvolume:/etc/rhodecode/conf + - logvolume:/var/log/rhodecode + - rhodecode_repos:/var/opt/rhodecode_repo_store + - datavolume:/var/opt/rhodecode_data + + tmpfs: + - /data_ramdisk:size=1G + + vcsserver: + networks: + - rhodecode_network + image: rhodecode/app:${RC_VERSION:?specify-RC_VERSION-env-var} + stdin_open: true + tty: true + restart: unless-stopped + command: [ + "/home/rhodecode/.rccontrol/vcsserver-1/profile/bin/gunicorn", + "--name=gunicorn-vcsserver-1", + "--error-logfile=-", + "--paster=/etc/rhodecode/conf/compose/vcsserver.optimized.ini", + "--config=/etc/rhodecode/conf/gunicorn_conf.py" + ] + ports: + - "127.0.0.1::10010" + + healthcheck: + test: [ "CMD", "curl", "-A", "RhodeCode-Healthcheck", "-s", "-o", "/dev/null", "-w", "'%{http_code}'", "http://127.0.0.1:10010/status" ] + timeout: 30s + interval: 60s + retries: 10 + + environment: + RC_APP_TYPE: rhodecode_vcsserver + RC_APP_PROC: 1 + MAIN_INI_PATH: /etc/rhodecode/conf/compose/vcsserver.optimized.ini + ENV_RC_BASE_URL: ${RHODECODE_BASE_URL} + SSL_CERT_FILE: "/etc/rhodecode/conf/ca-bundle.crt" + REQUESTS_CA_BUNDLE: "/etc/rhodecode/conf/ca-bundle.crt" + GIT_SSL_CAINFO: "/etc/rhodecode/conf/ca-bundle.crt" + + depends_on: + - redis + + volumes: + - confvolume:/etc/rhodecode/conf + - logvolume:/var/log/rhodecode + - rhodecode_repos:/var/opt/rhodecode_repo_store + - datavolume:/var/opt/rhodecode_data + + celery: + networks: + - rhodecode_network + image: rhodecode/app:${RC_VERSION:?specify-RC_VERSION-env-var} + stdin_open: true + tty: true + restart: unless-stopped + command: [ + "/var/opt/rhodecode_bin/bin/celery", + "worker", + "--task-events", + 
"--autoscale=10,2", + "--no-color", + "--app=rhodecode.lib.celerylib.loader", + "--loglevel=DEBUG", + "--ini=/etc/rhodecode/conf/compose/rhodecode.optimized.ini" + ] + + environment: + RC_APP_TYPE: rhodecode_celery + RC_APP_PROC: 1 + MAIN_INI_PATH: /etc/rhodecode/conf/compose/rhodecode.optimized.ini + ENV_RC_BASE_URL: ${RHODECODE_BASE_URL} + SSL_CERT_FILE: "/etc/rhodecode/conf/ca-bundle.crt" + REQUESTS_CA_BUNDLE: "/etc/rhodecode/conf/ca-bundle.crt" + GIT_SSL_CAINFO: "/etc/rhodecode/conf/ca-bundle.crt" + + depends_on: + - database + - redis + + volumes: + - confvolume:/etc/rhodecode/conf + - logvolume:/var/log/rhodecode + - rhodecode_repos:/var/opt/rhodecode_repo_store + - datavolume:/var/opt/rhodecode_data + + beat: + # This service is not scalable + networks: + - rhodecode_network + image: rhodecode/app:${RC_VERSION:?specify-RC_VERSION-env-var} + stdin_open: true + tty: true + restart: unless-stopped + command: [ + "/var/opt/rhodecode_bin/bin/celery", + "beat", + "--no-color", + "--app=rhodecode.lib.celerylib.loader", + "--scheduler=rhodecode.lib.celerylib.scheduler.RcScheduler", + "--loglevel=DEBUG", + "--ini=/etc/rhodecode/conf/compose/rhodecode.optimized.ini" + ] + + environment: + RC_APP_TYPE: rhodecode_beat + RC_APP_PROC: 1 + MAIN_INI_PATH: /etc/rhodecode/conf/compose/rhodecode.optimized.ini + ENV_RC_BASE_URL: ${RHODECODE_BASE_URL} + SSL_CERT_FILE: "/etc/rhodecode/conf/ca-bundle.crt" + REQUESTS_CA_BUNDLE: "/etc/rhodecode/conf/ca-bundle.crt" + GIT_SSL_CAINFO: "/etc/rhodecode/conf/ca-bundle.crt" + + depends_on: + - database + - redis + + volumes: + - confvolume:/etc/rhodecode/conf + - logvolume:/var/log/rhodecode + - rhodecode_repos:/var/opt/rhodecode_repo_store + - datavolume:/var/opt/rhodecode_data + + svn: + networks: + - rhodecode_network + image: rhodecode/app:${RC_VERSION:?specify-RC_VERSION-env-var} + stdin_open: true + tty: true + restart: unless-stopped + command: ["apachectl", "-D", "FOREGROUND"] + + environment: + RC_APP_TYPE: rhodecode_svn + + # SVN Specific + MOD_DAV_SVN_PORT: 8090 + APACHE_LOG_DIR: /var/log/rhodecode/svn + MOD_DAV_SVN_CONF_FILE: /etc/rhodecode/conf/svn/mod_dav_svn.conf + + ports: + - "127.0.0.1::8090" + + healthcheck: + test: [ "CMD", "curl", "-A", "RhodeCode-Healthcheck", "-s", "-o", "/dev/null", "-w", "'%{http_code}'", "http://127.0.0.1:8090/_server_status" ] + timeout: 30s + interval: 60s + retries: 10 + + volumes: + - confvolume:/etc/rhodecode/conf + - logvolume:/var/log/rhodecode + - rhodecode_repos:/var/opt/rhodecode_repo_store + + sshd: + networks: + - rhodecode_network + image: rhodecode/app:${RC_VERSION:?specify-RC_VERSION-env-var} + stdin_open: true + tty: true + restart: unless-stopped + command: ["/usr/sbin/sshd", "-f", "/etc/rhodecode/sshd_config", "-D", "-e"] + + environment: + RC_APP_TYPE: rhodecode_sshd + SSH_BOOTSTRAP: 1 + + ports: + # set from .env file + - "${RC_SSH_PORT:?must-specify-ssh-port}:22" + + depends_on: {} + + volumes: + - confvolume:/etc/rhodecode/conf + - logvolume:/var/log/rhodecode + - rhodecode_repos:/var/opt/rhodecode_repo_store + - datavolume:/var/opt/rhodecode_data + + elasticsearch: + networks: + - rhodecode_network + image: docker.elastic.co/elasticsearch/elasticsearch:6.8.13 + + environment: + - cluster.name=elasticsearch-cluster + - network.host=0.0.0.0 + - bootstrap.memory_lock=true + - discovery.type=single-node + - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + + ulimits: + memlock: + soft: -1 + hard: -1 + + volumes: + - es_data:/usr/share/elasticsearch/data + + channelstream: + networks: + - rhodecode_network + image: 
channelstream/channelstream:0.7.1 + restart: unless-stopped + + ports: + - "127.0.0.1:9800:9800" + + command: ["channelstream", "-i", "/etc/rhodecode/conf/compose/channelstream.ini"] + + environment: + CHANNELSTREAM_ALLOW_POSTING_FROM: 0.0.0.0 + + healthcheck: + test: [ "CMD", "curl", "-s", "-o", "/dev/null", "-w", "'%{http_code}'", "http://127.0.0.1:9800/admin/sign_in" ] + timeout: 30s + interval: 60s + retries: 10 + + volumes: + - confvolume:/etc/rhodecode/conf + - logvolume:/var/log/rhodecode + + redis: + networks: + - rhodecode_network + image: rhodecode/redis:6.0.10 + build: + context: . + dockerfile: service/redis/rhodecode_redis.dockerfile + network: rhodecode_network + + restart: unless-stopped + + ports: + - "127.0.0.1::6379" + + volumes: + - logvolume:/var/log/rhodecode + + database: + networks: + - rhodecode_network + image: rhodecode/database:13.1 + build: + context: . + dockerfile: service/database/rhodecode_database.dockerfile + network: rhodecode_network + restart: unless-stopped + + ports: + - "127.0.0.1::5432" + + environment: + POSTGRES_DB: ${POSTGRES_DB:?must-specify-db-name} + POSTGRES_USER: rhodecode + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:?must-specify-db-password} + + volumes: + # save the pg_data volume + - pg_data:/var/lib/postgresql/data + - logvolume:/var/log/rhodecode + + nginx: + networks: + - rhodecode_network + image: rhodecode/nginx:1.19.6 + build: + context: . + dockerfile: service/nginx/rhodecode_nginx.dockerfile + network: rhodecode_network + restart: unless-stopped + + ports: + # set from .env file + - "${RC_HTTP_PORT:?must-specify-http-port}:80" + - "${RC_HTTPS_PORT:?must-specify-https-port}:443" + + healthcheck: + # change port 80 to 443 when only using SSL + test: [ "CMD", "curl", "-A", "RhodeCode-Healthcheck", "-s", "-o", "/dev/null", "-w", "'%{http_code}'", "http://127.0.0.1:80/_admin/ops/ping" ] + timeout: 30s + interval: 60s + retries: 10 + + depends_on: + - channelstream + + volumes: + - confvolume:/etc/rhodecode/conf + - logvolume:/var/log/rhodecode + + volumes_from: + - rhodecode:ro diff --git a/entrypoints.d/entrypoint.sh b/entrypoints.d/entrypoint.sh new file mode 100755 index 0000000..94a4ee1 --- /dev/null +++ b/entrypoints.d/entrypoint.sh @@ -0,0 +1,143 @@ +#!/usr/bin/env bash +set -Eeo pipefail + +function config_copy() { + # copy over the configs if they don't exist + for f in /etc/rhodecode/conf_build/*; do + fname=${f##*/} + if [ ! -f "/etc/rhodecode/conf/$fname" ]; then + echo "$fname not exists copying over as default conf..." + cp -v $f /etc/rhodecode/conf/$fname + fi + done + +} + +function db_upgrade() { + echo 'ENTRYPOINT: Upgrading database.' + /var/opt/rhodecode_bin/bin/rc-upgrade-db $MAIN_INI_PATH --force-yes +} + +function db_init() { + + gosu $RC_USER \ + /home/$RC_USER/.rccontrol/$RC_TYPE_ID/profile/bin/rc-setup-app \ + $MAIN_INI_PATH \ + --force-yes \ + --skip-existing-db \ + --user=$RHODECODE_USER_NAME \ + --password=$RHODECODE_USER_PASS \ + --email=$RHODECODE_USER_EMAIL \ + --repos=$RHODECODE_REPO_DIR +} + +function rhodecode_setup() { + for f in /home/$RC_USER/.rccontrol/bootstrap/*.py; do + fname=${f##*/} + + echo "Running script $fname on $RC_TYPE_ID" + gosu $RC_USER /home/$RC_USER/.rccontrol/$RC_TYPE_ID/profile/bin/rc-ishell $MAIN_INI_PATH <<< "%run $f" + + done +} + +function generate_ssh_keys() { + keys_dir=/etc/rhodecode/conf/ssh + + if [[ ! -d $keys_dir ]]; then + echo "Generating $keys_dir/ssh_host_rsa_key ..." + gosu "$RC_USER" mkdir -p $keys_dir + fi + + # Generate ssh host key for the first time + if [[ ! 
-f $keys_dir/ssh_host_rsa_key ]]; then + echo "Generating $keys_dir/ssh_host_rsa_key ..." + gosu "$RC_USER" ssh-keygen -f $keys_dir/ssh_host_rsa_key -N '' -t rsa + gosu "$RC_USER" chmod 0600 $keys_dir/ssh_host_rsa_key + fi + + if [[ ! -f $keys_dir/ssh_host_ecdsa_key ]]; then + echo "Generating $keys_dir/ssh_host_ecdsa_key ..." + gosu "$RC_USER" ssh-keygen -f $keys_dir/ssh_host_ecdsa_key -N '' -t ecdsa + gosu "$RC_USER" chmod 0600 $keys_dir/ssh_host_ecdsa_key + fi + + if [[ ! -f $keys_dir/ssh_host_ed25519_key ]]; then + echo "Generating $keys_dir/ssh_host_ed25519_key ..." + gosu "$RC_USER" ssh-keygen -f $keys_dir/ssh_host_ed25519_key -N '' -t ed25519 + gosu "$RC_USER" chmod 0600 $keys_dir/ssh_host_ed25519_key + fi + + if [[ ! -f $keys_dir/authorized_keys ]]; then + echo "Generating $keys_dir/authorized_keys..." + gosu "$RC_USER" touch $keys_dir/authorized_keys + fi + + sed -i "s/AllowUsers USER/AllowUsers $RC_USER/" $SSHD_CONF_FILE +} + + +echo "ENTRYPOINT: Running $RC_APP_TYPE with cmd '$1'" + +if [ "$SSH_BOOTSTRAP" = 1 ]; then + # generate SSH keys + generate_ssh_keys +fi + +isLikelyWeb= +case "$1" in + supervisord | pserve | gunicorn ) isLikelyWeb=1 ;; +esac + +if [[ $RC_APP_TYPE = "rhodecode_http" ]]; then + + DB_INIT_FILE=/var/opt/rhodecode_data/.dbinit_bootstrapped + # Avoid DB_INIT to run 2x + if [[ ! -e $DB_INIT_FILE ]]; then + echo "ENTRYPOINT: Starting $RC_APP_TYPE initial db bootstrap" + + db_init + + gosu $RC_USER touch "$DB_INIT_FILE" + echo "ENTRYPOINT: marked as db-bootstrapped at $DB_INIT_FILE" + + fi + + BOOTSTRAP_FILE=/var/opt/rhodecode_data/.setup_bootstrapped + # Avoid destroying bootstrapping by simple start/stop + if [[ ! -e $BOOTSTRAP_FILE ]]; then + echo "ENTRYPOINT: Starting $RC_APP_TYPE initial bootstrap" + + # copy over default configuration files + config_copy + + # setup application with specific options + if [ "$SETUP_APP" = 1 ]; then + rhodecode_setup + fi + + gosu $RC_USER touch "$BOOTSTRAP_FILE" + echo "ENTRYPOINT: marked as setup-bootstrapped at $BOOTSTRAP_FILE" + + fi + + if [ "$DB_UPGRADE" = 1 ]; then + # run DB migrate + db_upgrade + fi + +fi + + +if [ "$RC_APP_PROC" = 1 ]; then + # Fix problem with zombie processes when using executables like supervisord/gunicorn + set -- tini -- "$@" + set -- gosu $RC_USER "$@" +fi + +if [ "$RC_APP_TYPE" = "rhodecode_sshd" ]; then + # Fix problem with Missing privilege separation directory error + mkdir -p /run/sshd +fi + +exec "$@" diff --git a/entrypoints.d/ssh-entrypoint.sh b/entrypoints.d/ssh-entrypoint.sh new file mode 100755 index 0000000..1aede01 --- /dev/null +++ b/entrypoints.d/ssh-entrypoint.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +set -Eeo pipefail + + +function generate_ssh_keys() { + keys_dir=/etc/rhodecode/conf/ssh + + if [[ ! -d $keys_dir ]]; then + echo "Generating $keys_dir/ssh_host_rsa_key ..." + mkdir -p $keys_dir + fi + + # Generate ssh host key for the first time + if [[ ! -f $keys_dir/ssh_host_rsa_key ]]; then + echo "Generating $keys_dir/ssh_host_rsa_key ..." + ssh-keygen -f $keys_dir/ssh_host_rsa_key -N '' -t rsa + chmod 0600 $keys_dir/ssh_host_rsa_key + fi + + if [[ ! -f $keys_dir/ssh_host_ecdsa_key ]]; then + echo "Generating $keys_dir/ssh_host_ecdsa_key ..." + ssh-keygen -f $keys_dir/ssh_host_ecdsa_key -N '' -t ecdsa + chmod 0600 $keys_dir/ssh_host_ecdsa_key + fi + + if [[ ! -f $keys_dir/ssh_host_ed25519_key ]]; then + echo "Generating $keys_dir/ssh_host_ed25519_key ..." 
+ ssh-keygen -f $keys_dir/ssh_host_ed25519_key -N '' -t ed25519 + chmod 0600 $keys_dir/ssh_host_ed25519_key + fi + + if [[ ! -f $keys_dir/authorized_keys ]]; then + echo "Generating $keys_dir/authorized_keys..." + touch $keys_dir/authorized_keys + fi + + sed -i "s/AllowUsers USER/AllowUsers $RC_USER/" $SSHD_CONF_FILE +} + +echo "ENTRYPOINT: Running with cmd '$1'" + + +if [ "$SSH_BOOTSTRAP" = 1 ]; then + # generate SSH keys + generate_ssh_keys +fi + +mkdir -p /run/sshd +exec "$@" diff --git a/entrypoints.d/svn-entrypoint.sh b/entrypoints.d/svn-entrypoint.sh new file mode 100644 index 0000000..d7e141b --- /dev/null +++ b/entrypoints.d/svn-entrypoint.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +set -Eeo pipefail + + +BOOTSTRAP_FILE=.bootstrapped + +# Avoid destroying bootstrapping by simple start/stop +if [[ ! -e /.$BOOTSTRAP_FILE ]]; then + echo "ENTRYPOINT: Starting $RC_APP_TYPE bootstrap" + + touch $MOD_DAV_SVN_CONF_FILE + touch /$BOOTSTRAP_FILE +fi + +exec "$@" \ No newline at end of file diff --git a/logs/nginx/.dirkeep b/logs/nginx/.dirkeep new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/logs/nginx/.dirkeep diff --git a/logs/svn/.dirkeep b/logs/svn/.dirkeep new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/logs/svn/.dirkeep diff --git a/service/database/customized.conf b/service/database/customized.conf new file mode 100644 index 0000000..0e775b1 --- /dev/null +++ b/service/database/customized.conf @@ -0,0 +1,8 @@ +max_connections = 400 +shared_buffers = 1GB +effective_cache_size = 3GB +work_mem = 2621kB +maintenance_work_mem = 256MB +checkpoint_completion_target = 0.9 +wal_buffers = 16MB +default_statistics_target = 100 diff --git a/service/database/rhodecode_database.dockerfile b/service/database/rhodecode_database.dockerfile new file mode 100644 index 0000000..3dad39f --- /dev/null +++ b/service/database/rhodecode_database.dockerfile @@ -0,0 +1,4 @@ +FROM library/postgres:13.1 + +COPY service/database/customized.conf /etc/conf.d/pg_customized.conf +CMD ["postgres", "-c", "log_statement=ddl"] \ No newline at end of file diff --git a/service/nginx/http.conf b/service/nginx/http.conf new file mode 100644 index 0000000..b1eca11 --- /dev/null +++ b/service/nginx/http.conf @@ -0,0 +1,114 @@ +# read more here http://tautt.com/best-nginx-configuration-for-security/ + +# config to don't allow the browser to render the page inside an frame or iframe +# and avoid clickjacking http://en.wikipedia.org/wiki/Clickjacking +# if you need to allow [i]frames, you can use SAMEORIGIN or even set an uri with ALLOW-FROM uri +# https://developer.mozilla.org/en-US/docs/HTTP/X-Frame-Options +#add_header X-Frame-Options SAMEORIGIN; + +# when serving user-supplied content, include a X-Content-Type-Options: nosniff header along with the Content-Type: header, +# to disable content-type sniffing on some browsers. +# https://www.owasp.org/index.php/List_of_useful_HTTP_headers +# currently suppoorted in IE > 8 http://blogs.msdn.com/b/ie/archive/2008/09/02/ie8-security-part-vi-beta-2-update.aspx +# http://msdn.microsoft.com/en-us/library/ie/gg622941(v=vs.85).aspx +# 'soon' on Firefox https://bugzilla.mozilla.org/show_bug.cgi?id=471020 +#add_header X-Content-Type-Options nosniff; + +# This header enables the Cross-site scripting (XSS) filter built into most recent web browsers. +# It's usually enabled by default anyway, so the role of this header is to re-enable the filter for +# this particular website if it was disabled by the user. 
+# https://www.owasp.org/index.php/List_of_useful_HTTP_headers +#add_header X-XSS-Protection "1; mode=block"; + +# with Content Security Policy (CSP) enabled(and a browser that supports it(http://caniuse.com/#feat=contentsecuritypolicy), +# you can tell the browser that it can only download content from the domains you explicitly allow +# http://www.html5rocks.com/en/tutorials/security/content-security-policy/ +# https://www.owasp.org/index.php/Content_Security_Policy +# I need to change our application code so we can increase security by disabling 'unsafe-inline' 'unsafe-eval' +# directives for css and js(if you have inline css or js, you will need to keep it too). +# more: http://www.html5rocks.com/en/tutorials/security/content-security-policy/#inline-code-considered-harmful +#add_header Content-Security-Policy "default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval' https://ssl.google-analytics.com https://assets.zendesk.com https://connect.facebook.net; img-src 'self' https://ssl.google-analytics.com https://s-static.ak.facebook.com https://assets.zendesk.com; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com https://assets.zendesk.com; font-src 'self' https://themes.googleusercontent.com; frame-src https://assets.zendesk.com https://www.facebook.com https://s-static.ak.facebook.com https://tautt.zendesk.com; object-src 'none'"; + +## rate limiter for certain pages to prevent brute force attacks +limit_req_zone $binary_remote_addr zone=http_req_limit:10m rate=1r/s; + +## custom log format +log_format http_log_custom '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_time $upstream_response_time $pipe'; + +server { + listen 80 default; + # ensure we get the proper Docker DNS resolver for load balancing. 
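+    # 127.0.0.11 is the DNS server the Docker engine embeds into every
+    # user-defined network; it is what resolves the service names used
+    # below (rhodecode, channelstream) to container addresses.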
+ resolver 127.0.0.11 ipv6=off; + server_name localhost 127.0.0.1; + access_log /var/log/rhodecode/nginx/rhodecode.http.access.log http_log_custom; + error_log /var/log/rhodecode/nginx/rhodecode.http.error.log; + + # uncomment to redirect http traffic to https + #return 301 https://$host$request_uri; + + client_body_buffer_size 128k; + # maximum number and size of buffers for large headers to read from client request + large_client_header_buffers 16 256k; + + ## serve static files by nginx, recommended + location /_static/rhodecode { + gzip on; + gzip_min_length 500; + gzip_proxied any; + gzip_comp_level 4; + gzip_types text/css text/javascript text/xml text/plain text/x-component application/javascript application/json application/xml application/rss+xml font/truetype font/opentype application/vnd.ms-fontobject image/svg+xml; + gzip_vary on; + gzip_disable "msie6"; + expires 60d; + + alias /var/opt/rhodecode_data/static; + } + + ## channelstream location handler, if channelstream live chat and notifications + ## are enable this will proxy the requests to channelstream websocket server + location /_channelstream { + rewrite /_channelstream/(.*) /$1 break; + gzip off; + tcp_nodelay off; + + proxy_connect_timeout 10; + proxy_send_timeout 10m; + proxy_read_timeout 10m; + + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Url-Scheme $scheme; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + + proxy_pass http://channelstream:9800; + } + + ## rate limit this endpoint to prevent login page brute-force attacks + location /_admin/login { + limit_req zone=http_req_limit burst=10 nodelay; + try_files $uri @rhodecode_http; + } + + location / { + include /etc/nginx/proxy.conf; + try_files $uri @rhodecode_http; + } + + location @rhodecode_http { + include /etc/nginx/proxy.conf; + proxy_pass http://rhodecode:10020; + } + + ## Custom 502 error page. + ## Will be displayed while RhodeCode server is turned off + error_page 502 /502.html; + location = /502.html { + root /var/opt/static; + } + +} diff --git a/service/nginx/nginx.conf b/service/nginx/nginx.conf new file mode 100644 index 0000000..f5e4f6f --- /dev/null +++ b/service/nginx/nginx.conf @@ -0,0 +1,30 @@ +user root; +worker_processes 2; + +pid /var/run/nginx.pid; +error_log /var/log/rhodecode/nginx/default.error.log warn; + +events { + worker_connections 1024; + # multi_accept on; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] ' + '"$request" $status $body_bytes_sent ' + '"$http_referer" "$http_user_agent" ' + '$request_time $upstream_response_time $pipe'; + + access_log /var/log/rhodecode/nginx/default.access.log main; + + sendfile on; + tcp_nopush on; + tcp_nodelay on; + keepalive_timeout 65; + types_hash_max_size 2048; + + include /etc/nginx/sites-enabled/*.conf; +} diff --git a/service/nginx/proxy.conf b/service/nginx/proxy.conf new file mode 100644 index 0000000..7741345 --- /dev/null +++ b/service/nginx/proxy.conf @@ -0,0 +1,36 @@ +proxy_redirect off; +proxy_set_header Host $http_host; + +## If you use HTTPS make sure you disable gzip compression +## to be safe against BREACH attack. 
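+## BREACH recovers secrets from encrypted responses by measuring how the
+## response size changes as attacker-controlled input is compressed next
+## to them, hence compression is kept off at the proxy layer.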
+gzip off; + +# Don't buffer requests in NGINX stream them using chunked-encoding +proxy_buffering off; + +## This is also required for later GIT to use streaming. +## Works only for Nginx 1.7.11 and newer +proxy_request_buffering off; +proxy_http_version 1.1; + +## Set this to a larger number if you experience timeouts +## or 413 Request Entity Too Large, 10GB is enough for most cases +client_max_body_size 10240m; + +## needed for container auth +# proxy_set_header REMOTE_USER $remote_user; +# proxy_set_header X-Forwarded-User $remote_user; + +proxy_set_header X-Url-Scheme $scheme; +proxy_set_header X-Host $http_host; +proxy_set_header X-Real-IP $remote_addr; +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header Proxy-host $proxy_host; + +proxy_connect_timeout 7200; +proxy_send_timeout 7200; +proxy_read_timeout 7200; +proxy_buffers 8 32k; + +add_header X-Frame-Options SAMEORIGIN; +add_header Strict-Transport-Security "max-age=31536000; includeSubdomains;"; diff --git a/service/nginx/rhodecode_nginx.dockerfile b/service/nginx/rhodecode_nginx.dockerfile new file mode 100644 index 0000000..f36d11e --- /dev/null +++ b/service/nginx/rhodecode_nginx.dockerfile @@ -0,0 +1,13 @@ +FROM library/nginx:1.19.6 + +ENV NGINX_ENTRYPOINT_QUIET_LOGS=1 + +RUN mkdir -p /etc/nginx/sites-enabled/ +RUN mkdir -p /var/log/rhodecode/nginx +COPY service/nginx/nginx.conf /etc/nginx/nginx.conf +COPY service/nginx/http.conf /etc/nginx/sites-enabled/http.conf +COPY service/nginx/proxy.conf /etc/nginx/proxy.conf + +VOLUME /var/log/rhodecode + +#TODO enable amplify \ No newline at end of file diff --git a/service/redis/redis.conf b/service/redis/redis.conf new file mode 100644 index 0000000..eabdc21 --- /dev/null +++ b/service/redis/redis.conf @@ -0,0 +1,1865 @@ +# Redis configuration file example. +# +# Note that in order to read the configuration file, Redis must be +# started with the file path as first argument: +# +# ./redis-server /path/to/redis.conf + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Note that option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include /path/to/local.conf +# include /path/to/other.conf + +################################## MODULES ##################################### + +# Load modules at startup. If the server is not able to load modules +# it will abort. It is possible to use multiple loadmodule directives. 
+# +# loadmodule /path/to/my_module.so +# loadmodule /path/to/other_module.so + +################################## NETWORK ##################################### + +# By default, if no "bind" configuration directive is specified, Redis listens +# for connections from all available network interfaces on the host machine. +# It is possible to listen to just one or multiple selected interfaces using +# the "bind" configuration directive, followed by one or more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +# bind 127.0.0.1 ::1 +# +# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the +# internet, binding to all the interfaces is dangerous and will expose the +# instance to everybody on the internet. So by default we uncomment the +# following bind directive, that will force Redis to listen only on the +# IPv4 loopback interface address (this means Redis will only be able to +# accept client connections from the same host that it is running on). +# +# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES +# JUST COMMENT OUT THE FOLLOWING LINE. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +#bind 127.0.0.1 + +# Protected mode is a layer of security protection, in order to avoid that +# Redis instances left open on the internet are accessed and exploited. +# +# When protected mode is on and if: +# +# 1) The server is not binding explicitly to a set of addresses using the +# "bind" directive. +# 2) No password is configured. +# +# The server only accepts connections from clients connecting from the +# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain +# sockets. +# +# By default protected mode is enabled. You should disable it only if +# you are sure you want clients from other hosts to connect to Redis +# even if no authentication is configured, nor a specific set of interfaces +# are explicitly listed using the "bind" directive. +protected-mode no + +# Accept connections on the specified port, default is 6379 (IANA #815344). +# If port 0 is specified Redis will not listen on a TCP socket. +port 6379 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need a high backlog in order +# to avoid slow clients connection issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# Unix socket. +# +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Force network equipment in the middle to consider the connection to be +# alive. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 300 seconds, which is the new +# Redis default starting with Redis 3.2.1. 
+tcp-keepalive 300 + +################################# TLS/SSL ##################################### + +# By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration +# directive can be used to define TLS-listening ports. To enable TLS on the +# default port, use: +# +# port 0 +# tls-port 6379 + +# Configure a X.509 certificate and private key to use for authenticating the +# server to connected clients, masters or cluster peers. These files should be +# PEM formatted. +# +# tls-cert-file redis.crt +# tls-key-file redis.key + +# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange: +# +# tls-dh-params-file redis.dh + +# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL +# clients and peers. Redis requires an explicit configuration of at least one +# of these, and will not implicitly use the system wide configuration. +# +# tls-ca-cert-file ca.crt +# tls-ca-cert-dir /etc/ssl/certs + +# By default, clients (including replica servers) on a TLS port are required +# to authenticate using valid client side certificates. +# +# If "no" is specified, client certificates are not required and not accepted. +# If "optional" is specified, client certificates are accepted and must be +# valid if provided, but are not required. +# +# tls-auth-clients no +# tls-auth-clients optional + +# By default, a Redis replica does not attempt to establish a TLS connection +# with its master. +# +# Use the following directive to enable TLS on replication links. +# +# tls-replication yes + +# By default, the Redis Cluster bus uses a plain TCP connection. To enable +# TLS for the bus protocol, use the following directive: +# +# tls-cluster yes + +# Explicitly specify TLS versions to support. Allowed values are case insensitive +# and include "TLSv1", "TLSv1.1", "TLSv1.2", "TLSv1.3" (OpenSSL >= 1.1.1) or +# any combination. To enable only TLSv1.2 and TLSv1.3, use: +# +# tls-protocols "TLSv1.2 TLSv1.3" + +# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information +# about the syntax of this string. +# +# Note: this configuration applies only to <= TLSv1.2. +# +# tls-ciphers DEFAULT:!MEDIUM + +# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more +# information about the syntax of this string, and specifically for TLSv1.3 +# ciphersuites. +# +# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256 + +# When choosing a cipher, use the server's preference instead of the client +# preference. By default, the server follows the client's preference. +# +# tls-prefer-server-ciphers yes + +# By default, TLS session caching is enabled to allow faster and less expensive +# reconnections by clients that support it. Use the following directive to disable +# caching. +# +# tls-session-caching no + +# Change the default number of TLS sessions cached. A zero value sets the cache +# to unlimited size. The default size is 20480. +# +# tls-session-cache-size 5000 + +# Change the default timeout of cached TLS sessions. The default timeout is 300 +# seconds. +# +# tls-session-cache-timeout 60 + +################################# GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +daemonize no + +# If you run Redis from upstart or systemd, Redis can interact with your +# supervision tree. 
Options: +# supervised no - no supervision interaction +# supervised upstart - signal upstart by putting Redis into SIGSTOP mode +# requires "expect stop" in your upstart job config +# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET +# supervised auto - detect upstart or systemd method based on +# UPSTART_JOB or NOTIFY_SOCKET environment variables +# Note: these supervision methods only signal "process is ready." +# They do not enable continuous pings back to your supervisor. +supervised no + +# If a pid file is specified, Redis writes it where specified at startup +# and removes it at exit. +# +# When the server runs non daemonized, no pid file is created if none is +# specified in the configuration. When the server is daemonized, the pid file +# is used even if not specified, defaulting to "/var/run/redis.pid". +# +# Creating a pid file is best effort: if Redis is not able to create it +# nothing bad happens, the server will start and run normally. +pidfile /var/run/redis_6379.pid + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel notice + +# Specify the log file name. Also the empty string can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile "" + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +# By default Redis shows an ASCII art logo only when started to log to the +# standard output and if the standard output is a TTY. Basically this means +# that normally a logo is displayed only in interactive sessions. +# +# However it is possible to force the pre-4.0 behavior and always show a +# ASCII art logo in startup logs by setting the following option to yes. +always-show-logo yes + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behavior will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving completely by commenting out all "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +save 900 1 +save 300 10 +save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. 
+# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# By default compression is enabled as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# The filename where to dump the DB +dbfilename dump.rdb + +# Remove RDB files used by replication in instances without persistence +# enabled. By default this option is disabled, however there are environments +# where for regulations or other security concerns, RDB files persisted on +# disk by masters in order to feed replicas, or stored on disk by replicas +# in order to load them for the initial synchronization, should be deleted +# ASAP. Note that this option ONLY WORKS in instances that have both AOF +# and RDB persistence disabled, otherwise is completely ignored. +# +# An alternative (and sometimes better) way to obtain the same effect is +# to use diskless replication on both master and replicas instances. However +# in the case of replicas, diskless is not always an option. +rdb-del-sync-files no + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Replica replication. Use replicaof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# +------------------+ +---------------+ +# | Master | ---> | Replica | +# | (receive writes) | | (exact copy) | +# +------------------+ +---------------+ +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of replicas. +# 2) Redis replicas are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition replicas automatically try to reconnect to masters +# and resynchronize with them. 
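+#
+# The directive takes the master's address and port, for instance:
+#
+#   replicaof 192.168.1.100 6379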
+# +# replicaof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the replica to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the replica request. +# +# masterauth +# +# However this is not enough if you are using Redis ACLs (for Redis version +# 6 or greater), and the default user is not capable of running the PSYNC +# command and/or other commands needed for replication. In this case it's +# better to configure a special user to use with replication, and specify the +# masteruser configuration as such: +# +# masteruser +# +# When masteruser is specified, the replica will authenticate against its +# master using the new AUTH form: AUTH . + +# When a replica loses its connection with the master, or when the replication +# is still in progress, the replica can act in two different ways: +# +# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) If replica-serve-stale-data is set to 'no' the replica will reply with +# an error "SYNC with master in progress" to all commands except: +# INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE, +# UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST, +# HOST and LATENCY. +# +replica-serve-stale-data yes + +# You can configure a replica instance to accept writes or not. Writing against +# a replica instance may be useful to store some ephemeral data (because data +# written on a replica will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default replicas are read-only. +# +# Note: read only replicas are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only replica exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +# security of read only replicas using 'rename-command' to shadow all the +# administrative / dangerous commands. +replica-read-only yes + +# Replication SYNC strategy: disk or socket. +# +# New replicas and reconnecting replicas that are not able to continue the +# replication process just receiving differences, need to do what is called a +# "full synchronization". An RDB file is transmitted from the master to the +# replicas. +# +# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the replicas incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to replica sockets, without touching the disk at all. +# +# With disk-backed replication, while the RDB file is generated, more replicas +# can be queued and served with the RDB file as soon as the current child +# producing the RDB file finishes its work. With diskless replication instead +# once the transfer starts, new replicas arriving will be queued and a new +# transfer will start when the current one terminates. 
+# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple +# replicas will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync no + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that transfers the RDB via socket +# to the replicas. +# +# This is important since once the transfer starts, it is not possible to serve +# new replicas arriving, that will be queued for the next RDB transfer, so the +# server waits a delay in order to let more replicas arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# ----------------------------------------------------------------------------- +# WARNING: RDB diskless load is experimental. Since in this setup the replica +# does not immediately store an RDB on disk, it may cause data loss during +# failovers. RDB diskless load + Redis modules not handling I/O reads may also +# cause Redis to abort in case of I/O errors during the initial synchronization +# stage with the master. Use only if your do what you are doing. +# ----------------------------------------------------------------------------- +# +# Replica can load the RDB it reads from the replication link directly from the +# socket, or store the RDB to a file and read that file after it was completely +# received from the master. +# +# In many cases the disk is slower than the network, and storing and loading +# the RDB file may increase replication time (and even increase the master's +# Copy on Write memory and salve buffers). +# However, parsing the RDB file directly from the socket may mean that we have +# to flush the contents of the current database before the full rdb was +# received. For this reason we have the following options: +# +# "disabled" - Don't use diskless load (store the rdb file to the disk first) +# "on-empty-db" - Use diskless load only when it is completely safe. +# "swapdb" - Keep a copy of the current db contents in RAM while parsing +# the data directly from the socket. note that this requires +# sufficient memory, if you don't have it, you risk an OOM kill. +repl-diskless-load disabled + +# Replicas send PINGs to server in a predefined interval. It's possible to +# change this interval with the repl_ping_replica_period option. The default +# value is 10 seconds. +# +# repl-ping-replica-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of replica. +# 2) Master timeout from the point of view of replicas (data, pings). +# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-replica-period otherwise a timeout will be detected +# every time there is low traffic between the master and the replica. The default +# value is 60 seconds. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the replica socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to replicas. 
But this can add a delay for +# the data to appear on the replica side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the replica side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and replicas are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# replica data when replicas are disconnected for some time, so that when a +# replica wants to reconnect again, often a full resync is not needed, but a +# partial resync is enough, just passing the portion of data the replica +# missed while disconnected. +# +# The bigger the replication backlog, the longer the replica can endure the +# disconnect and later be able to perform a partial resynchronization. +# +# The backlog is only allocated if there is at least one replica connected. +# +# repl-backlog-size 1mb + +# After a master has no connected replicas for some time, the backlog will be +# freed. The following option configures the amount of seconds that need to +# elapse, starting from the time the last replica disconnected, for the backlog +# buffer to be freed. +# +# Note that replicas never free the backlog for timeout, since they may be +# promoted to masters later, and should be able to correctly "partially +# resynchronize" with other replicas: hence they should always accumulate backlog. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The replica priority is an integer number published by Redis in the INFO +# output. It is used by Redis Sentinel in order to select a replica to promote +# into a master if the master is no longer working correctly. +# +# A replica with a low priority number is considered better for promotion, so +# for instance if there are three replicas with priority 10, 100, 25 Sentinel +# will pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the replica as not able to perform the +# role of master, so a replica with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +replica-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N replicas connected, having a lag less or equal than M seconds. +# +# The N replicas need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the replica, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough replicas +# are available, to the specified number of seconds. +# +# For example to require at least 3 replicas with a lag <= 10 seconds use: +# +# min-replicas-to-write 3 +# min-replicas-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-replicas-to-write is set to 0 (feature disabled) and +# min-replicas-max-lag is set to 10. + +# A Redis master is able to list the address and port of the attached +# replicas in different ways. For example the "INFO replication" section +# offers this information, which is used, among other tools, by +# Redis Sentinel in order to discover replica instances. 
+# Another place where this info is available is in the output of the +# "ROLE" command of a master. +# +# The listed IP address and port normally reported by a replica is +# obtained in the following way: +# +# IP: The address is auto detected by checking the peer address +# of the socket used by the replica to connect with the master. +# +# Port: The port is communicated by the replica during the replication +# handshake, and is normally the port that the replica is using to +# listen for connections. +# +# However when port forwarding or Network Address Translation (NAT) is +# used, the replica may actually be reachable via different IP and port +# pairs. The following two options can be used by a replica in order to +# report to its master a specific set of IP and port, so that both INFO +# and ROLE will report those values. +# +# There is no need to use both the options if you need to override just +# the port or the IP address. +# +# replica-announce-ip 5.5.5.5 +# replica-announce-port 1234 + +############################### KEYS TRACKING ################################# + +# Redis implements server assisted support for client side caching of values. +# This is implemented using an invalidation table that remembers, using +# 16 millions of slots, what clients may have certain subsets of keys. In turn +# this is used in order to send invalidation messages to clients. Please +# check this page to understand more about the feature: +# +# https://redis.io/topics/client-side-caching +# +# When tracking is enabled for a client, all the read only queries are assumed +# to be cached: this will force Redis to store information in the invalidation +# table. When keys are modified, such information is flushed away, and +# invalidation messages are sent to the clients. However if the workload is +# heavily dominated by reads, Redis could use more and more memory in order +# to track the keys fetched by many clients. +# +# For this reason it is possible to configure a maximum fill value for the +# invalidation table. By default it is set to 1M of keys, and once this limit +# is reached, Redis will start to evict keys in the invalidation table +# even if they were not modified, just to reclaim memory: this will in turn +# force the clients to invalidate the cached values. Basically the table +# maximum size is a trade off between the memory you want to spend server +# side to track information about who cached what, and the ability of clients +# to retain cached objects in memory. +# +# If you set the value to 0, it means there are no limits, and Redis will +# retain as many keys as needed in the invalidation table. +# In the "stats" INFO section, you can find information about the number of +# keys in the invalidation table at every given moment. +# +# Note: when key tracking is used in broadcasting mode, no memory is used +# in the server side so this setting is useless. +# +# tracking-table-max-keys 1000000 + +################################## SECURITY ################################### + +# Warning: since Redis is pretty fast, an outside user can try up to +# 1 million passwords per second against a modern box. This means that you +# should use very strong passwords, otherwise they will be very easy to break. 
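+# For instance, a one-liner such as
+#
+#   openssl rand -base64 48
+#
+# generates a password of suitable strength.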
+# Note that because the password is really a shared secret between the client +# and the server, and should not be memorized by any human, the password +# can be easily a long string from /dev/urandom or whatever, so by using a +# long and unguessable password no brute force attack will be possible. + +# Redis ACL users are defined in the following format: +# +# user ... acl rules ... +# +# For example: +# +# user worker +@list +@connection ~jobs:* on >ffa9203c493aa99 +# +# The special username "default" is used for new connections. If this user +# has the "nopass" rule, then new connections will be immediately authenticated +# as the "default" user without the need of any password provided via the +# AUTH command. Otherwise if the "default" user is not flagged with "nopass" +# the connections will start in not authenticated state, and will require +# AUTH (or the HELLO command AUTH option) in order to be authenticated and +# start to work. +# +# The ACL rules that describe what a user can do are the following: +# +# on Enable the user: it is possible to authenticate as this user. +# off Disable the user: it's no longer possible to authenticate +# with this user, however the already authenticated connections +# will still work. +# + Allow the execution of that command +# - Disallow the execution of that command +# +@ Allow the execution of all the commands in such category +# with valid categories are like @admin, @set, @sortedset, ... +# and so forth, see the full list in the server.c file where +# the Redis command table is described and defined. +# The special category @all means all the commands, but currently +# present in the server, and that will be loaded in the future +# via modules. +# +|subcommand Allow a specific subcommand of an otherwise +# disabled command. Note that this form is not +# allowed as negative like -DEBUG|SEGFAULT, but +# only additive starting with "+". +# allcommands Alias for +@all. Note that it implies the ability to execute +# all the future commands loaded via the modules system. +# nocommands Alias for -@all. +# ~ Add a pattern of keys that can be mentioned as part of +# commands. For instance ~* allows all the keys. The pattern +# is a glob-style pattern like the one of KEYS. +# It is possible to specify multiple patterns. +# allkeys Alias for ~* +# resetkeys Flush the list of allowed keys patterns. +# > Add this password to the list of valid password for the user. +# For example >mypass will add "mypass" to the list. +# This directive clears the "nopass" flag (see later). +# < Remove this password from the list of valid passwords. +# nopass All the set passwords of the user are removed, and the user +# is flagged as requiring no password: it means that every +# password will work against this user. If this directive is +# used for the default user, every new connection will be +# immediately authenticated with the default user without +# any explicit AUTH command required. Note that the "resetpass" +# directive will clear this condition. +# resetpass Flush the list of allowed passwords. Moreover removes the +# "nopass" status. After "resetpass" the user has no associated +# passwords and there is no way to authenticate without adding +# some password (or setting it as "nopass" later). +# reset Performs the following actions: resetpass, resetkeys, off, +# -@all. The user returns to the same state it has immediately +# after its creation. 
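+#
+# For instance, a user limited to read-only commands on keys under
+# "stats:", protected by a password, could be declared as:
+#
+#   user reports on >use-a-long-random-secret ~stats:* +@read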
+# +# ACL rules can be specified in any order: for instance you can start with +# passwords, then flags, or key patterns. However note that the additive +# and subtractive rules will CHANGE MEANING depending on the ordering. +# For instance see the following example: +# +# user alice on +@all -DEBUG ~* >somepassword +# +# This will allow "alice" to use all the commands with the exception of the +# DEBUG command, since +@all added all the commands to the set of the commands +# alice can use, and later DEBUG was removed. However if we invert the order +# of two ACL rules the result will be different: +# +# user alice on -DEBUG +@all ~* >somepassword +# +# Now DEBUG was removed when alice had yet no commands in the set of allowed +# commands, later all the commands are added, so the user will be able to +# execute everything. +# +# Basically ACL rules are processed left-to-right. +# +# For more information about ACL configuration please refer to +# the Redis web site at https://redis.io/topics/acl + +# ACL LOG +# +# The ACL Log tracks failed commands and authentication events associated +# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked +# by ACLs. The ACL Log is stored in memory. You can reclaim memory with +# ACL LOG RESET. Define the maximum entry length of the ACL Log below. +acllog-max-len 128 + +# Using an external ACL file +# +# Instead of configuring users here in this file, it is possible to use +# a stand-alone file just listing users. The two methods cannot be mixed: +# if you configure users here and at the same time you activate the external +# ACL file, the server will refuse to start. +# +# The format of the external ACL user file is exactly the same as the +# format that is used inside redis.conf to describe users. +# +# aclfile /etc/redis/users.acl + +# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility +# layer on top of the new ACL system. The option effect will be just setting +# the password for the default user. Clients will still authenticate using +# AUTH as usually, or more explicitly with AUTH default +# if they follow the new protocol: both will work. +# +# requirepass foobared + +# Command renaming (DEPRECATED). +# +# ------------------------------------------------------------------------ +# WARNING: avoid using this option if possible. Instead use ACLs to remove +# commands from the default user, and put them only in some admin user you +# create for administrative purposes. +# ------------------------------------------------------------------------ +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to replicas may cause problems. + +################################### CLIENTS #################################### + +# Set the max number of connected clients at the same time. 
By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# IMPORTANT: When Redis Cluster is used, the max number of connections is also +# shared with the cluster bus: every node in the cluster will use two +# connections, one incoming and another outgoing. It is important to size the +# limit accordingly in case of very large clusters. +# +# maxclients 10000 + +############################## MEMORY MANAGEMENT ################################ + +# Set a memory usage limit to the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU or LFU cache, or to +# set a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have replicas attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the replicas are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of replicas is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have replicas attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for replica +# output buffers (but this is not needed if the policy is 'noeviction'). +# +maxmemory 8192mb + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select one from the following behaviors: +# +# volatile-lru -> Evict using approximated LRU, only keys with an expire set. +# allkeys-lru -> Evict any key using approximated LRU. +# volatile-lfu -> Evict using approximated LFU, only keys with an expire set. +# allkeys-lfu -> Evict any key using approximated LFU. +# volatile-random -> Remove a random key having an expire set. +# allkeys-random -> Remove a random key, any key. +# volatile-ttl -> Remove the key with the nearest expire time (minor TTL) +# noeviction -> Don't evict anything, just return an error on write operations. +# +# LRU means Least Recently Used +# LFU means Least Frequently Used +# +# Both LRU, LFU and volatile-ttl are implemented using approximated +# randomized algorithms. +# +# Note: with any of the above policies, Redis will return an error on write +# operations, when there are no suitable keys for eviction. 
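+# In that case clients receive a reply along the lines of:
+#
+#   (error) OOM command not allowed when used memory > 'maxmemory'.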
+# +# At the date of writing these commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +maxmemory-policy volatile-lru + +# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can tune it for speed or +# accuracy. By default Redis will check five keys and pick the one that was +# used least recently, you can change the sample size using the following +# configuration directive. +# +# The default of 5 produces good enough results. 10 Approximates very closely +# true LRU but costs more CPU. 3 is faster but not very accurate. +# +maxmemory-samples 5 + +# Starting from Redis 5, by default a replica will ignore its maxmemory setting +# (unless it is promoted to master after a failover or manually). It means +# that the eviction of keys will be just handled by the master, sending the +# DEL commands to the replica as keys evict in the master side. +# +# This behavior ensures that masters and replicas stay consistent, and is usually +# what you want, however if your replica is writable, or you want the replica +# to have a different memory setting, and you are sure all the writes performed +# to the replica are idempotent, then you may change this default (but be sure +# to understand what you are doing). +# +# Note that since the replica by default does not evict, it may end using more +# memory than the one set via maxmemory (there are certain buffers that may +# be larger on the replica, or data structures may sometimes take more memory +# and so forth). So make sure you monitor your replicas and make sure they +# have enough memory to never hit a real out-of-memory condition before the +# master hits the configured maxmemory setting. +# +# replica-ignore-maxmemory yes + +# Redis reclaims expired keys in two ways: upon access when those keys are +# found to be expired, and also in background, in what is called the +# "active expire key". The key space is slowly and interactively scanned +# looking for expired keys to reclaim, so that it is possible to free memory +# of keys that are expired and will never be accessed again in a short time. +# +# The default effort of the expire cycle will try to avoid having more than +# ten percent of expired keys still in memory, and will try to avoid consuming +# more than 25% of total memory and to add latency to the system. However +# it is possible to increase the expire "effort" that is normally set to +# "1", to a greater value, up to the value "10". At its maximum value the +# system will use more CPU, longer cycles (and technically may introduce +# more latency), and will tolerate less already expired keys still present +# in the system. It's a tradeoff between memory, CPU and latency. +# +# active-expire-effort 1 + +############################# LAZY FREEING #################################### + +# Redis has two primitives to delete keys. One is called DEL and is a blocking +# deletion of the object. It means that the server stops processing new commands +# in order to reclaim all the memory associated with an object in a synchronous +# way. If the key deleted is associated with a small object, the time needed +# in order to execute the DEL command is very small and comparable to most other +# O(1) or O(log_N) commands in Redis. 
However if the key is associated with an +# aggregated value containing millions of elements, the server can block for +# a long time (even seconds) in order to complete the operation. +# +# For the above reasons Redis also offers non blocking deletion primitives +# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and +# FLUSHDB commands, in order to reclaim memory in background. Those commands +# are executed in constant time. Another thread will incrementally free the +# object in the background as fast as possible. +# +# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. +# It's up to the design of the application to understand when it is a good +# idea to use one or the other. However the Redis server sometimes has to +# delete keys or flush the whole database as a side effect of other operations. +# Specifically Redis deletes objects independently of a user call in the +# following scenarios: +# +# 1) On eviction, because of the maxmemory and maxmemory policy configurations, +# in order to make room for new data, without going over the specified +# memory limit. +# 2) Because of expire: when a key with an associated time to live (see the +# EXPIRE command) must be deleted from memory. +# 3) Because of a side effect of a command that stores data on a key that may +# already exist. For example the RENAME command may delete the old key +# content when it is replaced with another one. Similarly SUNIONSTORE +# or SORT with STORE option may delete existing keys. The SET command +# itself removes any old content of the specified key in order to replace +# it with the specified string. +# 4) During replication, when a replica performs a full resynchronization with +# its master, the content of the whole database is removed in order to +# load the RDB file just transferred. +# +# In all the above cases the default is to delete objects in a blocking way, +# like if DEL was called. However you can configure each case specifically +# in order to instead release memory in a non-blocking way like if UNLINK +# was called, using the following configuration directives. + +lazyfree-lazy-eviction no +lazyfree-lazy-expire no +lazyfree-lazy-server-del no +replica-lazy-flush no + +# It is also possible, for the case when to replace the user code DEL calls +# with UNLINK calls is not easy, to modify the default behavior of the DEL +# command to act exactly like UNLINK, using the following configuration +# directive: + +lazyfree-lazy-user-del no + +################################ THREADED I/O ################################# + +# Redis is mostly single threaded, however there are certain threaded +# operations such as UNLINK, slow I/O accesses and other things that are +# performed on side threads. +# +# Now it is also possible to handle Redis clients socket reads and writes +# in different I/O threads. Since especially writing is so slow, normally +# Redis users use pipelining in order to speed up the Redis performances per +# core, and spawn multiple instances in order to scale more. Using I/O +# threads it is possible to easily speedup two times Redis without resorting +# to pipelining nor sharding of the instance. +# +# By default threading is disabled, we suggest enabling it only in machines +# that have at least 4 or more cores, leaving at least one spare core. +# Using more than 8 threads is unlikely to help much. 
We also recommend using +# threaded I/O only if you actually have performance problems, with Redis +# instances being able to use a quite big percentage of CPU time, otherwise +# there is no point in using this feature. +# +# So for instance if you have a four cores boxes, try to use 2 or 3 I/O +# threads, if you have a 8 cores, try to use 6 threads. In order to +# enable I/O threads use the following configuration directive: +# +# io-threads 4 +# +# Setting io-threads to 1 will just use the main thread as usual. +# When I/O threads are enabled, we only use threads for writes, that is +# to thread the write(2) syscall and transfer the client buffers to the +# socket. However it is also possible to enable threading of reads and +# protocol parsing using the following configuration directive, by setting +# it to yes: +# +# io-threads-do-reads no +# +# Usually threading reads doesn't help much. +# +# NOTE 1: This configuration directive cannot be changed at runtime via +# CONFIG SET. Aso this feature currently does not work when SSL is +# enabled. +# +# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make +# sure you also run the benchmark itself in threaded mode, using the +# --threads option to match the number of Redis threads, otherwise you'll not +# be able to notice the improvements. + +############################ KERNEL OOM CONTROL ############################## + +# On Linux, it is possible to hint the kernel OOM killer on what processes +# should be killed first when out of memory. +# +# Enabling this feature makes Redis actively control the oom_score_adj value +# for all its processes, depending on their role. The default scores will +# attempt to have background child processes killed before all others, and +# replicas killed before masters. + +oom-score-adj no + +# When oom-score-adj is used, this directive controls the specific values used +# for master, replica and background child processes. Values range -1000 to +# 1000 (higher means more likely to be killed). +# +# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities) +# can freely increase their value, but not decrease it below its initial +# settings. +# +# Values are used relative to the initial value of oom_score_adj when the server +# starts. Because typically the initial value is 0, they will often match the +# absolute values. + +oom-score-adj-values 0 200 800 + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. 
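+#
+# This configuration keeps AOF off and relies on the RDB save points
+# defined earlier; switch to "appendonly yes" if stronger durability is
+# worth the extra disk I/O for your deployment.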
+
+appendonly no
+
+# The name of the append only file (default: "appendonly.aof")
+
+appendfilename "appendonly.aof"
+
+# The fsync() call tells the Operating System to actually write data on disk
+# instead of waiting for more data in the output buffer. Some OSes will
+# really flush data to disk, others will just try to do it ASAP.
+#
+# Redis supports three different modes:
+#
+# no: don't fsync, just let the OS flush the data when it wants. Faster.
+# always: fsync after every write to the append only log. Slow, safest.
+# everysec: fsync only one time every second. Compromise.
+#
+# The default is "everysec", as that's usually the right compromise between
+# speed and data safety. It's up to you to understand if you can relax this to
+# "no" that will let the operating system flush the output buffer when
+# it wants, for better performance (but if you can live with the idea of
+# some data loss consider the default persistence mode that's snapshotting),
+# or on the contrary, use "always" that's very slow but a bit safer than
+# everysec.
+#
+# For more details please check the following article:
+# http://antirez.com/post/redis-persistence-demystified.html
+#
+# If unsure, use "everysec".
+
+# appendfsync always
+appendfsync everysec
+# appendfsync no
+
+# When the AOF fsync policy is set to always or everysec, and a background
+# saving process (a background save or AOF log background rewriting) is
+# performing a lot of I/O against the disk, in some Linux configurations
+# Redis may block too long on the fsync() call. Note that there is no fix for
+# this currently, as even performing fsync in a different thread will block
+# our synchronous write(2) call.
+#
+# In order to mitigate this problem it's possible to use the following option
+# that will prevent fsync() from being called in the main process while a
+# BGSAVE or BGREWRITEAOF is in progress.
+#
+# This means that while another child is saving, the durability of Redis is
+# the same as "appendfsync no". In practical terms, this means that it is
+# possible to lose up to 30 seconds of log in the worst scenario (with the
+# default Linux settings).
+#
+# If you have latency problems turn this to "yes". Otherwise leave it as
+# "no" that is the safest pick from the point of view of durability.
+
+no-appendfsync-on-rewrite no
+
+# Automatic rewrite of the append only file.
+# Redis is able to automatically rewrite the log file by implicitly calling
+# BGREWRITEAOF when the AOF log size grows by the specified percentage.
+#
+# This is how it works: Redis remembers the size of the AOF file after the
+# latest rewrite (if no rewrite has happened since the restart, the size of
+# the AOF at startup is used).
+#
+# This base size is compared to the current size. If the current size is
+# bigger by the specified percentage, the rewrite is triggered. Also
+# you need to specify a minimal size for the AOF file to be rewritten, this
+# is useful to avoid rewriting the AOF file even if the percentage increase
+# is reached but it is still pretty small.
+#
+# Specify a percentage of zero in order to disable the automatic AOF
+# rewrite feature.
+
+auto-aof-rewrite-percentage 100
+auto-aof-rewrite-min-size 64mb
+
+# An AOF file may be found to be truncated at the end during the Redis
+# startup process, when the AOF data gets loaded back into memory.
+# This may happen when the system where Redis is running
+# crashes, especially when an ext4 filesystem is mounted without the
+# data=ordered option (however this can't happen when Redis itself
+# crashes or aborts but the operating system still works correctly).
+#
+# Redis can either exit with an error when this happens, or load as much
+# data as possible (the default now) and start if the AOF file is found
+# to be truncated at the end. The following option controls this behavior.
+#
+# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
+# the Redis server starts emitting a log to inform the user of the event.
+# Otherwise if the option is set to no, the server aborts with an error
+# and refuses to start. When the option is set to no, the user is required
+# to fix the AOF file using the "redis-check-aof" utility before restarting
+# the server.
+#
+# Note that if the AOF file is found to be corrupted in the middle, the
+# server will still exit with an error. This option only applies when
+# Redis tries to read more data from the AOF file but not enough bytes
+# are found.
+aof-load-truncated yes
+
+# When rewriting the AOF file, Redis is able to use an RDB preamble in the
+# AOF file for faster rewrites and recoveries. When this option is turned
+# on the rewritten AOF file is composed of two different stanzas:
+#
+#   [RDB file][AOF tail]
+#
+# When loading, Redis recognizes that the AOF file starts with the "REDIS"
+# string and loads the prefixed RDB file, then continues loading the AOF
+# tail.
+aof-use-rdb-preamble yes
+
+################################ LUA SCRIPTING ###############################
+
+# Max execution time of a Lua script in milliseconds.
+#
+# If the maximum execution time is reached Redis will log that a script is
+# still in execution after the maximum allowed time and will start to
+# reply to queries with an error.
+#
+# When a long running script exceeds the maximum execution time only the
+# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
+# used to stop a script that did not yet call any write commands. The second
+# is the only way to shut down the server in the case a write command was
+# already issued by the script but the user doesn't want to wait for the
+# natural termination of the script.
+#
+# Set it to 0 or a negative value for unlimited execution without warnings.
+lua-time-limit 5000
+
+################################ REDIS CLUSTER ###############################
+
+# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
+# started as cluster nodes can. In order to start a Redis instance as a
+# cluster node enable the cluster support by uncommenting the following:
+#
+# cluster-enabled yes
+
+# Every cluster node has a cluster configuration file. This file is not
+# intended to be edited by hand. It is created and updated by Redis nodes.
+# Every Redis Cluster node requires a different cluster configuration file.
+# Make sure that instances running on the same system do not have
+# overlapping cluster configuration file names.
+#
+# cluster-config-file nodes-6379.conf
+
+# Cluster node timeout is the number of milliseconds a node must be
+# unreachable for it to be considered in a failure state.
+# Most other internal time limits are a multiple of the node timeout.
+#
+# cluster-node-timeout 15000
+
+# A replica of a failing master will avoid starting a failover if its data
+# looks too old.
+#
+# There is no simple way for a replica to actually have an exact measure of
+# its "data age", so the following two checks are performed:
+#
+# 1) If there are multiple replicas able to fail over, they exchange messages
+#    in order to try to give an advantage to the replica with the best
+#    replication offset (more data from the master processed).
+#    Replicas will try to get their rank by offset, and apply to the start
+#    of the failover a delay proportional to their rank.
+#
+# 2) Every single replica computes the time of the last interaction with
+#    its master. This can be the last ping or command received (if the master
+#    is still in the "connected" state), or the time that elapsed since the
+#    disconnection with the master (if the replication link is currently down).
+#    If the last interaction is too old, the replica will not try to fail over
+#    at all.
+#
+# Point "2" can be tuned by the user. Specifically a replica will not perform
+# the failover if, since the last interaction with the master, the time
+# elapsed is greater than:
+#
+#   (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period
+#
+# So for example if node-timeout is 30 seconds, the cluster-replica-validity-factor
+# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the
+# replica will not try to fail over if it was not able to talk with the master
+# for longer than 310 seconds.
+#
+# A large cluster-replica-validity-factor may allow replicas with too old data
+# to fail over a master, while too small a value may prevent the cluster from
+# being able to elect a replica at all.
+#
+# For maximum availability, it is possible to set the cluster-replica-validity-factor
+# to a value of 0, which means that replicas will always try to fail over the
+# master regardless of the last time they interacted with the master.
+# (However they'll always try to apply a delay proportional to their
+# offset rank).
+#
+# Zero is the only value able to guarantee that when all the partitions heal
+# the cluster will always be able to continue.
+#
+# cluster-replica-validity-factor 10
+
+# Cluster replicas are able to migrate to orphaned masters, i.e. masters
+# that are left without working replicas. This improves the cluster's ability
+# to resist failures, as otherwise an orphaned master can't be failed over
+# if it fails while it has no working replicas.
+#
+# Replicas migrate to orphaned masters only if there are still at least a
+# given number of other working replicas for their old master. This number
+# is the "migration barrier". A migration barrier of 1 means that a replica
+# will migrate only if there is at least 1 other working replica for its master
+# and so forth. It usually reflects the number of replicas you want for every
+# master in your cluster.
+#
+# Default is 1 (replicas migrate only if their masters remain with at least
+# one replica). To disable migration just set it to a very large value.
+# A value of 0 can be set but is useful only for debugging and dangerous
+# in production.
+#
+# cluster-migration-barrier 1
+
+# By default Redis Cluster nodes stop accepting queries if they detect that
+# at least one hash slot is uncovered (no available node is serving it).
+# This way if the cluster is partially down (for example a range of hash
+# slots is no longer covered) the whole cluster eventually becomes
+# unavailable. It automatically becomes available again as soon as all the
+# slots are covered.
+#
+# However sometimes you want the subset of the cluster which is working
+# to continue to accept queries for the part of the key space that is still
+# covered. In order to do so, just set the cluster-require-full-coverage
+# option to no.
+#
+# cluster-require-full-coverage yes
+
+# This option, when set to yes, prevents a replica from trying to fail over
+# its master during master failures. However the master can still perform a
+# manual failover, if forced to do so.
+#
+# This is useful in different scenarios, especially in the case of multiple
+# data center operations, where we want one side to never be promoted except
+# in the case of a total DC failure.
+#
+# cluster-replica-no-failover no
+
+# This option, when set to yes, allows nodes to serve read traffic while the
+# cluster is in a down state, as long as it believes it owns the slots.
+#
+# This is useful for two cases. The first case is for when an application
+# doesn't require consistency of data during node failures or network partitions.
+# One example of this is a cache, where as long as the node has the data it
+# should be able to serve it.
+#
+# The second use case is for configurations that don't meet the recommended
+# three shards but want to enable cluster mode and scale later. A master
+# outage in a 1 or 2 shard configuration causes a read/write outage to the
+# entire cluster without this option set; with it set there is only a write
+# outage. Without a quorum of masters, slot ownership will not change
+# automatically.
+#
+# cluster-allow-reads-when-down no
+
+# In order to set up your cluster make sure to read the documentation
+# available at the http://redis.io web site.
+
+########################## CLUSTER DOCKER/NAT support ########################
+
+# In certain deployments, Redis Cluster nodes address discovery fails, because
+# addresses are NAT-ted or because ports are forwarded (the typical case is
+# Docker and other containers).
+#
+# In order to make Redis Cluster work in such environments, a static
+# configuration where each node knows its public address is needed. The
+# following options are used for this purpose, and are:
+#
+# * cluster-announce-ip
+# * cluster-announce-port
+# * cluster-announce-bus-port
+#
+# Each instructs the node about its address, client port, and cluster message
+# bus port. The information is then published in the header of the bus packets
+# so that other nodes will be able to correctly map the address of the node
+# publishing the information.
+#
+# If the above options are not used, the normal Redis Cluster auto-detection
+# will be used instead.
+#
+# Note that when remapped, the bus port may not be at the fixed offset of
+# client port + 10000, so you can specify any port and bus-port depending
+# on how they get remapped. If the bus-port is not set, a fixed offset of
+# 10000 will be used as usual.
+#
+# Example:
+#
+# cluster-announce-ip 10.1.1.5
+# cluster-announce-port 6379
+# cluster-announce-bus-port 6380
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and cannot serve
+# other requests in the meantime).
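+#
+# As an illustrative aside: once enabled, collected entries can be inspected
+# at runtime with redis-cli, e.g.:
+#
+#   redis-cli SLOWLOG GET 10    # show the 10 most recent slow entries
+#   redis-cli SLOWLOG LEN       # how many entries are currently stored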
+#
+# You can configure the slow log with two parameters: one tells Redis the
+# execution time, in microseconds, that a command must exceed in order to
+# get logged, and the other parameter is the length of the slow log. When a
+# new command is logged the oldest one is removed from the queue of logged
+# commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
+
+################################ LATENCY MONITOR ##############################
+
+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user, who can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal to or
+# greater than the amount of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact that, while very small, can be measured under big load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
+latency-monitor-threshold 0
+
+############################# EVENT NOTIFICATION ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at http://redis.io/topics/notifications
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+#  K     Keyspace events, published with __keyspace@<db>__ prefix.
+#  E     Keyevent events, published with __keyevent@<db>__ prefix.
+#  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+#  $     String commands
+#  l     List commands
+#  s     Set commands
+#  h     Hash commands
+#  z     Sorted set commands
+#  x     Expired events (events generated every time a key expires)
+#  e     Evicted events (events generated when a key is evicted for maxmemory)
+#  t     Stream commands
+#  m     Key-miss events (Note: It is not included in the 'A' class)
+#  A     Alias for "g$lshzxet", so that the "AKE" string means all the events
+#        (except key-miss events which are excluded from 'A' due to their
+#        unique nature).
+#
+# The "notify-keyspace-events" directive takes as its argument a string that
+# is composed of zero or multiple characters. The empty string means that
+# notifications are disabled.
+#
+# Example: to enable list and generic events, from the point of view of the
+# event name, use:
+#
+#  notify-keyspace-events Elg
+#
+# Example 2: to get the stream of expired keys, subscribing to the channel
+# name __keyevent@0__:expired, use:
+#
+#  notify-keyspace-events Ex
+#
+# By default all notifications are disabled because most users don't need
+# this feature and the feature has some overhead. Note that if you don't
+# specify at least one of K or E, no events will be delivered.
+notify-keyspace-events ""
+
+############################### GOPHER SERVER #################################
+
+# Redis contains an implementation of the Gopher protocol, as specified in
+# RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt).
+#
+# The Gopher protocol was very popular in the late '90s. It is an alternative
+# to the web, and the implementation, both server and client side, is so
+# simple that the Redis server needs just 100 lines of code to implement
+# this support.
+#
+# What do you do with Gopher nowadays? Well Gopher never *really* died, and
+# lately there is a movement to resurrect Gopher's more hierarchical content,
+# composed of just plain text documents. Some want a simpler internet,
+# others believe that the mainstream internet became too controlled, and
+# it's cool to create an alternative space for people that want a bit of
+# fresh air.
+#
+# Anyway, for the 10th birthday of Redis, we gave it the Gopher protocol
+# as a gift.
+#
+# --- HOW IT WORKS ---
+#
+# The Redis Gopher support uses the inline protocol of Redis, and specifically
+# two kinds of inline requests that were anyway illegal: an empty request
+# or any request that starts with "/" (there are no Redis commands starting
+# with a slash). Normal RESP2/RESP3 requests are completely out of the
+# path of the Gopher protocol implementation and are served as usual as well.
+#
+# If you open a connection to Redis when Gopher is enabled and send it
+# a string like "/foo", if there is a key named "/foo" it is served via the
+# Gopher protocol.
+#
+# In order to create a real Gopher "hole" (the name of a Gopher site in
+# Gopher speak), you likely need a script like the following:
+#
+#   https://github.com/antirez/gopher2redis
+#
+# --- SECURITY WARNING ---
+#
+# If you plan to put Redis on the internet in a publicly accessible address
+# to serve Gopher pages MAKE SURE TO SET A PASSWORD for the instance.
+# Once a password is set:
+#
+#   1. The Gopher server (when enabled, not by default) will still serve
+#      content via Gopher.
+#   2. However other commands cannot be called before the client
+#      authenticates.
+#
+# So use the 'requirepass' option to protect your instance.
+#
+# Note that Gopher is not currently supported when 'io-threads-do-reads'
+# is enabled.
+#
+# To enable Gopher support, uncomment the following line and set the option
+# from no (the default) to yes.
+#
+# gopher-enabled no
+
+############################### ADVANCED CONFIG ###############################
+
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 64
+
+# Lists are also encoded in a special way to save a lot of space.
+# The number of entries allowed per internal list node can be specified
+# as a fixed maximum size or a maximum number of elements.
+# For a fixed maximum size, use -5 through -1, meaning:
+# -5: max size: 64 Kb  <-- not recommended for normal workloads
+# -4: max size: 32 Kb  <-- not recommended
+# -3: max size: 16 Kb  <-- probably not recommended
+# -2: max size: 8 Kb   <-- good
+# -1: max size: 4 Kb   <-- good
+# Positive numbers mean store up to _exactly_ that number of elements
+# per list node.
+# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
+# but if your use case is unique, adjust the settings as necessary.
+list-max-ziplist-size -2
+
+# Lists may also be compressed.
+# Compress depth is the number of quicklist ziplist nodes from *each* side of
+# the list to *exclude* from compression. The head and tail of the list
+# are always uncompressed for fast push/pop operations. Settings are:
+# 0: disable all list compression
+# 1: depth 1 means "don't start compressing until after 1 node into the list,
+#    going from either the head or tail"
+#    So: [head]->node->node->...->node->[tail]
+#    [head], [tail] will always be uncompressed; inner nodes will compress.
+# 2: [head]->[next]->node->node->...->node->[prev]->[tail]
+#    2 here means: don't compress head or head->next or tail->prev or tail,
+#    but compress all nodes between them.
+# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
+# etc.
+list-compress-depth 0
+
+# Sets have a special encoding in just one case: when a set is composed
+# of just strings that happen to be integers in radix 10 in the range
+# of 64 bit signed integers.
+# The following configuration setting sets the limit on the size of the
+# set in order to use this special memory saving encoding.
+set-max-intset-entries 512
+
+# Similarly to hashes and lists, sorted sets are also specially encoded in
+# order to save a lot of space. This encoding is only used when the length and
+# elements of a sorted set are below the following limits:
+zset-max-ziplist-entries 128
+zset-max-ziplist-value 64
+
+# HyperLogLog sparse representation bytes limit. The limit includes the
+# 16 byte header. When a HyperLogLog using the sparse representation crosses
+# this limit, it is converted into the dense representation.
+#
+# A value greater than 16000 is totally useless, since at that point the
+# dense representation is more memory efficient.
+#
+# The suggested value is ~ 3000 in order to have the benefits of
+# the space efficient encoding without slowing down PFADD too much,
+# which is O(N) with the sparse encoding. The value can be raised to
+# ~ 10000 when CPU is not a concern, but space is, and the data set is
+# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
+hll-sparse-max-bytes 3000
+
+# Streams macro node max size / items. The stream data structure is a radix
+# tree of big nodes that encode multiple items inside. Using this configuration
+# it is possible to configure how big a single node can be in bytes, and the
+# maximum number of items it may contain before switching to a new node when
+# appending new stream entries. If any of the following settings are set to
+# zero, the limit is ignored, so for instance it is possible to set just a
+# max entries limit by setting max-bytes to 0 and max-entries to the desired
+# value.
+stream-node-max-bytes 4096
+stream-node-max-entries 100
+
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehash the main Redis hash table (the one mapping top-level
+# keys to values).
+# The hash table implementation Redis uses (see dict.c) performs a lazy
+# rehashing: the more operations you run against a hash table that is
+# rehashing, the more rehashing "steps" are performed, so if the server is
+# idle the rehashing is never complete and some more memory is used by the
+# hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with a 2 millisecond delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# replica -> replica clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reaches 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously overcomes
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can be read.
+#
+# Instead there is a default limit for pubsub and replica clients, since
+# subscribers and replicas receive data in a push fashion.
+#
+# Both the hard and the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit replica 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+
+# Client query buffers accumulate new commands. They are limited to a fixed
+# amount by default in order to avoid that a protocol desynchronization (for
+# instance due to a bug in the client) will lead to unbounded memory usage in
+# the query buffer. However you can configure it here if you have very special
+# needs, such as huge multi/exec requests or the like.
+#
+# client-query-buffer-limit 1gb
+
+# In the Redis protocol, bulk requests, that is, elements representing single
+# strings, are normally limited to 512 mb. However you can change this limit
+# here; it must be 1mb or greater.
+#
+# proto-max-bulk-len 512mb
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of clients that timed out, purging expired keys that
+# are never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10.
+# Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10
+
+# Normally it is useful to have an HZ value which is proportional to the
+# number of clients connected. This is useful, for instance, in order to
+# avoid processing too many clients for each background task invocation,
+# which prevents latency spikes.
+#
+# Since the default HZ value is conservatively set to 10, Redis
+# offers, and enables by default, the ability to use an adaptive HZ value
+# which will temporarily rise when there are many connected clients.
+#
+# When dynamic HZ is enabled, the actual configured HZ will be used
+# as a baseline, but multiples of the configured HZ value will be actually
+# used as needed once more clients are connected. In this way an idle
+# instance will use very little CPU time while a busy instance will be
+# more responsive.
+dynamic-hz yes
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes
+
+# When Redis saves an RDB file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+rdb-save-incremental-fsync yes
+
+# Redis LFU eviction (see the maxmemory setting) can be tuned. However it is
+# a good idea to start with the default settings and only change them after
+# investigating how to improve performance and how the keys' LFU values
+# change over time, which can be inspected via the OBJECT FREQ command.
+#
+# There are two tunable parameters in the Redis LFU implementation: the
+# counter logarithm factor and the counter decay time. It is important to
+# understand what the two parameters mean before changing them.
+#
+# The LFU counter is just 8 bits per key; its maximum value is 255, so Redis
+# uses a probabilistic increment with logarithmic behavior. Given the value
+# of the old counter, when a key is accessed, the counter is incremented in
+# this way:
+#
+# 1. A random number R between 0 and 1 is extracted.
+# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
+# 3. The counter is incremented only if R < P.
+#
+# The default lfu-log-factor is 10.
+# This is a table of how the frequency
+# counter changes with a different number of accesses with different
+# logarithmic factors:
+#
+# +--------+------------+------------+------------+------------+------------+
+# | factor | 100 hits   | 1000 hits  | 100K hits  | 1M hits    | 10M hits   |
+# +--------+------------+------------+------------+------------+------------+
+# | 0      | 104        | 255        | 255        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 1      | 18         | 49         | 255        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 10     | 10         | 18         | 142        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 100    | 8          | 11         | 49         | 143        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+#
+# NOTE: The above table was obtained by running the following commands:
+#
+#   redis-benchmark -n 1000000 incr foo
+#   redis-cli object freq foo
+#
+# NOTE 2: The counter initial value is 5 in order to give new objects a chance
+# to accumulate hits.
+#
+# The counter decay time is the time, in minutes, that must elapse in order
+# for the key counter to be divided by two (or decremented if its value is
+# <= 10).
+#
+# The default value for the lfu-decay-time is 1. A special value of 0 means to
+# decay the counter every time it happens to be scanned.
+#
+# lfu-log-factor 10
+# lfu-decay-time 1
+
+########################### ACTIVE DEFRAGMENTATION #######################
+
+# What is active defragmentation?
+# -------------------------------
+#
+# Active (online) defragmentation allows a Redis server to compact the
+# spaces left between small allocations and deallocations of data in memory,
+# thus allowing memory to be reclaimed.
+#
+# Fragmentation is a natural process that happens with every allocator (but
+# less so with Jemalloc, fortunately) and certain workloads. Normally a server
+# restart is needed in order to lower the fragmentation, or at least to flush
+# away all the data and create it again. However thanks to this feature
+# implemented by Oran Agra for Redis 4.0 this process can happen at runtime
+# in a "hot" way, while the server is running.
+#
+# Basically when the fragmentation is over a certain level (see the
+# configuration options below) Redis will start to create new copies of the
+# values in contiguous memory regions by exploiting certain specific Jemalloc
+# features (in order to understand if an allocation is causing fragmentation
+# and to allocate it in a better place), and at the same time, will release the
+# old copies of the data. This process, repeated incrementally for all the
+# keys, will cause the fragmentation to drop back to normal values.
+#
+# Important things to understand:
+#
+# 1. This feature is disabled by default, and only works if you compiled Redis
+#    to use the copy of Jemalloc we ship with the source code of Redis.
+#    This is the default with Linux builds.
+#
+# 2. You never need to enable this feature if you don't have fragmentation
+#    issues.
+#
+# 3. Once you experience fragmentation, you can enable this feature when
+#    needed with the command "CONFIG SET activedefrag yes".
+#
+# The configuration parameters are able to fine tune the behavior of the
+# defragmentation process. If you are not sure about what they mean it is
+# a good idea to leave the defaults untouched.
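+#
+# As an illustrative aside: a quick way to check whether defragmentation is
+# worth enabling at all is to look at the fragmentation ratio first, e.g.:
+#
+#   redis-cli INFO memory | grep mem_fragmentation_ratio
+#   redis-cli CONFIG SET activedefrag yes   # only if the ratio stays high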
+
+# Enable active defragmentation
+# activedefrag no
+
+# Minimum amount of fragmentation waste to start active defrag
+# active-defrag-ignore-bytes 100mb
+
+# Minimum percentage of fragmentation to start active defrag
+# active-defrag-threshold-lower 10
+
+# Maximum percentage of fragmentation at which we use maximum effort
+# active-defrag-threshold-upper 100
+
+# Minimal effort for defrag in CPU percentage, to be used when the lower
+# threshold is reached
+# active-defrag-cycle-min 1
+
+# Maximal effort for defrag in CPU percentage, to be used when the upper
+# threshold is reached
+# active-defrag-cycle-max 25
+
+# Maximum number of set/hash/zset/list fields that will be processed from
+# the main dictionary scan
+# active-defrag-max-scan-fields 1000
+
+# Jemalloc background thread for purging will be enabled by default
+jemalloc-bg-thread yes
+
+# It is possible to pin different threads and processes of Redis to specific
+# CPUs in your system, in order to maximize the performance of the server.
+# This is useful both in order to pin different Redis threads to different
+# CPUs, but also in order to make sure that multiple Redis instances running
+# on the same host will be pinned to different CPUs.
+#
+# Normally you can do this using the "taskset" command, however it is also
+# possible to do this via the Redis configuration directly, both on Linux
+# and FreeBSD.
+#
+# You can pin the server/IO threads, bio threads, aof rewrite child process,
+# and the bgsave child process. The syntax to specify the cpu list is the
+# same as for the taskset command:
+#
+# Set redis server/io threads to cpu affinity 0,2,4,6:
+# server_cpulist 0-7:2
+#
+# Set bio threads to cpu affinity 1,3:
+# bio_cpulist 1,3
+#
+# Set aof rewrite child process to cpu affinity 8,9,10,11:
+# aof_rewrite_cpulist 8-11
+#
+# Set bgsave child process to cpu affinity 1,10,11
+# bgsave_cpulist 1,10-11
\ No newline at end of file
diff --git a/service/redis/rhodecode_redis.dockerfile b/service/redis/rhodecode_redis.dockerfile
new file mode 100644
index 0000000..99987d1
--- /dev/null
+++ b/service/redis/rhodecode_redis.dockerfile
@@ -0,0 +1,3 @@
+FROM library/redis:6.0.9
+COPY service/redis/redis.conf /etc/redis/redis-rc.conf
+CMD ["redis-server", "/etc/redis/redis-rc.conf"]
diff --git a/service/rhodecode/bootstrap/apply_license.py b/service/rhodecode/bootstrap/apply_license.py
new file mode 100644
index 0000000..d3092a8
--- /dev/null
+++ b/service/rhodecode/bootstrap/apply_license.py
@@ -0,0 +1,31 @@
+"""
+echo "%run path/apply_license.py" | rc-ishell .dev/dev.ini
+"""
+
+import os
+from rhodecode.model.db import Session
+
+LICENSE_FILE_NAME = 'rhodecode_enterprise.license'
+
+
+def main():
+    license_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), LICENSE_FILE_NAME)
+
+    if not os.path.isfile(license_file):
+        print('No license file at {}'.format(license_file))
+        return
+
+    try:
+        from rc_license.models import apply_license
+    except ImportError:
+        print('Cannot import apply_license')
+        return
+
+    with open(license_file, 'r') as f:
+        license_data = f.read()
+
+    apply_license(license_data)
+    Session().commit()
+
+
+main()
diff --git a/service/rhodecode/bootstrap/enable_svn_proxy.py b/service/rhodecode/bootstrap/enable_svn_proxy.py
new file mode 100644
index 0000000..d69b292
--- /dev/null
+++ b/service/rhodecode/bootstrap/enable_svn_proxy.py
@@ -0,0 +1,24 @@
+"""
+echo "%run path/enable_svn_proxy.py" | RC_SETTING='{"vcs_svn_proxy_http_requests_enabled":true, "vcs_svn_proxy_http_server_url": "http://localhost:8090"}' rc-ishell .dev/dev.ini
+"""
+
+import os
+import json
+from rhodecode.model.db import Session
+from rhodecode.model.settings import VcsSettingsModel
+
+defaults = json.dumps({
+    'vcs_svn_proxy_http_requests_enabled': True,
+    'vcs_svn_proxy_http_server_url': 'http://svn:8090'
+})
+
+
+def main(json_args):
+    model = VcsSettingsModel()
+    model.create_or_update_global_svn_settings(json_args)
+    Session().commit()
+    print('ok')
+
+
+args = json.loads(os.environ.get('RC_SETTING') or defaults)
+main(args)
diff --git a/service/rhodecode/bootstrap/enable_vcs_settings.py b/service/rhodecode/bootstrap/enable_vcs_settings.py
new file mode 100644
index 0000000..c91a3ee
--- /dev/null
+++ b/service/rhodecode/bootstrap/enable_vcs_settings.py
@@ -0,0 +1,30 @@
+"""
+echo "%run path/enable_vcs_settings.py" | RC_SETTING='{"rhodecode_git_close_branch_before_merging": false, "rhodecode_pr_merge_enabled": true, "rhodecode_hg_close_branch_before_merging": false, "rhodecode_use_outdated_comments": true, "rhodecode_git_use_rebase_for_merging": false, "rhodecode_diff_cache": true, "rhodecode_hg_use_rebase_for_merging": false}' rc-ishell .dev/dev.ini
+"""
+
+import os
+import json
+from rhodecode.model.db import Session
+from rhodecode.model.settings import VcsSettingsModel
+
+
+defaults = json.dumps({
+    'rhodecode_diff_cache': True,
+    'rhodecode_git_close_branch_before_merging': False,
+    'rhodecode_git_use_rebase_for_merging': False,
+    'rhodecode_hg_close_branch_before_merging': False,
+    'rhodecode_hg_use_rebase_for_merging': False,
+    'rhodecode_pr_merge_enabled': True,
+    'rhodecode_use_outdated_comments': True
+})
+
+
+def main(json_args):
+    model = VcsSettingsModel()
+    model.create_or_update_global_pr_settings(json_args)
+    Session().commit()
+    print('ok')
+
+
+args = json.loads(os.environ.get('RC_SETTING') or defaults)
+main(args)
diff --git a/service/rhodecode/bootstrap/generate_svn_apache_conf.py b/service/rhodecode/bootstrap/generate_svn_apache_conf.py
new file mode 100644
index 0000000..8894521
--- /dev/null
+++ b/service/rhodecode/bootstrap/generate_svn_apache_conf.py
@@ -0,0 +1,22 @@
+"""
+echo "%run path/generate_svn_apache_conf.py" | RC_SETTING='{"setting1":"key1"}' rc-ishell .dev/dev.ini
+"""
+
+import os
+import json
+from rhodecode.apps.svn_support.utils import generate_mod_dav_svn_config
+from rhodecode.lib.base import bootstrap_request
+
+defaults = json.dumps({
+
+})
+
+
+def main(json_args):
+    request = bootstrap_request()
+    generate_mod_dav_svn_config(request.registry)
+    print('ok')
+
+
+args = json.loads(os.environ.get('RC_SETTING') or defaults)
+main(args)
diff --git a/service/rhodecode/bootstrap/set_default_renderer_to_md.py b/service/rhodecode/bootstrap/set_default_renderer_to_md.py
new file mode 100644
index 0000000..e28045d
--- /dev/null
+++ b/service/rhodecode/bootstrap/set_default_renderer_to_md.py
@@ -0,0 +1,24 @@
+"""
+echo "%run path/set_default_renderer_to_md.py" | RC_SETTING='[["markup_renderer", "markdown", "unicode"]]' rc-ishell .dev/dev.ini
+"""
+
+import os
+import json
+from rhodecode.model.db import Session
+from rhodecode.model.settings import SettingsModel
+
+defaults = json.dumps([
+    ('markup_renderer', 'markdown', 'unicode')
+])
+
+
+def main(json_args):
+    model = SettingsModel()
+    for setting_name, value, type_ in json_args:
+        model.create_or_update_setting(setting_name, value, type_)
+    Session().commit()
+    print('ok')
+
+
+args = json.loads(os.environ.get('RC_SETTING') or defaults)
+main(args)
diff --git a/service/rhodecode/rhodecode.dockerfile b/service/rhodecode/rhodecode.dockerfile
new file mode 100644
index 0000000..51e3707
--- /dev/null
+++ b/service/rhodecode/rhodecode.dockerfile
@@ -0,0 +1,245 @@
+FROM ubuntu:groovy
+MAINTAINER RhodeCode Inc.
+
+ARG TZ="UTC"
+ARG LOCALE_TYPE=en_US.UTF-8
+ARG RHODECODE_TYPE=Enterprise
+ARG RHODECODE_VERSION=4.24.1
+ARG RHODECODE_DB=sqlite
+ARG RHODECODE_USER_NAME=admin
+ARG RHODECODE_USER_PASS=secret4
+ARG RHODECODE_USER_EMAIL=support@rhodecode.com
+
+# env are runtime
+ENV \
+    TZ=${TZ} \
+    LOCALE_TYPE=${LOCALE_TYPE} \
+    \
+    ## Define type we build, and the instance we'll create
+    RHODECODE_TYPE=${RHODECODE_TYPE} \
+    RC_TYPE_ID=enterprise-1 \
+    \
+    ## SETUP ARGS FOR INSTALLATION ##
+    ## set version we build on, get from .env or set default ver
+    RHODECODE_VERSION=${RHODECODE_VERSION} \
+    \
+    ## set DB, default sqlite
+    RHODECODE_DB=${RHODECODE_DB} \
+    \
+    ## set app bootstrap required data
+    RHODECODE_USER_NAME=${RHODECODE_USER_NAME} \
+    RHODECODE_USER_PASS=${RHODECODE_USER_PASS} \
+    RHODECODE_USER_EMAIL=${RHODECODE_USER_EMAIL} \
+    \
+    RC_USER=rhodecode \
+    \
+    # SVN CONFIG
+    MOD_DAV_SVN_CONF_FILE=/etc/rhodecode/conf/svn/mod_dav_svn.conf \
+    MOD_DAV_SVN_PORT=8090 \
+    \
+    # SSHD CONFIG
+    SSHD_CONF_FILE=/etc/rhodecode/sshd_config \
+    \
+    BUILD_CONF=/etc/rhodecode/conf_build \
+    BUILD_BIN_DIR=/var/opt/rhodecode_bin \
+    RHODECODE_DATA_DIR=/var/opt/rhodecode_data \
+    RHODECODE_REPO_DIR=/var/opt/rhodecode_repo_store \
+    RHODECODE_HTTP_PORT=10020 \
+    RHODECODE_VCS_PORT=10010 \
+    RHODECODE_HOST=0.0.0.0 \
+    RHODECODE_VCS_HOST=127.0.0.1
+
+ENV \
+    RCCONTROL=/home/$RC_USER/.rccontrol-profile/bin/rccontrol \
+    SUPERVISOR_CONF=/home/$RC_USER/.rccontrol/supervisor/supervisord.ini \
+    # make application scripts visible
+    PATH=$PATH:/home/$RC_USER/.rccontrol-profile/bin
+
+ENV SVN_LOCALE_DEPS apache2 apache2-utils libapache2-mod-svn
+ENV SSH_LOCALE_DEPS openssh-server
+ENV PYTHON_DEPS python2
+
+RUN \
+echo "** install base packages **" && \
+set -eux; \
+    \
+    savedAptMark="$(apt-mark showmanual)"; \
+    apt-get update; \
+    DEBIAN_FRONTEND="noninteractive" \
+    apt-get install -y --no-install-recommends \
+        tini \
+        bash \
+        binutils \
+        tzdata \
+        locales \
+        openssl \
+        curl \
+        sudo \
+        gosu \
+        $PYTHON_DEPS \
+        $SSH_LOCALE_DEPS \
+        $SVN_LOCALE_DEPS \
+    ; \
+    rm -rf /var/lib/apt/lists/*;
+
+RUN \
+echo "** Configure the python executable for py2/3 compat **" && \
+ISPY=$(which python3 || which python2) && \
+if [ -n "$ISPY" ] ; then ln -s "$ISPY" /usr/bin/python ; fi
+
+RUN \
+echo "** Configure the locales **" && \
+    sed -i "s/^# ${LOCALE_TYPE}/${LOCALE_TYPE}/g" /etc/locale.gen && \
+    locale-gen
+
+# locale-archive is a fix for old nix glibc 2.26 locales
+ENV \
+    LOCALE_ARCHIVE=/var/opt/locale-archive \
+    LANG=${LOCALE_TYPE} \
+    LANGUAGE=${LOCALE_TYPE} \
+    LC_ALL=${LOCALE_TYPE}
+
+# configure the system user
+# explicitly set uid/gid to guarantee that it won't change in the future
+# the values 999:999 are identical to the current user/group id assigned
+RUN \
+echo "** Create system user $RC_USER **" && \
+    groupadd --system --gid 999 $RC_USER && \
+    useradd --system --gid $RC_USER --uid 999 --shell /bin/bash $RC_USER
+
+# set the default bash shell
+SHELL ["/bin/bash", "-c"]
+
+# Fix and set a timezone
+RUN \
+echo "** configure the timezone **" && \
+rm /etc/localtime && cp /usr/share/zoneinfo/$TZ /etc/localtime && \
+echo $TZ > /etc/timezone
+
+
+RUN \
+echo "** prepare rhodecode store and cache **" && \
+    install -d -m 0755 -o $RC_USER -g $RC_USER /opt/rhodecode && \
+    install -d -m 0755 -o $RC_USER -g $RC_USER /var/opt/rhodecode_bin && \
+    install -d -m 0755 -o $RC_USER -g $RC_USER $RHODECODE_REPO_DIR && \
+    install -d -m 0755 -o $RC_USER -g $RC_USER $RHODECODE_DATA_DIR && \
+    install -d -m 0755 -o $RC_USER -g $RC_USER $BUILD_CONF && \
+    install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/ && \
+    install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/.rccontrol && \
+    install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/.rccontrol/cache && \
+    install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/.rccontrol/bootstrap && \
+    install -d -m 0700 -o $RC_USER -g $RC_USER /home/$RC_USER/.ssh && \
+    install -d -m 0700 -o $RC_USER -g $RC_USER /home/$RC_USER/.rhoderc
+
+# expose our custom sshd config
+COPY service/sshd/sshd_config $SSHD_CONF_FILE
+
+# Apache SVN setup
+RUN \
+    echo "**** Apache config cleanup ****" && \
+    rm -f /etc/apache2/conf.d/info.conf \
+          /etc/apache2/conf.d/mpm.conf \
+          /etc/apache2/conf.d/userdir.conf && \
+    rm -f /etc/apache2/sites-enabled/* && \
+    rm -f /etc/apache2/sites-available/*
+
+# custom SVN virtualhost
+COPY service/svn/virtualhost.conf /etc/apache2/sites-enabled/
+
+RUN \
+echo "**** Apache config ****" && \
+    echo $(strings /usr/lib/apache2/modules/mod_dav_svn.so | grep 'Powered by') > /var/opt/dav.version && \
+    mkdir -p /run/apache2 && \
+    mkdir -p /var/opt/www && \
+    echo "unset HOME" > /etc/apache2/envvars && \
+    echo "export APACHE_RUN_USER=${RC_USER}" >> /etc/apache2/envvars && \
+    echo "export APACHE_RUN_GROUP=${RC_USER}" >> /etc/apache2/envvars && \
+    echo "export APACHE_PID_FILE=/var/run/apache2/apache2.pid" >> /etc/apache2/envvars && \
+    echo "export APACHE_RUN_DIR=/var/run/apache2" >> /etc/apache2/envvars && \
+    echo "export APACHE_LOCK_DIR=/var/lock/apache2" >> /etc/apache2/envvars && \
+    sed -i "s/Listen 80/Listen ${MOD_DAV_SVN_PORT}/g" /etc/apache2/ports.conf
+
+
+# Copy artifacts
+COPY --chown=$RC_USER:$RC_USER .cache/* /home/$RC_USER/.rccontrol/cache/
+COPY --chown=$RC_USER:$RC_USER service/rhodecode/bootstrap/* /home/$RC_USER/.rccontrol/bootstrap/
+COPY --chown=$RC_USER:$RC_USER config/compose/rhodecode_enterprise.license /home/$RC_USER/.rccontrol/bootstrap/
+
+RUN \
+echo "**** locale-archive path ****" && \
+    mv -v /home/$RC_USER/.rccontrol/cache/locale-archive /var/opt/locale-archive
+
+# change to non-root user for RUN commands
+USER $RC_USER
+WORKDIR /home/$RC_USER
+
+RUN \
+echo "** install rhodecode control **" && \
+    cd /home/$RC_USER/.rccontrol/cache && \
+    INSTALLER=$(ls -Art /home/$RC_USER/.rccontrol/cache/RhodeCode-installer-* | tail -n 1) && \
+    chmod +x ${INSTALLER} && \
+    ${INSTALLER} --accept-license && \
+    ${RCCONTROL} self-init && \
+    cp -v /home/$RC_USER/.rccontrol-profile/etc/ca-bundle.crt $BUILD_CONF/
+
+RUN \
+echo "** install vcsserver ${RHODECODE_VERSION} **" && \
+    ${RCCONTROL} install VCSServer --version ${RHODECODE_VERSION} --start-at-boot=yes --accept-license --offline \
+      '{"host":"'"$RHODECODE_VCS_HOST"'", "port":"'"$RHODECODE_VCS_PORT"'"}' && \
+    VCSSERVER_PATH=/home/$RC_USER/.rccontrol/vcsserver-1 && \
+    cp -v ${VCSSERVER_PATH}/vcsserver.ini $BUILD_CONF/
+
+RUN \
+echo "** install rhodecode ${RHODECODE_TYPE} ${RHODECODE_VERSION} **" && \
+    RHODECODE_DB_INIT=sqlite && \
+    ${RCCONTROL} install ${RHODECODE_TYPE} --version ${RHODECODE_VERSION} --start-at-boot=yes --accept-license --offline \
+      '{"host":"'"$RHODECODE_HOST"'", "port":"'"$RHODECODE_HTTP_PORT"'", "username":"'"$RHODECODE_USER_NAME"'", "password":"'"$RHODECODE_USER_PASS"'", "email":"'"$RHODECODE_USER_EMAIL"'", "repo_dir":"'"$RHODECODE_REPO_DIR"'", "database": "'"$RHODECODE_DB_INIT"'", "skip_existing_db": "1"}' && \
+    RHODECODE_PATH=/home/$RC_USER/.rccontrol/${RC_TYPE_ID} && \
+    cp -v ${RHODECODE_PATH}/rhodecode.ini $BUILD_CONF/ && \
+    cp -v ${RHODECODE_PATH}/search_mapping.ini $BUILD_CONF/ && \
+    cp -v ${RHODECODE_PATH}/gunicorn_conf.py $BUILD_CONF/ && \
+    rm -rf $BUILD_BIN_DIR/bin && ln -s ${RHODECODE_PATH}/profile/bin $BUILD_BIN_DIR && \
+    mkdir -p $RHODECODE_DATA_DIR/static && cp -r ${RHODECODE_PATH}/public/* $RHODECODE_DATA_DIR/static/ && \
+    rm ${RHODECODE_PATH}/rhodecode.db
+
+
+RUN \
+echo "** configure supervisord **" && \
+    cp -v ${SUPERVISOR_CONF} $BUILD_CONF/ && \
+    sed -i "s/self_managed_supervisor = False/self_managed_supervisor = True/g" /home/$RC_USER/.rccontrol.ini
+
+USER root
+
+
+RUN \
+echo "**** cleanup ****" && \
+    apt-get remove -y $PYTHON_DEPS && \
+    apt-get autoclean -y && \
+    rm -f /tmp/* && \
+    rm -f /home/$RC_USER/.rccontrol/cache/RhodeCode-installer-* && \
+    rm -f /home/$RC_USER/.rccontrol/cache/*.bz2 && \
+    rm -rf /var/lib/apt/lists/* && \
+    rm -f ${SUPERVISOR_CONF}
+
+# copy entrypoints
+COPY entrypoints.d/entrypoint.sh /opt/entrypoints.d/entrypoint.sh
+RUN chmod +x /opt/entrypoints.d/entrypoint.sh
+
+# config volume
+VOLUME /etc/rhodecode/conf
+
+# repo store volume
+VOLUME /var/opt/rhodecode_repo_store
+
+# data volume
+VOLUME /var/opt/rhodecode_data
+
+# logs volume
+VOLUME /var/log/rhodecode
+
+ENTRYPOINT ["/opt/entrypoints.d/entrypoint.sh"]
+
+# compose can override this
+CMD ["supervisord", "--nodaemon", "-c", "/etc/rhodecode/conf/supervisord.ini"]
diff --git a/service/sshd/rhodecode_sshd.dockerfile b/service/sshd/rhodecode_sshd.dockerfile
new file mode 100644
index 0000000..3a3d265
--- /dev/null
+++ b/service/sshd/rhodecode_sshd.dockerfile
@@ -0,0 +1,76 @@
+FROM debian:buster
+MAINTAINER RhodeCode Inc.
+
+# env are runtime/build
+ENV \
+    TZ="UTC" \
+    RC_USER=rhodecode \
+    RHODECODE_USER_NAME=rhodecode \
+    SSHD_CONF_FILE=/etc/rhodecode/sshd_config
+
+RUN \
+echo "** install base packages **" && \
+set -eux; \
+    \
+    savedAptMark="$(apt-mark showmanual)"; \
+    apt-get update; \
+    apt-get install -y --no-install-recommends \
+        bash \
+        tzdata \
+        vim \
+        openssl \
+        openssh-server \
+    ; \
+    rm -rf /var/lib/apt/lists/*;
+
+# # reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
+# apt-mark auto '.*' > /dev/null; \
+# [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; \
+# apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false
+
+# configure the system user
+# explicitly set uid/gid to guarantee that it won't change in the future
+# the values 999:999 are identical to the current user/group id assigned
+RUN \
+echo "** Create system user $RC_USER **" && \
+    groupadd --system --gid 999 $RC_USER && \
+    useradd --system --gid $RC_USER --uid 999 --shell /bin/bash $RC_USER
+
+
+RUN \
+echo "** prepare rhodecode store and cache **" && \
+    install -d -m 0700 -o $RC_USER -g $RC_USER /home/$RC_USER/.ssh
+
+# set the default bash shell
+SHELL ["/bin/bash", "-c"]
+
+# Fix and set a timezone
+RUN \
+echo "** configure the timezone **" && \
+echo $TZ > /etc/timezone
+
+# expose our custom sshd config
+COPY service/sshd/sshd_config $SSHD_CONF_FILE
+
+USER root
+
+RUN \
+echo "**** cleanup ****" && \
+    rm -f /tmp/* && \
+    rm -rf /var/lib/apt/lists/*
+
+# copy entrypoints
+COPY entrypoints.d/ssh-entrypoint.sh /opt/entrypoints.d/ssh-entrypoint.sh
+RUN chmod +x /opt/entrypoints.d/ssh-entrypoint.sh
+
+# config volume
+VOLUME /etc/rhodecode/conf
+
+# logs volume
+VOLUME /var/log/rhodecode
+
+ENTRYPOINT ["/opt/entrypoints.d/ssh-entrypoint.sh"]
+
+# compose can override this
+CMD ["/usr/sbin/sshd", "-f", "/etc/rhodecode/sshd_config", "-D", "-e"]
diff --git a/service/sshd/sshd_config b/service/sshd/sshd_config
new file mode 100644
index 0000000..e128ae6
--- /dev/null
+++ b/service/sshd/sshd_config
@@ -0,0 +1,22 @@
+Port 22
+ChallengeResponseAuthentication no
+
+HostKey /etc/rhodecode/conf/ssh/ssh_host_rsa_key
+HostKey /etc/rhodecode/conf/ssh/ssh_host_ecdsa_key
+HostKey /etc/rhodecode/conf/ssh/ssh_host_ed25519_key
+
+Protocol 2
+PermitRootLogin no
+PasswordAuthentication no
+MaxStartups 100:30:200
+AllowUsers rhodecode
+PrintMotd no
+PubkeyAuthentication yes
+AuthorizedKeysFile /etc/rhodecode/conf/ssh/authorized_keys /etc/rhodecode/conf/ssh/authorized_keys_rhodecode
+
+# Fixes: "User USERNAME not allowed because account is locked"
+# With "UsePAM yes", even if the account gets locked because of the password, the key still works
+UsePAM yes
+
+# Disabling UseDNS since it tends to slow down connecting
+UseDNS no
diff --git a/service/svn/rhodecode_svn.dockerfile b/service/svn/rhodecode_svn.dockerfile
new file mode 100644
index 0000000..d7300b4
--- /dev/null
+++ b/service/svn/rhodecode_svn.dockerfile
@@ -0,0 +1,70 @@
+FROM debian:buster
+MAINTAINER RhodeCode Inc.
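+
+# Like the other service images, this one is built from the repository root;
+# an illustrative manual build (the tag name is only an example):
+#
+#   docker build -f service/svn/rhodecode_svn.dockerfile -t rc-svn .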
+
+ENV \
+    RC_USER=rhodecode \
+    MOD_DAV_SVN_CONF_FILE=/etc/rhodecode/conf/svn/mod_dav_svn.conf
+
+RUN \
+echo "** install base svn packages **" && \
+    apt-get update && \
+    DEBIAN_FRONTEND="noninteractive" \
+    apt-get install -y --no-install-recommends \
+        tini \
+        bash \
+        curl \
+        apache2 \
+        apache2-utils \
+        libapache2-mod-svn \
+        subversion \
+    && \
+    rm -rf /var/lib/apt/lists/*
+
+# configure the system user
+# explicitly set uid/gid to guarantee that it won't change in the future
+# the values 999:999 are identical to the current user/group id assigned
+RUN \
+echo "** Create system user $RC_USER **" && \
+    groupadd --system --gid 999 $RC_USER && \
+    useradd --system --gid $RC_USER --uid 999 --shell /bin/bash $RC_USER
+
+
+RUN \
+echo "**** cleanup ****" && \
+    rm -f /tmp/* && \
+    rm -rf /var/lib/apt/lists/*
+
+RUN \
+    echo "**** Apache config cleanup ****" && \
+    rm -f /etc/apache2/conf.d/info.conf \
+          /etc/apache2/conf.d/mpm.conf \
+          /etc/apache2/conf.d/userdir.conf
+
+
+COPY service/svn/virtualhost.conf /etc/apache2/sites-enabled/
+
+# copy entrypoints
+COPY entrypoints.d/svn-entrypoint.sh /opt/entrypoints.d/svn-entrypoint.sh
+RUN chmod +x /opt/entrypoints.d/svn-entrypoint.sh
+
+RUN \
+    echo $(strings /usr/lib/apache2/modules/mod_dav_svn.so | grep 'Powered by') > /var/opt/dav.version && \
+    mkdir -p /run/apache2 && \
+    mkdir -p /var/opt/www && \
+    echo "export APACHE_RUN_USER=${RC_USER}" > /etc/apache2/envvars && \
+    echo "export APACHE_RUN_GROUP=${RC_USER}" >> /etc/apache2/envvars
+
+# repo store volume
+VOLUME /var/opt/rhodecode_repo_store
+
+# config volume
+VOLUME /etc/rhodecode/conf
+
+# logs volume
+VOLUME /var/log/rhodecode
+
+ENTRYPOINT ["/opt/entrypoints.d/svn-entrypoint.sh"]
+
+CMD ["apachectl", "-D", "FOREGROUND"]
diff --git a/service/svn/virtualhost.conf b/service/svn/virtualhost.conf
new file mode 100644
index 0000000..bc79470
--- /dev/null
+++ b/service/svn/virtualhost.conf
@@ -0,0 +1,22 @@
+LoadModule headers_module /usr/lib/apache2/modules/mod_headers.so
+LoadModule authn_anon_module /usr/lib/apache2/modules/mod_authn_anon.so
+LoadModule dav_svn_module /usr/lib/apache2/modules/mod_dav_svn.so
+
+<VirtualHost *:8090>
+    ServerAdmin admin@localhost
+    DocumentRoot /var/opt/www
+    ErrorLog ${APACHE_LOG_DIR}/svn_error.log
+    CustomLog ${APACHE_LOG_DIR}/svn_access.log combined
+    LogLevel info
+
+    <Location /server-status>
+        Require ip 127
+        SetHandler server-status
+        Require all granted
+    </Location>
+
+    # allows custom host names, prevents 400 errors on checkout
+    HttpProtocolOptions Unsafe
+
+    Include ${MOD_DAV_SVN_CONF_FILE}
+</VirtualHost>
\ No newline at end of file