; #########################################
; RHODECODE COMMUNITY EDITION CONFIGURATION
; #########################################

[DEFAULT]
; Debug flag sets all loggers to debug, and enables request tracking
debug = true

; ########################################################################
; EMAIL CONFIGURATION
; These settings will be used by the RhodeCode mailing system
; ########################################################################

; prefix all emails subjects with given prefix, helps filtering out emails
#email_prefix = [RhodeCode]

; email FROM address all mails will be sent from
#app_email_from = rhodecode-noreply@localhost

#smtp_server = mail.server.com
#smtp_username =
#smtp_password =
#smtp_port =
#smtp_use_tls = false
#smtp_use_ssl = true

[server:main]
; COMMON HOST/IP CONFIG, This applies mostly to develop setup,
; Host and port for gunicorn are controlled by gunicorn_conf.py
host = 127.0.0.1
port = 10020

; ###########################
; GUNICORN APPLICATION SERVER
; ###########################

; run with gunicorn --config gunicorn_conf.py --paste rhodecode.ini

; Module to use, this setting shouldn't be changed
use = egg:gunicorn#main

; Prefix middleware for RhodeCode.
; recommended when using proxy setup.
; allows to set RhodeCode under a prefix in server.
; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
; And set your prefix like: `prefix = /custom_prefix`
; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
; to make your cookies only work on prefix url
[filter:proxy-prefix]
use = egg:PasteDeploy#prefix
prefix = /

[app:main]
; The %(here)s variable will be replaced with the absolute path of parent directory
; of this file
; Each option in the app:main can be overridden by an environmental variable
;
; To override an option:
;
; RC_
; Everything should be uppercase, . and - should be replaced by _.
; For example, if you have these configuration settings:
; rc_cache.repo_object.backend = foo
; can be overridden by
; export RC_CACHE_REPO_OBJECT_BACKEND=foo

use = egg:rhodecode-enterprise-ce

; enable proxy prefix middleware, defined above
#filter-with = proxy-prefix

; control if environmental variables are expanded into the .ini settings
#rhodecode.env_expand = true

; #############
; DEBUG OPTIONS
; #############

pyramid.reload_templates = true

# During development we want to have the debug toolbar enabled
pyramid.includes =
    pyramid_debugtoolbar

debugtoolbar.hosts = 0.0.0.0/0
debugtoolbar.exclude_prefixes =
    /css
    /fonts
    /images
    /js

## RHODECODE PLUGINS ##
rhodecode.includes =
    rhodecode.api

# api prefix url
rhodecode.api.url = /_admin/api

; enable debug style page
debug_style = true

; #################
; END DEBUG OPTIONS
; #################

; encryption key used to encrypt social plugin tokens,
; remote_urls with credentials etc, if not set it defaults to
; `beaker.session.secret`
#rhodecode.encrypted_values.secret =

; decryption strict mode (enabled by default). It controls if decryption raises
; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
#rhodecode.encrypted_values.strict = false

; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
; fernet is safer, and we strongly recommend switching to it.
; Due to backward compatibility aes is used as default.
#rhodecode.encrypted_values.algorithm = fernet
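
; Illustrative example (not defaults): switch encryption to fernet with an
; explicit secret instead of falling back to `beaker.session.secret`. The
; secret value below is a placeholder, use your own long random string.
#rhodecode.encrypted_values.secret = <long-random-string>
#rhodecode.encrypted_values.algorithm = fernet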

; Return gzipped responses from RhodeCode (static files/application)
gzip_responses = false

; Auto-generate javascript routes file on startup
generate_js_files = false

; System global default language.
; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
lang = en

; Perform a full repository scan and import on each server start.
; Setting this to true could lead to very long startup time.
startup.import_repos = false

; URL at which the application is running. This is used for Bootstrapping
; requests in context when no web request is available. Used in ishell, or
; SSH calls. Set this for events to receive proper url for SSH calls.
app.base_url = http://rhodecode.local

; Host at which the Service API is running.
app.service_api.host = http://rhodecode.local:10020

; Secret for Service API authentication.
app.service_api.token =

; Unique application ID. Should be a random unique string for security.
app_instance_uuid = rc-production

; Cut off limit for large diffs (size in bytes). If overall diff size on
; commit, or pull request exceeds this limit this diff will be displayed
; partially. E.g 512000 == 512Kb
cut_off_limit_diff = 512000

; Cut off limit for large files inside diffs (size in bytes). Each individual
; file inside diff which exceeds this limit will be displayed partially.
; E.g 128000 == 128Kb
cut_off_limit_file = 128000

; Use cached version of vcs repositories everywhere. Recommended to be `true`
vcs_full_cache = true

; Force https in RhodeCode, fixes https redirects, assumes it's always https.
; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
force_https = false

; use Strict-Transport-Security headers
use_htsts = false

; Set to true if your repos are exposed using the dumb protocol
git_update_server_info = false

; RSS/ATOM feed options
rss_cut_off_limit = 256000
rss_items_per_page = 10
rss_include_diff = false

; gist URL alias, used to create nicer urls for gist. This should be an
; url that does rewrites to _admin/gists/{gistid}.
; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
gist_alias_url =

; List of views (using glob pattern syntax) that AUTH TOKENS could be
; used for access.
; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
; came from the logged-in user who owns this authentication token.
; Additionally @TOKEN syntax can be used to bind the view to a specific
; authentication token. Such a view would only be accessible when used together
; with this authentication token.
; The list of all views can be found under `/_admin/permissions/auth_token_access`
; The list should be "," separated and on a single line.
; Most common views to enable:

# RepoCommitsView:repo_commit_download
# RepoCommitsView:repo_commit_patch
# RepoCommitsView:repo_commit_raw
# RepoCommitsView:repo_commit_raw@TOKEN
# RepoFilesView:repo_files_diff
# RepoFilesView:repo_archivefile
# RepoFilesView:repo_file_raw
# GistView:*
api_access_controllers_whitelist =

; Default encoding used to convert from and to unicode
; can be also a comma separated list of encoding in case of mixed encodings
default_encoding = UTF-8

; instance-id prefix
; a prefix key for this instance used for cache invalidation when running
; multiple instances of RhodeCode, make sure it's globally unique for
; all running RhodeCode instances.
; Leave empty if you don't use it
instance_id =
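
; Illustrative example (not a default): when running multiple RhodeCode
; instances, give each one its own globally unique id; the value is a placeholder
#instance_id = rc-node-1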

; Fallback authentication plugin. Set this to a plugin ID to force the usage
; of an authentication plugin also if it is disabled by its settings.
; This could be useful if you are unable to log in to the system due to broken
; authentication settings. Then you can enable e.g. the internal RhodeCode auth
; module to log in again and fix the settings.
; Available builtin plugin IDs (hash is part of the ID):
; egg:rhodecode-enterprise-ce#rhodecode
; egg:rhodecode-enterprise-ce#pam
; egg:rhodecode-enterprise-ce#ldap
; egg:rhodecode-enterprise-ce#jasig_cas
; egg:rhodecode-enterprise-ce#headers
; egg:rhodecode-enterprise-ce#crowd
#rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode

; Flag to control loading of legacy plugins in py:/path format
auth_plugin.import_legacy_plugins = true

; alternative return HTTP header for failed authentication. Default HTTP
; response is 401 HTTPUnauthorized. Currently HG clients have troubles with
; handling that causing a series of failed authentication calls.
; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
; This will be served instead of default 401 on bad authentication
auth_ret_code =

; use special detection method when serving auth_ret_code, instead of serving
; ret_code directly, use 401 initially (which triggers credentials prompt)
; and then serve auth_ret_code to clients
auth_ret_code_detection = false

; locking return code. When repository is locked return this HTTP code. 2XX
; codes don't break the transactions while 4XX codes do
lock_ret_code = 423

; Filesystem location where repositories should be stored
repo_store.path = /var/opt/rhodecode_repo_store

; allows to setup custom hooks in settings page
allow_custom_hooks_settings = true

; Generated license token required for EE edition license.
; New generated token value can be found in Admin > settings > license page.
license_token =

; This flag hides sensitive information on the license page such as token, and license data
license.hide_license_info = false

; Import EE license from this license path
#license.import_path = %(here)s/rhodecode_enterprise.license

; import license 'if-missing' or 'force' (always override)
; if-missing means apply license if it doesn't exist. 'force' option always overrides it
license.import_path_mode = if-missing
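
; Illustrative example (not defaults): import a license file on startup and
; always override the currently stored license
#license.import_path = %(here)s/rhodecode_enterprise.license
#license.import_path_mode = force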

; supervisor connection uri, for managing supervisor and logs.
supervisor.uri =

; supervisord group name/id we only want this RC instance to handle
supervisor.group_id = dev

; Display extended labs settings
labs_settings_active = true

; Custom exception store path, defaults to TMPDIR
; This is used to store exceptions from RhodeCode in a shared directory
#exception_tracker.store_path =

; Send email with exception details when it happens
#exception_tracker.send_email = false

; Comma separated list of recipients for exception emails,
; e.g admin@rhodecode.com,devops@rhodecode.com
; Can be left empty, then emails will be sent to ALL super-admins
#exception_tracker.send_email_recipients =

; optional prefix to add to email subject
#exception_tracker.email_prefix = [RHODECODE ERROR]

; NOTE: this setting IS DEPRECATED:
; file_store backend is always enabled
#file_store.enabled = true

; NOTE: this setting IS DEPRECATED:
; file_store.backend = X -> use `file_store.backend.type = filesystem_v2` instead
; Storage backend, available options are: local
#file_store.backend = local

; NOTE: this setting IS DEPRECATED:
; file_store.storage_path = X -> use `file_store.filesystem_v2.storage_path = X` instead
; path to store the uploaded binaries and artifacts
#file_store.storage_path = /var/opt/rhodecode_data/file_store

; Artifacts file-store, is used to store comment attachments and artifacts uploads.
; file_store backend type: filesystem_v1, filesystem_v2 or objectstore (s3-based) are available as options
; filesystem_v1 is backwards compat with pre 5.1 storage changes
; new installations should choose filesystem_v2 or objectstore (s3-based), pick filesystem when migrating from
; previous installations to keep the artifacts without a need of migration
#file_store.backend.type = filesystem_v2

; filesystem options...
#file_store.filesystem_v1.storage_path = /var/opt/rhodecode_data/artifacts_file_store

; filesystem_v2 options...
#file_store.filesystem_v2.storage_path = /var/opt/rhodecode_data/artifacts_file_store
#file_store.filesystem_v2.shards = 8

; objectstore options...
; url for s3 compatible storage that allows to upload artifacts
; e.g http://minio:9000
#file_store.backend.type = objectstore
#file_store.objectstore.url = http://s3-minio:9000

; a top-level bucket to put all other shards in
; objects will be stored in rhodecode-file-store/shard-N based on the bucket_shards number
#file_store.objectstore.bucket = rhodecode-file-store

; number of sharded buckets to create to distribute archives across
; default is 8 shards
#file_store.objectstore.bucket_shards = 8

; key for s3 auth
#file_store.objectstore.key = s3admin

; secret for s3 auth
#file_store.objectstore.secret = s3secret4

; region for s3 storage
#file_store.objectstore.region = eu-central-1

; Redis url to acquire/check generation of archives locks
archive_cache.locking.url = redis://redis:6379/1

; Storage backend, only 'filesystem' and 'objectstore' are available now
archive_cache.backend.type = filesystem

; url for s3 compatible storage that allows to upload artifacts
; e.g http://minio:9000
archive_cache.objectstore.url = http://s3-minio:9000

; key for s3 auth
archive_cache.objectstore.key = key

; secret for s3 auth
archive_cache.objectstore.secret = secret

; region for s3 storage
archive_cache.objectstore.region = eu-central-1

; number of sharded buckets to create to distribute archives across
; default is 8 shards
archive_cache.objectstore.bucket_shards = 8

; a top-level bucket to put all other shards in
; objects will be stored in rhodecode-archive-cache/shard-N based on the bucket_shards number
archive_cache.objectstore.bucket = rhodecode-archive-cache

; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
archive_cache.objectstore.retry = false

; number of seconds to wait for the next try when using retry
archive_cache.objectstore.retry_backoff = 1

; how many times to retry a fetch from this backend
archive_cache.objectstore.retry_attempts = 10
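
; Illustrative example (not defaults): enable a modest retry policy against a
; flaky objectstore; the values below are placeholders
#archive_cache.objectstore.retry = true
#archive_cache.objectstore.retry_backoff = 2
#archive_cache.objectstore.retry_attempts = 5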

; Default is $cache_dir/archive_cache if not set
; Generated repo archives will be cached at this location
; and served from the cache during subsequent requests for the same archive of
; the repository. This path is important to be shared across filesystems and with
; RhodeCode and vcsserver
archive_cache.filesystem.store_dir = /var/opt/rhodecode_data/archive_cache

; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
archive_cache.filesystem.cache_size_gb = 1

; Eviction policy used to clear out after cache_size_gb limit is reached
archive_cache.filesystem.eviction_policy = least-recently-stored

; By default cache uses sharding technique, this specifies how many shards are there
; default is 8 shards
archive_cache.filesystem.cache_shards = 8

; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
archive_cache.filesystem.retry = false

; number of seconds to wait for the next try when using retry
archive_cache.filesystem.retry_backoff = 1

; how many times to retry a fetch from this backend
archive_cache.filesystem.retry_attempts = 10

; #############
; CELERY CONFIG
; #############

; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
use_celery = true

; path to store schedule database
#celerybeat-schedule.path =

; connection url to the message broker (default redis)
celery.broker_url = redis://redis:6379/8

; results backend to get results for (default redis)
celery.result_backend = redis://redis:6379/8

; rabbitmq example
#celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost

; maximum tasks to execute before worker restart
celery.max_tasks_per_child = 20

; tasks will never be sent to the queue, but executed locally instead.
celery.task_always_eager = false

; #############
; DOGPILE CACHE
; #############

; Default cache dir for caches. Putting this into a ramdisk can boost performance.
; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
cache_dir = /var/opt/rhodecode_data

; *********************************************
; `sql_cache_short` cache for heavy SQL queries
; Only supported backend is `memory_lru`
; *********************************************
rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
rc_cache.sql_cache_short.expiration_time = 30

; *****************************************************
; `cache_repo_longterm` cache for repo object instances
; Only supported backend is `memory_lru`
; *****************************************************
rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
; by default we use 30 Days, cache is still invalidated on push
rc_cache.cache_repo_longterm.expiration_time = 2592000
; max items in LRU cache, set to smaller number to save memory, and expire last used caches
rc_cache.cache_repo_longterm.max_size = 10000

; *********************************************
; `cache_general` cache for general purpose use
; for simplicity use rc.file_namespace backend,
; for performance and scale use rc.redis
; *********************************************
rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
rc_cache.cache_general.expiration_time = 43200

; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
#rc_cache.cache_general.arguments.filename = /tmp/cache_general_db

; alternative `cache_general` redis backend with distributed lock
#rc_cache.cache_general.backend = dogpile.cache.rc.redis
#rc_cache.cache_general.expiration_time = 300

; redis_expiration_time needs to be greater than expiration_time
#rc_cache.cache_general.arguments.redis_expiration_time = 7200

#rc_cache.cache_general.arguments.host = localhost
#rc_cache.cache_general.arguments.port = 6379
#rc_cache.cache_general.arguments.db = 0
#rc_cache.cache_general.arguments.socket_timeout = 30
; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
#rc_cache.cache_general.arguments.distributed_lock = true

; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
#rc_cache.cache_general.arguments.lock_auto_renewal = true

; *************************************************
; `cache_perms` cache for permission tree, auth TTL
; for simplicity use rc.file_namespace backend,
; for performance and scale use rc.redis
; *************************************************
rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
rc_cache.cache_perms.expiration_time = 3600

; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
#rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db

; alternative `cache_perms` redis backend with distributed lock
#rc_cache.cache_perms.backend = dogpile.cache.rc.redis
#rc_cache.cache_perms.expiration_time = 300

; redis_expiration_time needs to be greater than expiration_time
#rc_cache.cache_perms.arguments.redis_expiration_time = 7200

#rc_cache.cache_perms.arguments.host = localhost
#rc_cache.cache_perms.arguments.port = 6379
#rc_cache.cache_perms.arguments.db = 0
#rc_cache.cache_perms.arguments.socket_timeout = 30
; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
#rc_cache.cache_perms.arguments.distributed_lock = true

; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
#rc_cache.cache_perms.arguments.lock_auto_renewal = true

; ***************************************************
; `cache_repo` cache for file tree, Readme, RSS FEEDS
; for simplicity use rc.file_namespace backend,
; for performance and scale use rc.redis
; ***************************************************
rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
rc_cache.cache_repo.expiration_time = 2592000

; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
#rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db

; alternative `cache_repo` redis backend with distributed lock
#rc_cache.cache_repo.backend = dogpile.cache.rc.redis
#rc_cache.cache_repo.expiration_time = 2592000

; redis_expiration_time needs to be greater than expiration_time
#rc_cache.cache_repo.arguments.redis_expiration_time = 2678400

#rc_cache.cache_repo.arguments.host = localhost
#rc_cache.cache_repo.arguments.port = 6379
#rc_cache.cache_repo.arguments.db = 1
#rc_cache.cache_repo.arguments.socket_timeout = 30
; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
#rc_cache.cache_repo.arguments.distributed_lock = true

; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
#rc_cache.cache_repo.arguments.lock_auto_renewal = true

; ##############
; BEAKER SESSION
; ##############

; beaker.session.type is type of storage options for the logged users sessions. Current allowed
; types are file, ext:redis, ext:database, ext:memcached
; Fastest ones are ext:redis and ext:database, DO NOT use memory type for session
#beaker.session.type = file
#beaker.session.data_dir = %(here)s/data/sessions

; Redis based sessions
beaker.session.type = ext:redis
beaker.session.url = redis://redis:6379/2

; DB based session, fast, and allows easy management over logged in users
#beaker.session.type = ext:database
#beaker.session.table_name = db_session
#beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
#beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
#beaker.session.sa.pool_recycle = 3600
#beaker.session.sa.echo = false

beaker.session.key = rhodecode
beaker.session.secret = develop-rc-uytcxaz
beaker.session.lock_dir = /data_ramdisk/lock

; Secure encrypted cookie. Requires AES and AES python libraries
; you must disable beaker.session.secret to use this
#beaker.session.encrypt_key = key_for_encryption
#beaker.session.validate_key = validation_key

; Sets session as invalid (also logging out user) if it has not been
; accessed for given amount of time in seconds
beaker.session.timeout = 2592000
beaker.session.httponly = true

; Path to use for the cookie. Set to prefix if you use prefix middleware
#beaker.session.cookie_path = /custom_prefix

; Set https secure cookie
beaker.session.secure = false

; default cookie expiration time in seconds, set to `true` to set expire
; at browser close
#beaker.session.cookie_expires = 3600

; #############################
; SEARCH INDEXING CONFIGURATION
; #############################

; Full text search indexer is available in rhodecode-tools under
; `rhodecode-tools index` command

; WHOOSH Backend, doesn't require additional services to run
; it works well with a few dozen repos
search.module = rhodecode.lib.index.whoosh
search.location = %(here)s/data/index

; ####################
; CHANNELSTREAM CONFIG
; ####################

; channelstream enables persistent connections and live notification
; in the system. It's also used by the chat system
channelstream.enabled = true

; server address for channelstream server on the backend
channelstream.server = channelstream:9800

; location of the channelstream server from outside world
; use ws:// for http or wss:// for https. This address needs to be handled
; by external HTTP server such as Nginx or Apache
; see Nginx/Apache configuration examples in our docs
channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
channelstream.secret = ENV_GENERATED
channelstream.history.location = /var/opt/rhodecode_data/channelstream_history

; Internal application path that Javascript uses to connect into.
; If you use proxy-prefix the prefix should be added before /_channelstream
channelstream.proxy_path = /_channelstream
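
; Illustrative example (not a default): when RhodeCode is served under a custom
; prefix via the proxy-prefix filter above, include that prefix here as well
#channelstream.proxy_path = /custom_prefix/_channelstream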

; ##############################
; MAIN RHODECODE DATABASE CONFIG
; ##############################

#sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
#sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
#sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
; pymysql is an alternative driver for MySQL, use in case of problems with the default one
#sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode

sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30

; see sqlalchemy docs for other advanced settings
; print the sql statements to output
sqlalchemy.db1.echo = false

; recycle the connections after this amount of seconds
sqlalchemy.db1.pool_recycle = 3600

; the number of connections to keep open inside the connection pool.
; 0 indicates no limit
; the general calculus with gevent is:
; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
; then increase pool size + max overflow so that they add up to 500.
#sqlalchemy.db1.pool_size = 5

; The number of connections to allow in connection pool "overflow", that is
; connections that can be opened above and beyond the pool_size setting,
; which defaults to five.
#sqlalchemy.db1.max_overflow = 10

; Connection check ping, used to detect broken database connections
; could be enabled to better handle cases of "MySQL has gone away" errors
#sqlalchemy.db1.ping_connection = true

; ##########
; VCS CONFIG
; ##########
vcs.server.enable = true
vcs.server = vcsserver:10010

; Web server connectivity protocol, responsible for web based VCS operations
; Available protocols are:
; `http` - use http-rpc backend (default)
vcs.server.protocol = http

; Push/Pull operations protocol, available options are:
; `http` - use http-rpc backend (default)
vcs.scm_app_implementation = http

; Push/Pull operations hooks protocol, available options are:
; `http` - use http-rpc backend (default)
; `celery` - use celery based hooks
#DEPRECATED:vcs.hooks.protocol = http
vcs.hooks.protocol.v2 = celery

; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
; accessible via network.
; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
vcs.hooks.host = *

; Start VCSServer with this instance as a subprocess, useful for development
vcs.start_server = false

; List of enabled VCS backends, available options are:
; `hg` - mercurial
; `git` - git
; `svn` - subversion
vcs.backends = hg, git, svn

; Wait this number of seconds before killing connection to the vcsserver
vcs.connection_timeout = 3600

; Cache flag to cache vcsserver remote calls locally
; It uses cache_region `cache_repo`
vcs.methods.cache = true

; Filesystem location where Git lfs objects should be stored
vcs.git.lfs.storage_location = /var/opt/rhodecode_repo_store/.cache/git_lfs_store

; Filesystem location where Mercurial largefile objects should be stored
vcs.hg.largefiles.storage_location = /var/opt/rhodecode_repo_store/.cache/hg_largefiles_store

; ####################################################
; Subversion proxy support (mod_dav_svn)
; Maps RhodeCode repo groups into SVN paths for Apache
; ####################################################

; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
; Set a numeric version for your current SVN e.g 1.8, or 1.12
; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
#vcs.svn.compatible_version = 1.8

; Redis connection settings for svn integrations logic
; This connection string needs to be the same on ce and vcsserver
vcs.svn.redis_conn = redis://redis:6379/0

; Enable SVN proxy of requests over HTTP
vcs.svn.proxy.enabled = true

; host to connect to running SVN subsystem
vcs.svn.proxy.host = http://svn:8090

; Enable or disable the config file generation.
svn.proxy.generate_config = true

; Generate config file with `SVNListParentPath` set to `On`.
svn.proxy.list_parent_path = true

; Set location and file name of generated config file.
svn.proxy.config_file_path = /etc/rhodecode/conf/svn/mod_dav_svn.conf

; alternative mod_dav config template. This needs to be a valid mako template
; Example template can be found in the source code:
; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
#svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako

; Used as a prefix to the `Location` block in the generated config file.
; In most cases it should be set to `/`.
svn.proxy.location_root = /

; Command to reload the mod dav svn configuration on change.
; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
; Make sure user who runs RhodeCode process is allowed to reload Apache
#svn.proxy.reload_cmd = /etc/init.d/apache2 reload

; If the timeout expires before the reload command finishes, the command will
; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
#svn.proxy.reload_timeout = 10

; ####################
; SSH Support Settings
; ####################

; Defines if a custom authorized_keys file should be created and written on
; any change of user ssh keys. Setting this to false also disables the possibility
; of adding SSH keys by users from the web interface. Super admins can still
; manage SSH Keys.
ssh.generate_authorized_keyfile = true

; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
# ssh.authorized_keys_ssh_opts =

; Path to the authorized_keys file where the generated entries are placed.
; It is possible to have multiple key files specified in `sshd_config` e.g.
; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
ssh.authorized_keys_file_path = /etc/rhodecode/conf/ssh/authorized_keys_rhodecode

; Command to execute the SSH wrapper. The binary is available in the
; RhodeCode installation directory.
; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2
#DEPRECATED: ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
ssh.wrapper_cmd.v2 = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2

; Allow shell when executing the ssh-wrapper command
ssh.wrapper_cmd_allow_shell = false

; Enables logging, and detailed output sent back to the client during SSH
; operations. Useful for debugging, shouldn't be used in production.
ssh.enable_debug_logging = true

; Paths to binary executables, by default they are the names, but we can
; override them if we want to use a custom one
ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg
ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git
ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve

; Enables SSH key generator web interface. Disabling this still allows users
; to add their own keys.
ssh.enable_ui_key_generator = true
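
; Illustrative example (not a default): set the key options written for each
; generated authorized_keys entry explicitly, e.g. the default set listed above
# ssh.authorized_keys_ssh_opts = no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding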

; Statsd client config, this is used to send metrics to statsd
; We recommend setting up statsd_exporter and scraping the metrics with Prometheus
#statsd.enabled = false
#statsd.statsd_host = 0.0.0.0
#statsd.statsd_port = 8125
#statsd.statsd_prefix =
#statsd.statsd_ipv6 = false

; configure logging automatically at server startup. Set to false
; to use the custom logging config below.
; RC_LOGGING_FORMATTER
; RC_LOGGING_LEVEL
; env variables can control the settings for logging in case of autoconfigure
#logging.autoconfigure = true

; specify your own custom logging config file to configure logging
#logging.logging_conf_file = /path/to/custom_logging.ini

; Dummy marker to add new entries after.
; Add any custom entries below. Please don't remove this marker.
custom.conf = 1

; #####################
; LOGGING CONFIGURATION
; #####################

[loggers]
keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper

[handlers]
keys = console, console_sql

[formatters]
keys = generic, json, color_formatter, color_formatter_sql

; #######
; LOGGERS
; #######
[logger_root]
level = NOTSET
handlers = console

[logger_sqlalchemy]
level = INFO
handlers = console_sql
qualname = sqlalchemy.engine
propagate = 0

[logger_beaker]
level = DEBUG
handlers =
qualname = beaker.container
propagate = 1

[logger_rhodecode]
level = DEBUG
handlers =
qualname = rhodecode
propagate = 1

[logger_ssh_wrapper]
level = DEBUG
handlers =
qualname = ssh_wrapper
propagate = 1

[logger_celery]
level = DEBUG
handlers =
qualname = celery

; ########
; HANDLERS
; ########

[handler_console]
class = StreamHandler
args = (sys.stderr, )
level = DEBUG
; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
; This allows sending properly formatted logs to grafana loki or elasticsearch
formatter = color_formatter

[handler_console_sql]
; "level = DEBUG" logs SQL queries and results.
; "level = INFO" logs SQL queries.
; "level = WARN" logs neither. (Recommended for production systems.)
class = StreamHandler
args = (sys.stderr, )
level = WARN
; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
; This allows sending properly formatted logs to grafana loki or elasticsearch
formatter = color_formatter_sql

; ##########
; FORMATTERS
; ##########

[formatter_generic]
class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %Y-%m-%d %H:%M:%S

[formatter_color_formatter]
class = rhodecode.lib.logging_formatter.ColorFormatter
format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %Y-%m-%d %H:%M:%S

[formatter_color_formatter_sql]
class = rhodecode.lib.logging_formatter.ColorFormatterSql
format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %Y-%m-%d %H:%M:%S

[formatter_json]
format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
class = rhodecode.lib._vendor.jsonlogger.JsonFormatter