@@ -0,0 +1,45 b'' | |||
|
1 | #!/usr/bin/env bash | |
|
2 | set -euo pipefail | |
|
3 | IFS=$'\n\t' | |
|
4 | # e.g. 4.24.1 | |
|
5 | source ../.env | |
|
6 | VER=$RC_VERSION | |
|
7 | INSTALLER_URL=https://dls.rhodecode.com/dls/N2E2ZTY1NzA3NjYxNDA2NTc1NjI3MTcyNzA2MjcxNzIyZTcwNjI3YQ==/rhodecode-control/latest-linux-ee | |
|
8 | ||
|
9 | echo "Downloading Artifacts for version: $VER" | |
|
10 | ||
|
11 | MANIFEST=https://dls.rhodecode.com/linux/MANIFEST | |
|
12 | CACHE_DIR=../.cache | |
|
13 | VER_REGEX="$VER+x86_64" | |
|
14 | ||
|
15 | echo "Downloading locale-archive" | |
|
16 | curl -L https://dls.rhodecode.com/assets/locale-archive -J -O | |
|
17 | mv -v locale-archive $CACHE_DIR | |
|
18 | ||
|
19 | ARTS=$(curl -s $MANIFEST | grep --ignore-case "$VER_REGEX" | cut -d ' ' -f 2) | |
|
20 | ||
|
21 | # vcsserver/ce/ee | |
|
22 | echo "Found following $ARTS" | |
|
23 | ||
|
24 | for url in $ARTS; do | |
|
25 | echo "Downloading $url" | |
|
26 | curl -L ${url} -J -O | |
|
27 | done | |
|
28 | ||
|
29 | ## rhodecode control | |
|
30 | #for url in $(curl -s $MANIFEST | grep --ignore-case -E 'control.+\+x86_64' | cut -d ' ' -f 2); do | |
|
31 | # echo "Downloading $url" | |
|
32 | # curl -L ${url} -J -O | |
|
33 | #done | |
|
34 | ||
|
35 | ## installer | |
|
36 | echo "Downloading installer from $INSTALLER_URL" | |
|
37 | curl -L $INSTALLER_URL -J -O | |
|
38 | ||
|
39 | INSTALLER=$(ls -Art RhodeCode-installer-* | tail -n 1) | |
|
40 | if [[ -n $INSTALLER ]]; then | |
|
41 | chmod +x "${INSTALLER}" | |
|
42 | fi | |
|
43 | ||
|
44 | mv -v "${INSTALLER}" $CACHE_DIR | |
|
45 | mv -v *.bz2 $CACHE_DIR |
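A quick usage sketch for the script above (the filename and its directory are assumptions; since it sources ../.env and writes into ../.cache, it is expected to run from a subdirectory of the project):

    cd scripts                   # hypothetical location of the script
    ./download_artifacts.sh      # hypothetical filename
    ls ../.cache                 # installer, locale-archive and *.tar.bz2 artifacts land here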
|
1 | NO CONTENT: new file 100644 |
@@ -0,0 +1,24 b'' | |||
|
1 | COMPOSE_PROJECT_NAME=rc_cluster | |
|
2 | TZ="UTC" | |
|
3 | ||
|
4 | # Version to deploy and run | |
|
5 | RC_VERSION="4.24.1" | |
|
6 | ||
|
7 | # Database access credentials | |
|
8 | POSTGRES_DB=rhodecode | |
|
9 | POSTGRES_PASSWORD=hUc1adS7oDd6Oj3in3 | |
|
10 | ||
|
11 | # base url for running app | |
|
12 | RHODECODE_BASE_URL=http://localhost:8888 | |
|
13 | ||
|
14 | # HTTP and HTTPS ports for running app | |
|
15 | RC_HTTP_PORT=8888 | |
|
16 | RC_HTTPS_PORT=8443 | |
|
17 | ||
|
18 | # Exposed SSH port; for increased security, do not use the default port 22 | |
|
19 | RC_SSH_PORT=9022 | |
|
20 | ||
|
21 | # user/password for the first admin user created for initial access | |
|
22 | RHODECODE_USER_EMAIL=admin@rhodecode.com | |
|
23 | RHODECODE_USER_NAME=admin | |
|
24 | RHODECODE_USER_PASS=qweqwe |
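Compose reads this .env file automatically for ${...} substitution in docker-compose.yaml, and values exported in the shell take precedence over it. A quick way to check the resolved values (sketch):

    RC_VERSION=4.24.1 docker-compose config | grep 'image:'
    # should print e.g.: image: rhodecode/app:4.24.1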
@@ -0,0 +1,12 b'' | |||
|
1 | # CACHE FILES | |
|
2 | .cache/RhodeCode-installer* | |
|
3 | .cache/*.tar.bz2 | |
|
4 | .cache/locale-archive | |
|
5 | ||
|
6 | # LOGS | |
|
7 | logs/*.log | |
|
8 | logs/nginx/*.log | |
|
9 | logs/svn/*.log | |
|
10 | ||
|
11 | .idea | |
|
12 | config/compose/.rcmetadata.json |
@@ -0,0 +1,22 b'' | |||
|
1 | [channelstream] | |
|
2 | host = 0.0.0.0 | |
|
3 | port = 9800 | |
|
4 | ||
|
5 | admin_user = admin | |
|
6 | # admin panel password | |
|
7 | admin_secret = b39acb28b2304a27a6a0e911500bf7d1 | |
|
8 | # auth cookie secret, leave empty for random string generated at server start | |
|
9 | # fill in if you run multiple servers and want to keep the admin session shared between them | |
|
10 | cookie_secret = | |
|
11 | ||
|
12 | # API password | |
|
13 | secret = b39acb28b2304a27a6a0e911500bf7d1 | |
|
14 | ||
|
15 | demo = false | |
|
16 | allow_posting_from = 127.0.0.1, | |
|
17 |                      0.0.0.0 | |
|
18 | log_level = INFO | |
|
19 | # should SSL connections be required? | |
|
20 | enforce_https = | |
|
21 | # enforce https:// in links | |
|
22 | http_scheme = |
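A minimal reachability check against the settings above, mirroring the compose healthcheck; the hostname `channelstream` only resolves inside the compose network:

    curl -s -o /dev/null -w '%{http_code}\n' http://channelstream:9800/admin/sign_in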
@@ -0,0 +1,736 b'' | |||
|
1 | ||
|
2 | ; ########################################## | |
|
3 | ; RHODECODE ENTERPRISE EDITION CONFIGURATION | |
|
4 | ; ########################################## | |
|
5 | ||
|
6 | [DEFAULT] | |
|
7 | ; Debug flag sets all loggers to debug, and enables request tracking | |
|
8 | debug = false | |
|
9 | ||
|
10 | ; ######################################################################## | |
|
11 | ; EMAIL CONFIGURATION | |
|
12 | ; These settings will be used by the RhodeCode mailing system | |
|
13 | ; ######################################################################## | |
|
14 | ||
|
15 | ; prefix all email subjects with the given prefix, helps with filtering emails | |
|
16 | #email_prefix = [RhodeCode] | |
|
17 | ||
|
18 | ; email FROM address from which all mails will be sent | |
|
19 | #app_email_from = rhodecode-noreply@localhost | |
|
20 | ||
|
21 | #smtp_server = mail.server.com | |
|
22 | #smtp_username = | |
|
23 | #smtp_password = | |
|
24 | #smtp_port = | |
|
25 | #smtp_use_tls = false | |
|
26 | #smtp_use_ssl = true | |
|
27 | ||
|
28 | ||
|
29 | [server:main] | |
|
30 | ; COMMON HOST/IP CONFIG | |
|
31 | host = 0.0.0.0 | |
|
32 | port = 10020 | |
|
33 | ||
|
34 | ||
|
35 | ; ########################### | |
|
36 | ; GUNICORN APPLICATION SERVER | |
|
37 | ; ########################### | |
|
38 | ||
|
39 | ; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini | |
|
40 | ||
|
41 | ; Module to use, this setting shouldn't be changed | |
|
42 | use = egg:gunicorn#main | |
|
43 | ||
|
44 | ; Sets the number of process workers. More workers means more concurrent connections | |
|
45 | ; RhodeCode can handle at the same time. Each additional worker also increases | |
|
46 | ; memory usage, as each has its own set of caches. | |
|
47 | ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more | |
|
48 | ; than 8-10 unless for really big deployments, e.g. 700-1000 users. | |
|
49 | ; `instance_id = *` must be set in the [app:main] section below (which is the default) | |
|
50 | ; when using more than 1 worker. | |
|
51 | workers = 2 | |
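A one-liner to compute the recommended worker count from the formula in the comment above (bash sketch, run on the host):

    echo $(( 2 * $(nproc) + 1 ))    # prints 5 on a 2-CPU machine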
|
52 | ||
|
53 | ; Gunicorn access log level | |
|
54 | loglevel = info | |
|
55 | ||
|
56 | ; Process name visible in process list | |
|
57 | proc_name = gunicorn-web-1 | |
|
58 | ||
|
59 | ; Type of worker class, one of `sync`, `gevent` | |
|
60 | ; Recommended type is `gevent` | |
|
61 | worker_class = gevent | |
|
62 | ||
|
63 | ; The maximum number of simultaneous clients per worker. Valid only for gevent | |
|
64 | worker_connections = 10 | |
|
65 | ||
|
66 | ; Max number of requests that worker will handle before being gracefully restarted. | |
|
67 | ; Prevents memory leaks, jitter adds variability so not all workers are restarted at once. | |
|
68 | max_requests = 2000 | |
|
69 | max_requests_jitter = 100 | |
|
70 | ||
|
71 | ; Amount of time a worker can spend handling a request before it | |
|
72 | ; gets killed and restarted. By default set to 21600 (6hrs) | |
|
73 | ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h) | |
|
74 | timeout = 21600 | |
|
75 | ||
|
76 | ; The maximum size of HTTP request line in bytes. | |
|
77 | ; 0 for unlimited | |
|
78 | limit_request_line = 0 | |
|
79 | ||
|
80 | ; Limit the number of HTTP headers fields in a request. | |
|
81 | ; By default this value is 100 and can't be larger than 32768. | |
|
82 | limit_request_fields = 32768 | |
|
83 | ||
|
84 | ; Limit the allowed size of an HTTP request header field. | |
|
85 | ; Value is a positive number or 0. | |
|
86 | ; Setting it to 0 will allow unlimited header field sizes. | |
|
87 | limit_request_field_size = 0 | |
|
88 | ||
|
89 | ; Timeout for graceful workers restart. | |
|
90 | ; After receiving a restart signal, workers have this much time to finish | |
|
91 | ; serving requests. Workers still alive after the timeout (starting from the | |
|
92 | ; receipt of the restart signal) are force killed. | |
|
93 | ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h) | |
|
94 | graceful_timeout = 3600 | |
|
95 | ||
|
96 | # The number of seconds to wait for requests on a Keep-Alive connection. | |
|
97 | # Generally set in the 1-5 seconds range. | |
|
98 | keepalive = 2 | |
|
99 | ||
|
100 | ; Maximum memory usage that each worker can use before it will receive a | |
|
101 | ; graceful restart signal. 0 = memory monitoring is disabled | |
|
102 | ; Examples: 268435456 (256MB), 536870912 (512MB) | |
|
103 | ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB) | |
|
104 | memory_max_usage = 2147483648 | |
|
105 | ||
|
106 | ; How often in seconds to check for memory usage for each gunicorn worker | |
|
107 | memory_usage_check_interval = 60 | |
|
108 | ||
|
109 | ; Threshold value below which we don't recycle a worker if GarbageCollection | |
|
110 | ; frees up enough resources. Before each restart we try to run GC on the worker; | |
|
111 | ; if we get enough free memory after that, the restart will not happen. | |
|
112 | memory_usage_recovery_threshold = 0.8 | |
|
113 | ||
|
114 | ||
|
115 | ; Prefix middleware for RhodeCode. | |
|
116 | ; Recommended when using a proxy setup; | |
|
117 | ; allows serving RhodeCode under a URL prefix on the server, | |
|
118 | ; e.g. https://server.com/custom_prefix. Enable the `filter-with =` option below as well. | |
|
119 | ; And set your prefix like: `prefix = /custom_prefix` | |
|
120 | ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need | |
|
121 | ; to make your cookies only work on prefix url | |
|
122 | [filter:proxy-prefix] | |
|
123 | use = egg:PasteDeploy#prefix | |
|
124 | prefix = / | |
|
125 | ||
|
126 | [app:main] | |
|
127 | ; The %(here)s variable will be replaced with the absolute path of parent directory | |
|
128 | ; of this file | |
|
129 | ; In addition, ENVIRONMENT variables can be used, e.g. | |
|
130 | ; sqlalchemy.db1.url = {ENV_RC_DB_URL} | |
|
131 | ||
|
132 | use = egg:rhodecode-enterprise-ee | |
|
133 | ||
|
134 | ; enable proxy prefix middleware, defined above | |
|
135 | #filter-with = proxy-prefix | |
|
136 | ||
|
137 | ; encryption key used to encrypt social plugin tokens, | |
|
138 | ; remote_urls with credentials etc, if not set it defaults to | |
|
139 | ; `beaker.session.secret` | |
|
140 | #rhodecode.encrypted_values.secret = | |
|
141 | ||
|
142 | ; decryption strict mode (enabled by default). It controls if decryption raises | |
|
143 | ; `SignatureVerificationError` in case of wrong key, or damaged encryption data. | |
|
144 | #rhodecode.encrypted_values.strict = false | |
|
145 | ||
|
146 | ; Pick algorithm for encryption. Either fernet (more secure) or aes (default) | |
|
147 | ; fernet is safer, and we strongly recommend switching to it. | |
|
148 | ; Due to backward compatibility aes is used as default. | |
|
149 | #rhodecode.encrypted_values.algorithm = fernet | |
|
150 | ||
|
151 | ; Return gzipped responses from RhodeCode (static files/application) | |
|
152 | gzip_responses = false | |
|
153 | ||
|
154 | ; Auto-generate javascript routes file on startup | |
|
155 | generate_js_files = false | |
|
156 | ||
|
157 | ; System global default language. | |
|
158 | ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh | |
|
159 | lang = en | |
|
160 | ||
|
161 | ; Perform a full repository scan and import on each server start. | |
|
162 | ; Setting this to true could lead to a very long startup time. | |
|
163 | startup.import_repos = false | |
|
164 | ||
|
165 | ; Uncomment and set this path to use archive download cache. | |
|
166 | ; Once enabled, generated archives will be cached at this location | |
|
167 | ; and served from the cache during subsequent requests for the same archive of | |
|
168 | ; the repository. | |
|
169 | archive_cache_dir = /etc/rhodecode/conf/data/tarballcache | |
|
170 | ||
|
171 | ; URL at which the application is running. This is used for bootstrapping | |
|
172 | ; requests in a context where no web request is available, e.g. in ishell or | |
|
173 | ; SSH calls. Set this so events receive a proper URL for SSH calls. | |
|
174 | app.base_url = {ENV_RC_BASE_URL} | |
|
175 | ||
|
176 | ; Unique application ID. Should be a random unique string for security. | |
|
177 | app_instance_uuid = 4442f2dac4dc4fb982f781546735bb99 | |
|
178 | ||
|
179 | ; Cut off limit for large diffs (size in bytes). If overall diff size on | |
|
180 | ; commit, or pull request exceeds this limit this diff will be displayed | |
|
181 | ; partially. E.g 512000 == 512Kb | |
|
182 | cut_off_limit_diff = 512000 | |
|
183 | ||
|
184 | ; Cut off limit for large files inside diffs (size in bytes). Each individual | |
|
185 | ; file inside diff which exceeds this limit will be displayed partially. | |
|
186 | ; E.g 128000 == 128Kb | |
|
187 | cut_off_limit_file = 128000 | |
|
188 | ||
|
189 | ; Use cached version of vcs repositories everywhere. Recommended to be `true` | |
|
190 | vcs_full_cache = true | |
|
191 | ||
|
192 | ; Force https in RhodeCode, fixes https redirects, assumes it's always https. | |
|
193 | ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache | |
|
194 | force_https = false | |
|
195 | ||
|
196 | ; use Strict-Transport-Security headers | |
|
197 | use_htsts = false | |
|
198 | ||
|
199 | ; Set to true if your repos are exposed using the dumb protocol | |
|
200 | git_update_server_info = false | |
|
201 | ||
|
202 | ; RSS/ATOM feed options | |
|
203 | rss_cut_off_limit = 256000 | |
|
204 | rss_items_per_page = 10 | |
|
205 | rss_include_diff = false | |
|
206 | ||
|
207 | ; gist URL alias, used to create nicer URLs for gists. This should be a | |
|
208 | ; URL that rewrites to _admin/gists/{gistid}. | |
|
209 | ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal | |
|
210 | ; RhodeCode URL, i.e. http[s]://rhodecode.server/_admin/gists/{gistid} | |
|
211 | gist_alias_url = | |
|
212 | ||
|
213 | ; List of views (using glob pattern syntax) that AUTH TOKENS could be | |
|
214 | ; used for access. | |
|
215 | ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it | |
|
216 | ; came from the logged-in user who owns this authentication token. | |
|
217 | ; Additionally, the @TOKEN syntax can be used to bind the view to a specific | |
|
218 | ; authentication token. Such a view is only accessible when used together | |
|
219 | ; with this authentication token | |
|
220 | ; list of all views can be found under `/_admin/permissions/auth_token_access` | |
|
221 | ; The list should be "," separated and on a single line. | |
|
222 | ; Most common views to enable: | |
|
223 | ||
|
224 | # RepoCommitsView:repo_commit_download | |
|
225 | # RepoCommitsView:repo_commit_patch | |
|
226 | # RepoCommitsView:repo_commit_raw | |
|
227 | # RepoCommitsView:repo_commit_raw@TOKEN | |
|
228 | # RepoFilesView:repo_files_diff | |
|
229 | # RepoFilesView:repo_archivefile | |
|
230 | # RepoFilesView:repo_file_raw | |
|
231 | # GistView:* | |
|
232 | api_access_controllers_whitelist = | |
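As a hedged illustration of the whitelist: if RepoFilesView:repo_file_raw were enabled above, a raw file could be fetched with a token instead of a login session (host, repo and file path are placeholders):

    curl 'https://rhodecode.server/myrepo/raw/master/README.rst?auth_token=TOKEN_HASH'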
|
233 | ||
|
234 | ; Default encoding used to convert from and to unicode | |
|
235 | ; can be also a comma separated list of encoding in case of mixed encodings | |
|
236 | default_encoding = UTF-8 | |
|
237 | ||
|
238 | ; instance-id prefix | |
|
239 | ; a prefix key for this instance used for cache invalidation when running | |
|
240 | ; multiple instances of RhodeCode, make sure it's globally unique for | |
|
241 | ; all running RhodeCode instances. Leave empty if you don't use it | |
|
242 | instance_id = | |
|
243 | ||
|
244 | ; Fallback authentication plugin. Set this to a plugin ID to force the usage | |
|
245 | ; of an authentication plugin even if it is disabled by its settings. | |
|
246 | ; This could be useful if you are unable to log in to the system due to broken | |
|
247 | ; authentication settings. Then you can enable e.g. the internal RhodeCode auth | |
|
248 | ; module to log in again and fix the settings. | |
|
249 | ; Available builtin plugin IDs (hash is part of the ID): | |
|
250 | ; egg:rhodecode-enterprise-ce#rhodecode | |
|
251 | ; egg:rhodecode-enterprise-ce#pam | |
|
252 | ; egg:rhodecode-enterprise-ce#ldap | |
|
253 | ; egg:rhodecode-enterprise-ce#jasig_cas | |
|
254 | ; egg:rhodecode-enterprise-ce#headers | |
|
255 | ; egg:rhodecode-enterprise-ce#crowd | |
|
256 | ||
|
257 | #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode | |
|
258 | ||
|
259 | ; Flag to control loading of legacy plugins in py:/path format | |
|
260 | auth_plugin.import_legacy_plugins = true | |
|
261 | ||
|
262 | ; alternative return HTTP header for failed authentication. Default HTTP | |
|
263 | ; response is 401 HTTPUnauthorized. Currently HG clients have trouble | |
|
264 | ; handling that, causing a series of failed authentication calls. | |
|
265 | ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code | |
|
266 | ; This will be served instead of default 401 on bad authentication | |
|
267 | auth_ret_code = | |
|
268 | ||
|
269 | ; use special detection method when serving auth_ret_code, instead of serving | |
|
270 | ; ret_code directly, use 401 initially (which triggers a credentials prompt) | |
|
271 | ; and then serve auth_ret_code to clients | |
|
272 | auth_ret_code_detection = false | |
|
273 | ||
|
274 | ; locking return code. When repository is locked return this HTTP code. 2XX | |
|
275 | ; codes don't break the transactions while 4XX codes do | |
|
276 | lock_ret_code = 423 | |
|
277 | ||
|
278 | ; allows changing the repository location in the settings page | |
|
279 | allow_repo_location_change = true | |
|
280 | ||
|
281 | ; allows setting up custom hooks in the settings page | |
|
282 | allow_custom_hooks_settings = true | |
|
283 | ||
|
284 | ; Generated license token required for EE edition license. | |
|
285 | ; New generated token value can be found in Admin > settings > license page. | |
|
286 | license_token = abra-cada-bra1-rce3 | |
|
287 | ||
|
288 | ; This flag hides sensitive information on the license page such as token, and license data | |
|
289 | license.hide_license_info = false | |
|
290 | ||
|
291 | ; supervisor connection uri, for managing supervisor and logs. | |
|
292 | supervisor.uri = 127.0.0.1:10001 | |
|
293 | ||
|
294 | ; supervisord group name/id we only want this RC instance to handle | |
|
295 | supervisor.group_id = web-1 | |
|
296 | ||
|
297 | ; Display extended labs settings | |
|
298 | labs_settings_active = true | |
|
299 | ||
|
300 | ; Custom exception store path, defaults to TMPDIR | |
|
301 | ; This is used to store exceptions from RhodeCode in a shared directory | |
|
302 | #exception_tracker.store_path = | |
|
303 | ||
|
304 | ; File store configuration. This is used to store and serve uploaded files | |
|
305 | file_store.enabled = true | |
|
306 | ||
|
307 | ; Storage backend, available options are: local | |
|
308 | file_store.backend = local | |
|
309 | ||
|
310 | ; path to store the uploaded binaries | |
|
311 | file_store.storage_path = /var/opt/rhodecode_data/file_store | |
|
312 | ||
|
313 | ||
|
314 | ; ############# | |
|
315 | ; CELERY CONFIG | |
|
316 | ; ############# | |
|
317 | ||
|
318 | ; manually run celery: /path/to/celery worker -E --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini | |
|
319 | ||
|
320 | use_celery = true | |
|
321 | ||
|
322 | ; connection url to the message broker (default redis) | |
|
323 | celery.broker_url = redis://redis:6379/8 | |
|
324 | ||
|
325 | ; rabbitmq example | |
|
326 | #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost | |
|
327 | ||
|
328 | ; maximum tasks to execute before worker restart | |
|
329 | celery.max_tasks_per_child = 20 | |
|
330 | ||
|
331 | ; tasks will never be sent to the queue, but executed locally instead. | |
|
332 | celery.task_always_eager = false | |
|
333 | ||
|
334 | ; ############# | |
|
335 | ; DOGPILE CACHE | |
|
336 | ; ############# | |
|
337 | ||
|
338 | ; Default cache dir for caches. Putting this into a ramdisk can boost performance. | |
|
339 | ; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space | |
|
340 | cache_dir = /var/opt/rhodecode_data | |
|
341 | ||
|
342 | ; ********************************************* | |
|
343 | ; `sql_cache_short` cache for heavy SQL queries | |
|
344 | ; Only supported backend is `memory_lru` | |
|
345 | ; ********************************************* | |
|
346 | rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru | |
|
347 | rc_cache.sql_cache_short.expiration_time = 30 | |
|
348 | ||
|
349 | ||
|
350 | ; ***************************************************** | |
|
351 | ; `cache_repo_longterm` cache for repo object instances | |
|
352 | ; Only supported backend is `memory_lru` | |
|
353 | ; ***************************************************** | |
|
354 | rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru | |
|
355 | ; by default we use 30 Days, cache is still invalidated on push | |
|
356 | rc_cache.cache_repo_longterm.expiration_time = 2592000 | |
|
357 | ; max items in LRU cache, set to smaller number to save memory, and expire last used caches | |
|
358 | rc_cache.cache_repo_longterm.max_size = 10000 | |
|
359 | ||
|
360 | ||
|
361 | ; ************************************************* | |
|
362 | ; `cache_perms` cache for permission tree, auth TTL | |
|
363 | ; ************************************************* | |
|
364 | #rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace | |
|
365 | #rc_cache.cache_perms.expiration_time = 300 | |
|
366 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set | |
|
367 | #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms.db | |
|
368 | ||
|
369 | ; alternative `cache_perms` redis backend with distributed lock | |
|
370 | rc_cache.cache_perms.backend = dogpile.cache.rc.redis | |
|
371 | rc_cache.cache_perms.expiration_time = 300 | |
|
372 | ||
|
373 | ; redis_expiration_time needs to be greater than expiration_time | |
|
374 | rc_cache.cache_perms.arguments.redis_expiration_time = 7200 | |
|
375 | ||
|
376 | rc_cache.cache_perms.arguments.host = redis | |
|
377 | rc_cache.cache_perms.arguments.port = 6379 | |
|
378 | rc_cache.cache_perms.arguments.db = 0 | |
|
379 | rc_cache.cache_perms.arguments.socket_timeout = 30 | |
|
380 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends | |
|
381 | #rc_cache.cache_perms.arguments.distributed_lock = true | |
|
382 | ||
|
383 | ||
|
384 | ; *************************************************** | |
|
385 | ; `cache_repo` cache for file tree, Readme, RSS FEEDS | |
|
386 | ; *************************************************** | |
|
387 | #rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace | |
|
388 | #rc_cache.cache_repo.expiration_time = 2592000 | |
|
389 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set | |
|
390 | #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo.db | |
|
391 | ||
|
392 | ; alternative `cache_repo` redis backend with distributed lock | |
|
393 | rc_cache.cache_repo.backend = dogpile.cache.rc.redis | |
|
394 | rc_cache.cache_repo.expiration_time = 2592000 | |
|
395 | ||
|
396 | ; redis_expiration_time needs to be greater than expiration_time | |
|
397 | #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400 | |
|
398 | ||
|
399 | rc_cache.cache_repo.arguments.host = redis | |
|
400 | rc_cache.cache_repo.arguments.port = 6379 | |
|
401 | rc_cache.cache_repo.arguments.db = 1 | |
|
402 | rc_cache.cache_repo.arguments.socket_timeout = 30 | |
|
403 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends | |
|
404 | #rc_cache.cache_repo.arguments.distributed_lock = true | |
|
405 | ||
|
406 | ||
|
407 | ; ############## | |
|
408 | ; BEAKER SESSION | |
|
409 | ; ############## | |
|
410 | ||
|
411 | ; beaker.session.type is the type of storage used for logged-in users' sessions. Currently allowed | |
|
412 | ; types are file, ext:redis, ext:database, ext:memcached, and memory (default if not specified). | |
|
413 | ; Fastest ones are Redis and ext:database | |
|
414 | #beaker.session.type = file | |
|
415 | #beaker.session.data_dir = /var/opt/rhodecode_data/sessions | |
|
416 | ||
|
417 | ; Redis based sessions | |
|
418 | beaker.session.type = ext:redis | |
|
419 | beaker.session.url = redis://redis:6379/2 | |
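A quick sanity check that sessions are actually landing in Redis db 2 (a sketch; `redis` is the service name from the compose file, and redis-cli must be available where this is run):

    redis-cli -h redis -n 2 dbsize    # the key count should grow after web logins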
|
420 | ||
|
421 | ; DB based sessions are fast and allow easy management of logged-in users | |
|
422 | #beaker.session.type = ext:database | |
|
423 | #beaker.session.table_name = db_session | |
|
424 | #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode | |
|
425 | #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode | |
|
426 | #beaker.session.sa.pool_recycle = 3600 | |
|
427 | #beaker.session.sa.echo = false | |
|
428 | ||
|
429 | beaker.session.key = http_app | |
|
430 | beaker.session.secret = b39acb28b2304a27a6a0e911500bf7d1 | |
|
431 | beaker.session.lock_dir = /data_ramdisk/lock | |
|
432 | ||
|
433 | ; Secure encrypted cookie. Requires AES and AES python libraries | |
|
434 | ; you must disable beaker.session.secret to use this | |
|
435 | #beaker.session.encrypt_key = key_for_encryption | |
|
436 | #beaker.session.validate_key = validation_key | |
|
437 | ||
|
438 | ; Sets the session as invalid (also logging out the user) if it has not been | |
|
439 | ; accessed for the given amount of time, in seconds | |
|
440 | beaker.session.timeout = 2592000 | |
|
441 | beaker.session.httponly = true | |
|
442 | ||
|
443 | ; Path to use for the cookie. Set to prefix if you use prefix middleware | |
|
444 | #beaker.session.cookie_path = /custom_prefix | |
|
445 | ||
|
446 | ; Set https secure cookie | |
|
447 | beaker.session.secure = false | |
|
448 | ||
|
449 | ; default cookie expiration time in seconds, set to `true` to expire | |
|
450 | ; at browser close | |
|
451 | #beaker.session.cookie_expires = 3600 | |
|
452 | ||
|
453 | ; ############################# | |
|
454 | ; SEARCH INDEXING CONFIGURATION | |
|
455 | ; ############################# | |
|
456 | ||
|
457 | ; Full text search indexer is available in rhodecode-tools under | |
|
458 | ; `rhodecode-tools index` command | |
|
459 | ||
|
460 | ; WHOOSH Backend, doesn't require additional services to run | |
|
461 | ; it works well with a few dozen repos | |
|
462 | #search.module = rhodecode.lib.index.whoosh | |
|
463 | #search.location = /var/opt/rhodecode_data/index | |
|
464 | ||
|
465 | ; ElasticSearch (EE edition only). Requires Elastic Search cluster | |
|
466 | ; to be installed and running. Recommended for a large number of repositories | |
|
467 | search.module = rc_elasticsearch | |
|
468 | search.location = http://elasticsearch:9200 | |
|
469 | ; specify Elastic Search version, 6 for latest or 2 for legacy | |
|
470 | search.es_version = 6 | |
|
471 | ||
|
472 | ; #################### | |
|
473 | ; CHANNELSTREAM CONFIG | |
|
474 | ; #################### | |
|
475 | ||
|
476 | ; channelstream enables persistent connections and live notifications | |
|
477 | ; in the system. It's also used by the chat system | |
|
478 | ||
|
479 | channelstream.enabled = true | |
|
480 | ||
|
481 | ; server address for channelstream server on the backend | |
|
482 | channelstream.server = channelstream:9800 | |
|
483 | ||
|
484 | ; location of the channelstream server from outside world | |
|
485 | ; use ws:// for http or wss:// for https. This address needs to be handled | |
|
486 | ; by external HTTP server such as Nginx or Apache | |
|
487 | ; see Nginx/Apache configuration examples in our docs | |
|
488 | channelstream.ws_url = ws://localhost:8888/_channelstream | |
|
489 | channelstream.secret = b39acb28b2304a27a6a0e911500bf7d1 | |
|
490 | channelstream.history.location = /var/opt/rhodecode_data/channelstream_history | |
|
491 | ||
|
492 | ; Internal application path that Javascript uses to connect to. | |
|
493 | ; If you use proxy-prefix the prefix should be added before /_channelstream | |
|
494 | channelstream.proxy_path = /_channelstream | |
|
495 | ||
|
496 | ; Live chat for commits/pull requests. Requires CHANNELSTREAM to be enabled | |
|
497 | ; and configured. (EE edition only) | |
|
498 | chat.enabled = false | |
|
499 | ||
|
500 | ||
|
501 | ; ############################## | |
|
502 | ; MAIN RHODECODE DATABASE CONFIG | |
|
503 | ; ############################## | |
|
504 | ||
|
505 | #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30 | |
|
506 | #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode | |
|
507 | #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8 | |
|
508 | ; pymysql is an alternative driver for MySQL, use in case of problems with default one | |
|
509 | #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode | |
|
510 | ||
|
511 | sqlalchemy.db1.url = postgresql://rhodecode:hUc1adS7oDd6Oj3in3@database/rhodecode | |
|
512 | ||
|
513 | ; see sqlalchemy docs for other advanced settings | |
|
514 | ; print the sql statements to output | |
|
515 | sqlalchemy.db1.echo = false | |
|
516 | ||
|
517 | ; recycle the connections after this amount of seconds | |
|
518 | sqlalchemy.db1.pool_recycle = 3600 | |
|
519 | sqlalchemy.db1.convert_unicode = true | |
|
520 | ||
|
521 | ; the number of connections to keep open inside the connection pool. | |
|
522 | ; 0 indicates no limit | |
|
523 | #sqlalchemy.db1.pool_size = 5 | |
|
524 | ||
|
525 | ; The number of connections to allow in connection pool "overflow", that is | |
|
526 | ; connections that can be opened above and beyond the pool_size setting, | |
|
527 | ; which defaults to five. | |
|
528 | #sqlalchemy.db1.max_overflow = 10 | |
|
529 | ||
|
530 | ; Connection check ping, used to detect broken database connections | |
|
531 | ; could be enabled to better handle cases if MySQL has gone away errors | |
|
532 | #sqlalchemy.db1.ping_connection = true | |
|
533 | ||
|
534 | ; ########## | |
|
535 | ; VCS CONFIG | |
|
536 | ; ########## | |
|
537 | vcs.server.enable = true | |
|
538 | vcs.server = vcsserver:10010 | |
|
539 | ||
|
540 | ; Web server connectivity protocol, responsible for web based VCS operations | |
|
541 | ; Available protocols are: | |
|
542 | ; `http` - use http-rpc backend (default) | |
|
543 | vcs.server.protocol = http | |
|
544 | ||
|
545 | ; Push/Pull operations protocol, available options are: | |
|
546 | ; `http` - use http-rpc backend (default) | |
|
547 | vcs.scm_app_implementation = http | |
|
548 | ||
|
549 | ; Push/Pull operations hooks protocol, available options are: | |
|
550 | ; `http` - use http-rpc backend (default) | |
|
551 | vcs.hooks.protocol = http | |
|
552 | ||
|
553 | ; Host on which this instance is listening for hooks. If vcsserver is in another location, | |
|
554 | ; this should be adjusted. | |
|
555 | vcs.hooks.host = rhodecode | |
|
556 | ||
|
557 | ; Start VCSServer with this instance as a subprocess, useful for development | |
|
558 | vcs.start_server = false | |
|
559 | ||
|
560 | ; List of enabled VCS backends, available options are: | |
|
561 | ; `hg` - mercurial | |
|
562 | ; `git` - git | |
|
563 | ; `svn` - subversion | |
|
564 | vcs.backends = hg, git, svn | |
|
565 | ||
|
566 | ; Wait this number of seconds before killing connection to the vcsserver | |
|
567 | vcs.connection_timeout = 3600 | |
|
568 | ||
|
569 | ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out. | |
|
570 | ; Available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible | |
|
571 | #vcs.svn.compatible_version = pre-1.8-compatible | |
|
572 | ||
|
573 | ||
|
574 | ; #################################################### | |
|
575 | ; Subversion proxy support (mod_dav_svn) | |
|
576 | ; Maps RhodeCode repo groups into SVN paths for Apache | |
|
577 | ; #################################################### | |
|
578 | ||
|
579 | ; Enable or disable the config file generation. | |
|
580 | svn.proxy.generate_config = true | |
|
581 | ||
|
582 | ; Generate config file with `SVNListParentPath` set to `On`. | |
|
583 | svn.proxy.list_parent_path = true | |
|
584 | ||
|
585 | ; Set location and file name of generated config file. | |
|
586 | svn.proxy.config_file_path = /etc/rhodecode/conf/svn/mod_dav_svn.conf | |
|
587 | ||
|
588 | ; alternative mod_dav config template. This needs to be a valid mako template | |
|
589 | ; Example template can be found in the source code: | |
|
590 | ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako | |
|
591 | #svn.proxy.config_template = ~/.rccontrol/community-1/custom_svn_conf.mako | |
|
592 | ||
|
593 | ; Used as a prefix to the `Location` block in the generated config file. | |
|
594 | ; In most cases it should be set to `/`. | |
|
595 | svn.proxy.location_root = / | |
|
596 | ||
|
597 | ; Command to reload the mod dav svn configuration on change. | |
|
598 | ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh | |
|
599 | ; Make sure the user who runs the RhodeCode process is allowed to reload Apache | |
|
600 | #svn.proxy.reload_cmd = /etc/init.d/apache2 reload | |
|
601 | ||
|
602 | ; If the timeout expires before the reload command finishes, the command will | |
|
603 | ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds. | |
|
604 | #svn.proxy.reload_timeout = 10 | |
|
605 | ||
|
606 | ; #################### | |
|
607 | ; SSH Support Settings | |
|
608 | ; #################### | |
|
609 | ||
|
610 | ; Defines if a custom authorized_keys file should be created and written on | |
|
611 | ; any change of user SSH keys. Setting this to false also disables the possibility | |
|
612 | ; of users adding SSH keys from the web interface. Super admins can still | |
|
613 | ; manage SSH Keys. | |
|
614 | ssh.generate_authorized_keyfile = true | |
|
615 | ||
|
616 | ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding` | |
|
617 | # ssh.authorized_keys_ssh_opts = | |
|
618 | ||
|
619 | ; Path to the authorized_keys file where the generated entries are placed. | |
|
620 | ; It is possible to have multiple key files specified in `sshd_config` e.g. | |
|
621 | ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode | |
|
622 | ssh.authorized_keys_file_path = /etc/rhodecode/conf/ssh/authorized_keys_rhodecode | |
|
623 | ||
|
624 | ; Command to execute the SSH wrapper. The binary is available in the | |
|
625 | ; RhodeCode installation directory. | |
|
626 | ; e.g ~/.rccontrol/community-1/profile/bin/rc-ssh-wrapper | |
|
627 | ssh.wrapper_cmd = /var/opt/rhodecode_bin/bin/rc-ssh-wrapper | |
|
628 | ||
|
629 | ; Allow shell when executing the ssh-wrapper command | |
|
630 | ssh.wrapper_cmd_allow_shell = false | |
|
631 | ||
|
632 | ; Enables logging and detailed output sent back to the client during SSH | |
|
633 | ; operations. Useful for debugging, shouldn't be used in production. | |
|
634 | ssh.enable_debug_logging = false | |
|
635 | ||
|
636 | ; Paths to binary executables; by default these are the binary names, but we can | |
|
637 | ; override them if we want to use custom ones | |
|
638 | ssh.executable.hg = ~/.rccontrol/vcsserver-1/profile/bin/hg | |
|
639 | ssh.executable.git = ~/.rccontrol/vcsserver-1/profile/bin/git | |
|
640 | ssh.executable.svn = ~/.rccontrol/vcsserver-1/profile/bin/svnserve | |
|
641 | ||
|
642 | ; Enables SSH key generator web interface. Disabling this still allows users | |
|
643 | ; to add their own keys. | |
|
644 | ssh.enable_ui_key_generator = true | |
|
645 | ||
|
646 | ; Dummy marker to add new entries after. | |
|
647 | ; Add any custom entries below. Please don't remove this marker. | |
|
648 | custom.conf = 1 | |
|
649 | ||
|
650 | ||
|
651 | ; ##################### | |
|
652 | ; LOGGING CONFIGURATION | |
|
653 | ; ##################### | |
|
654 | [loggers] | |
|
655 | keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper | |
|
656 | ||
|
657 | [handlers] | |
|
658 | keys = console, console_sql | |
|
659 | ||
|
660 | [formatters] | |
|
661 | keys = generic, color_formatter, color_formatter_sql | |
|
662 | ||
|
663 | ; ####### | |
|
664 | ; LOGGERS | |
|
665 | ; ####### | |
|
666 | [logger_root] | |
|
667 | level = NOTSET | |
|
668 | handlers = console | |
|
669 | ||
|
670 | [logger_sqlalchemy] | |
|
671 | level = INFO | |
|
672 | handlers = console_sql | |
|
673 | qualname = sqlalchemy.engine | |
|
674 | propagate = 0 | |
|
675 | ||
|
676 | [logger_beaker] | |
|
677 | level = DEBUG | |
|
678 | handlers = | |
|
679 | qualname = beaker.container | |
|
680 | propagate = 1 | |
|
681 | ||
|
682 | [logger_rhodecode] | |
|
683 | level = DEBUG | |
|
684 | handlers = | |
|
685 | qualname = rhodecode | |
|
686 | propagate = 1 | |
|
687 | ||
|
688 | [logger_ssh_wrapper] | |
|
689 | level = DEBUG | |
|
690 | handlers = | |
|
691 | qualname = ssh_wrapper | |
|
692 | propagate = 1 | |
|
693 | ||
|
694 | [logger_celery] | |
|
695 | level = DEBUG | |
|
696 | handlers = | |
|
697 | qualname = celery | |
|
698 | ||
|
699 | ||
|
700 | ; ######## | |
|
701 | ; HANDLERS | |
|
702 | ; ######## | |
|
703 | ||
|
704 | [handler_console] | |
|
705 | class = StreamHandler | |
|
706 | args = (sys.stderr, ) | |
|
707 | level = INFO | |
|
708 | formatter = generic | |
|
709 | ||
|
710 | [handler_console_sql] | |
|
711 | ; "level = DEBUG" logs SQL queries and results. | |
|
712 | ; "level = INFO" logs SQL queries. | |
|
713 | ; "level = WARN" logs neither. (Recommended for production systems.) | |
|
714 | class = StreamHandler | |
|
715 | args = (sys.stderr, ) | |
|
716 | level = WARN | |
|
717 | formatter = generic | |
|
718 | ||
|
719 | ; ########## | |
|
720 | ; FORMATTERS | |
|
721 | ; ########## | |
|
722 | ||
|
723 | [formatter_generic] | |
|
724 | class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter | |
|
725 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s | |
|
726 | datefmt = %Y-%m-%d %H:%M:%S | |
|
727 | ||
|
728 | [formatter_color_formatter] | |
|
729 | class = rhodecode.lib.logging_formatter.ColorFormatter | |
|
730 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s | |
|
731 | datefmt = %Y-%m-%d %H:%M:%S | |
|
732 | ||
|
733 | [formatter_color_formatter_sql] | |
|
734 | class = rhodecode.lib.logging_formatter.ColorFormatterSql | |
|
735 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s | |
|
736 | datefmt = %Y-%m-%d %H:%M:%S |
@@ -0,0 +1,1 b'' | |||
|
1 | --- LICENSE DATA SHOULD BE PASTED HERE --- No newline at end of file |
@@ -0,0 +1,199 b'' | |||
|
1 | ||
|
2 | ; ################################# | |
|
3 | ; RHODECODE VCSSERVER CONFIGURATION | |
|
4 | ; ################################# | |
|
5 | ||
|
6 | [server:main] | |
|
7 | ; COMMON HOST/IP CONFIG | |
|
8 | host = 0.0.0.0 | |
|
9 | port = 10010 | |
|
10 | ||
|
11 | ||
|
12 | ; ########################### | |
|
13 | ; GUNICORN APPLICATION SERVER | |
|
14 | ; ########################### | |
|
15 | ||
|
16 | ; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini | |
|
17 | ||
|
18 | ; Module to use, this setting shouldn't be changed | |
|
19 | use = egg:gunicorn#main | |
|
20 | ||
|
21 | ; Sets the number of process workers. More workers means more concurrent connections | |
|
22 | ; RhodeCode can handle at the same time. Each additional worker also increases | |
|
23 | ; memory usage, as each has its own set of caches. | |
|
24 | ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more | |
|
25 | ; than 8-10 unless for really big deployments, e.g. 700-1000 users. | |
|
26 | ; `instance_id = *` must be set in the [app:main] section below (which is the default) | |
|
27 | ; when using more than 1 worker. | |
|
28 | workers = 3 | |
|
29 | ||
|
30 | ; Gunicorn access log level | |
|
31 | loglevel = info | |
|
32 | ||
|
33 | ; Process name visible in process list | |
|
34 | proc_name = gunicorn-vcsserver-1 | |
|
35 | ||
|
36 | ; Type of worker class, one of `sync`, `gevent`; for the VCS Server | |
|
37 | ; currently `sync` is the only option allowed. | |
|
38 | worker_class = sync | |
|
39 | ||
|
40 | ; The maximum number of simultaneous clients. Valid only for gevent | |
|
41 | worker_connections = 10 | |
|
42 | ||
|
43 | ; Max number of requests that worker will handle before being gracefully restarted. | |
|
44 | ; Prevents memory leaks, jitter adds variability so not all workers are restarted at once. | |
|
45 | max_requests = 3000 | |
|
46 | max_requests_jitter = 100 | |
|
47 | ||
|
48 | ; Amount of time a worker can spend handling a request before it | |
|
49 | ; gets killed and restarted. By default set to 21600 (6hrs) | |
|
50 | ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h) | |
|
51 | timeout = 21600 | |
|
52 | ||
|
53 | ; The maximum size of HTTP request line in bytes. | |
|
54 | ; 0 for unlimited | |
|
55 | limit_request_line = 0 | |
|
56 | ||
|
57 | ; Limit the number of HTTP headers fields in a request. | |
|
58 | ; By default this value is 100 and can't be larger than 32768. | |
|
59 | limit_request_fields = 32768 | |
|
60 | ||
|
61 | ; Limit the allowed size of an HTTP request header field. | |
|
62 | ; Value is a positive number or 0. | |
|
63 | ; Setting it to 0 will allow unlimited header field sizes. | |
|
64 | limit_request_field_size = 0 | |
|
65 | ||
|
66 | ; Timeout for graceful workers restart. | |
|
67 | ; After receiving a restart signal, workers have this much time to finish | |
|
68 | ; serving requests. Workers still alive after the timeout (starting from the | |
|
69 | ; receipt of the restart signal) are force killed. | |
|
70 | ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h) | |
|
71 | graceful_timeout = 3600 | |
|
72 | ||
|
73 | # The number of seconds to wait for requests on a Keep-Alive connection. | |
|
74 | # Generally set in the 1-5 seconds range. | |
|
75 | keepalive = 2 | |
|
76 | ||
|
77 | ; Maximum memory usage that each worker can use before it will receive a | |
|
78 | ; graceful restart signal. 0 = memory monitoring is disabled | |
|
79 | ; Examples: 268435456 (256MB), 536870912 (512MB) | |
|
80 | ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB) | |
|
81 | memory_max_usage = 2147483648 | |
|
82 | ||
|
83 | ; How often in seconds to check for memory usage for each gunicorn worker | |
|
84 | memory_usage_check_interval = 60 | |
|
85 | ||
|
86 | ; Threshold value below which we don't recycle a worker if GarbageCollection | |
|
87 | ; frees up enough resources. Before each restart we try to run GC on the worker; | |
|
88 | ; if we get enough free memory after that, the restart will not happen. | |
|
89 | memory_usage_recovery_threshold = 0.8 | |
|
90 | ||
|
91 | ||
|
92 | [app:main] | |
|
93 | ; The %(here)s variable will be replaced with the absolute path of parent directory | |
|
94 | ; of this file | |
|
95 | use = egg:rhodecode-vcsserver | |
|
96 | ||
|
97 | ; Pyramid default locales, we need this to be set | |
|
98 | pyramid.default_locale_name = en | |
|
99 | ||
|
100 | ; default locale used by VCS systems | |
|
101 | locale = en_US.UTF-8 | |
|
102 | ||
|
103 | ; path to binaries for vcsserver; it should be set by the installer | |
|
104 | ; at installation time, e.g. /home/user/vcsserver-1/profile/bin | |
|
105 | ; it can also be a path to nix-build output in case of development | |
|
106 | core.binary_dir = /home/rhodecode/.rccontrol/vcsserver-1/profile/bin | |
|
107 | ||
|
108 | ; Custom exception store path, defaults to TMPDIR | |
|
109 | ; This is used to store exceptions from RhodeCode in a shared directory | |
|
110 | #exception_tracker.store_path = | |
|
111 | ||
|
112 | ; ############# | |
|
113 | ; DOGPILE CACHE | |
|
114 | ; ############# | |
|
115 | ||
|
116 | ; Default cache dir for caches. Putting this into a ramdisk can boost performance. | |
|
117 | ; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space | |
|
118 | cache_dir = /var/opt/rhodecode_data | |
|
119 | ||
|
120 | ; *************************************** | |
|
121 | ; `repo_object` cache, default file based | |
|
122 | ; *************************************** | |
|
123 | ||
|
124 | ; `repo_object` cache settings for vcs methods for repositories | |
|
125 | #rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace | |
|
126 | ||
|
127 | ; cache auto-expires after N seconds | |
|
128 | ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days) | |
|
129 | #rc_cache.repo_object.expiration_time = 2592000 | |
|
130 | ||
|
131 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set | |
|
132 | #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db | |
|
133 | ||
|
134 | ; *********************************************************** | |
|
135 | ; `repo_object` cache with redis backend | |
|
136 | ; recommended for larger instances and for better performance | |
|
137 | ; *********************************************************** | |
|
138 | ||
|
139 | ; `repo_object` cache settings for vcs methods for repositories | |
|
140 | rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack | |
|
141 | ||
|
142 | ; cache auto-expires after N seconds | |
|
143 | ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days) | |
|
144 | rc_cache.repo_object.expiration_time = 2592000 | |
|
145 | ||
|
146 | ; redis_expiration_time needs to be greater than expiration_time | |
|
147 | rc_cache.repo_object.arguments.redis_expiration_time = 3592000 | |
|
148 | ||
|
149 | rc_cache.repo_object.arguments.host = redis | |
|
150 | rc_cache.repo_object.arguments.port = 6379 | |
|
151 | rc_cache.repo_object.arguments.db = 5 | |
|
152 | rc_cache.repo_object.arguments.socket_timeout = 30 | |
|
153 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends | |
|
154 | #rc_cache.repo_object.arguments.distributed_lock = true | |
|
155 | ||
|
156 | ||
|
157 | ; ##################### | |
|
158 | ; LOGGING CONFIGURATION | |
|
159 | ; ##################### | |
|
160 | [loggers] | |
|
161 | keys = root, vcsserver | |
|
162 | ||
|
163 | [handlers] | |
|
164 | keys = console | |
|
165 | ||
|
166 | [formatters] | |
|
167 | keys = generic | |
|
168 | ||
|
169 | ; ####### | |
|
170 | ; LOGGERS | |
|
171 | ; ####### | |
|
172 | [logger_root] | |
|
173 | level = NOTSET | |
|
174 | handlers = console | |
|
175 | ||
|
176 | [logger_vcsserver] | |
|
177 | level = DEBUG | |
|
178 | handlers = | |
|
179 | qualname = vcsserver | |
|
180 | propagate = 1 | |
|
181 | ||
|
182 | ||
|
183 | ; ######## | |
|
184 | ; HANDLERS | |
|
185 | ; ######## | |
|
186 | ||
|
187 | [handler_console] | |
|
188 | class = StreamHandler | |
|
189 | args = (sys.stderr, ) | |
|
190 | level = INFO | |
|
191 | formatter = generic | |
|
192 | ||
|
193 | ; ########## | |
|
194 | ; FORMATTERS | |
|
195 | ; ########## | |
|
196 | ||
|
197 | [formatter_generic] | |
|
198 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s | |
|
199 | datefmt = %Y-%m-%d %H:%M:%S |
|
1 | NO CONTENT: new file 100644 |
|
1 | NO CONTENT: new file 100644 |
|
1 | NO CONTENT: new file 100644 |
@@ -0,0 +1,392 b'' | |||
|
1 | version: '3.9' | |
|
2 | ||
|
3 | volumes: | |
|
4 | ||
|
5 | # bind volume with logs | |
|
6 | logvolume: | |
|
7 | driver: local | |
|
8 | driver_opts: | |
|
9 | type: none | |
|
10 | o: bind | |
|
11 | device: $PWD/logs | |
|
12 | ||
|
13 | # bind-mount with configs | |
|
14 | confvolume: | |
|
15 | driver: local | |
|
16 | driver_opts: | |
|
17 | type: none | |
|
18 | o: bind | |
|
19 | device: $PWD/config | |
|
20 | ||
|
21 | # volume for rhodecode caches, archive caches, elasticsearch etc | |
|
22 | datavolume: {} | |
|
23 | ||
|
24 | # postgres store | |
|
25 | pg_data: {} | |
|
26 | ||
|
27 | # volume for rhodecode elasticsearch | |
|
28 | es_data: {} | |
|
29 | ||
|
30 | # RhodeCode repo store; this is where the repositories will be stored | |
|
31 | rhodecode_repos: {} | |
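Note that the two bind volumes above require $PWD/logs and $PWD/config to exist before the first `up`; the local driver with `o: bind` does not create missing host paths. A pre-flight sketch, run from the compose project directory:

    mkdir -p logs config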
|
32 | ||
|
33 | networks: | |
|
34 | rhodecode_network: | |
|
35 | name: rhodecode_network | |
|
36 | ||
|
37 | services: | |
|
38 | ||
|
39 | rhodecode: | |
|
40 | networks: | |
|
41 | - rhodecode_network | |
|
42 | image: rhodecode/app:${RC_VERSION:?specify-RC_VERSION-env-var} | |
|
43 | stdin_open: true | |
|
44 | tty: true | |
|
45 | restart: unless-stopped | |
|
46 | command: [ | |
|
47 | "/var/opt/rhodecode_bin/bin/gunicorn", | |
|
48 | "--name", | |
|
49 | "gunicorn-rhodecode-1", | |
|
50 | "--error-logfile=-", | |
|
51 | "--paster=/etc/rhodecode/conf/compose/rhodecode.optimized.ini", | |
|
52 | "--config=/etc/rhodecode/conf/gunicorn_conf.py" | |
|
53 | ] | |
|
54 | ports: | |
|
55 | - "127.0.0.1::10020" | |
|
56 | ||
|
57 | build: | |
|
58 | context: . | |
|
59 | dockerfile: service/rhodecode/rhodecode.dockerfile | |
|
60 | network: rhodecode_network | |
|
61 | args: | |
|
62 | TZ: ${TZ} | |
|
63 | RHODECODE_VERSION: ${RC_VERSION:?specify-RC_VERSION-env-var} | |
|
64 | RHODECODE_DB: postgresql://rhodecode:${POSTGRES_PASSWORD:?must-specify-db-password}@database/${POSTGRES_DB:?must-specify-db-name} | |
|
65 | RHODECODE_USER_NAME: ${RHODECODE_USER_NAME} | |
|
66 | RHODECODE_USER_PASS: ${RHODECODE_USER_PASS} | |
|
67 | RHODECODE_USER_EMAIL: ${RHODECODE_USER_EMAIL} | |
|
68 | ||
|
69 | environment: | |
|
70 | RC_APP_TYPE: rhodecode_http | |
|
71 | RC_APP_PROC: 1 | |
|
72 | ENV_RC_BASE_URL: ${RHODECODE_BASE_URL} | |
|
73 | SSL_CERT_FILE: "/etc/rhodecode/conf/ca-bundle.crt" | |
|
74 | REQUESTS_CA_BUNDLE: "/etc/rhodecode/conf/ca-bundle.crt" | |
|
75 | GIT_SSL_CAINFO: "/etc/rhodecode/conf/ca-bundle.crt" | |
|
76 | GEVENT_RESOLVER: "ares" | |
|
77 | ||
|
78 | DB_UPGRADE: 1 # run the DB upgrade | |
|
79 | SETUP_APP: 1 # run the application default settings setup, can be turned off after initial run | |
|
80 | MAIN_INI_PATH: /etc/rhodecode/conf/compose/rhodecode.optimized.ini | |
|
81 | ||
|
82 | # SVN Specific | |
|
83 | MOD_DAV_SVN_PORT: 8090 | |
|
84 | APACHE_LOG_DIR: /var/log/rhodecode/svn | |
|
85 | MOD_DAV_SVN_CONF_FILE: /etc/rhodecode/conf/svn/mod_dav_svn.conf | |
|
86 | ||
|
87 | ||
|
88 | healthcheck: | |
|
89 | test: [ "CMD", "curl", "-A", "RhodeCode-Healthcheck", "-s", "-o", "/dev/null", "-w", "'%{http_code}'", "http://127.0.0.1:10020/_admin/ops/ping" ] | |
|
90 | timeout: 30s | |
|
91 | interval: 60s | |
|
92 | retries: 10 | |
|
93 | ||
|
94 | depends_on: | |
|
95 | - database | |
|
96 | - redis | |
|
97 | ||
|
98 | volumes: | |
|
99 | - confvolume:/etc/rhodecode/conf | |
|
100 | - logvolume:/var/log/rhodecode | |
|
101 | - rhodecode_repos:/var/opt/rhodecode_repo_store | |
|
102 | - datavolume:/var/opt/rhodecode_data | |
|
103 | ||
|
104 | tmpfs: | |
|
105 | - /data_ramdisk:size=1G | |
|
106 | ||
|
107 | vcsserver: | |
|
108 | networks: | |
|
109 | - rhodecode_network | |
|
110 | image: rhodecode/app:${RC_VERSION:?specify-RC_VERSION-env-var} | |
|
111 | stdin_open: true | |
|
112 | tty: true | |
|
113 | restart: unless-stopped | |
|
114 | command: [ | |
|
115 | "/home/rhodecode/.rccontrol/vcsserver-1/profile/bin/gunicorn", | |
|
116 | "--name=gunicorn-vcsserver-1", | |
|
117 | "--error-logfile=-", | |
|
118 | "--paster=/etc/rhodecode/conf/compose/vcsserver.optimized.ini", | |
|
119 | "--config=/etc/rhodecode/conf/gunicorn_conf.py" | |
|
120 | ] | |
|
121 | ports: | |
|
122 | - "127.0.0.1::10010" | |
|
123 | ||
|
124 | healthcheck: | |
|
125 | test: [ "CMD", "curl", "-A", "RhodeCode-Healthcheck", "-s", "-o", "/dev/null", "-w", "'%{http_code}'", "http://127.0.0.1:10010/status" ] | |
|
126 | timeout: 30s | |
|
127 | interval: 60s | |
|
128 | retries: 10 | |
|
129 | ||
|
130 | environment: | |
|
131 | RC_APP_TYPE: rhodecode_vcsserver | |
|
132 | RC_APP_PROC: 1 | |
|
133 | MAIN_INI_PATH: /etc/rhodecode/conf/compose/vcsserver.optimized.ini | |
|
134 | ENV_RC_BASE_URL: ${RHODECODE_BASE_URL} | |
|
135 | SSL_CERT_FILE: "/etc/rhodecode/conf/ca-bundle.crt" | |
|
136 | REQUESTS_CA_BUNDLE: "/etc/rhodecode/conf/ca-bundle.crt" | |
|
137 | GIT_SSL_CAINFO: "/etc/rhodecode/conf/ca-bundle.crt" | |
|
138 | ||
|
139 | depends_on: | |
|
140 | - redis | |
|
141 | ||
|
142 | volumes: | |
|
143 | - confvolume:/etc/rhodecode/conf | |
|
144 | - logvolume:/var/log/rhodecode | |
|
145 | - rhodecode_repos:/var/opt/rhodecode_repo_store | |
|
146 | - datavolume:/var/opt/rhodecode_data | |
|
147 | ||
|
148 | celery: | |
|
149 | networks: | |
|
150 | - rhodecode_network | |
|
151 | image: rhodecode/app:${RC_VERSION:?specify-RC_VERSION-env-var} | |
|
152 | stdin_open: true | |
|
153 | tty: true | |
|
154 | restart: unless-stopped | |
|
155 | command: [ | |
|
156 | "/var/opt/rhodecode_bin/bin/celery", | |
|
157 | "worker", | |
|
158 | "--task-events", | |
|
159 | "--autoscale=10,2", | |
|
160 | "--no-color", | |
|
161 | "--app=rhodecode.lib.celerylib.loader", | |
|
162 | "--loglevel=DEBUG", | |
|
163 | "--ini=/etc/rhodecode/conf/compose/rhodecode.optimized.ini" | |
|
164 | ] | |
|
165 | ||
|
166 | environment: | |
|
167 | RC_APP_TYPE: rhodecode_celery | |
|
168 | RC_APP_PROC: 1 | |
|
169 | MAIN_INI_PATH: /etc/rhodecode/conf/compose/rhodecode.optimized.ini | |
|
170 | ENV_RC_BASE_URL: ${RHODECODE_BASE_URL} | |
|
171 | SSL_CERT_FILE: "/etc/rhodecode/conf/ca-bundle.crt" | |
|
172 | REQUESTS_CA_BUNDLE: "/etc/rhodecode/conf/ca-bundle.crt" | |
|
173 | GIT_SSL_CAINFO: "/etc/rhodecode/conf/ca-bundle.crt" | |
|
174 | ||
|
175 | depends_on: | |
|
176 | - database | |
|
177 | - redis | |
|
178 | ||
|
179 | volumes: | |
|
180 | - confvolume:/etc/rhodecode/conf | |
|
181 | - logvolume:/var/log/rhodecode | |
|
182 | - rhodecode_repos:/var/opt/rhodecode_repo_store | |
|
183 | - datavolume:/var/opt/rhodecode_data | |
|
184 | ||
|
185 | beat: | |
|
186 | # This service is not scalable | |
|
187 | networks: | |
|
188 | - rhodecode_network | |
|
189 | image: rhodecode/app:${RC_VERSION:?specify-RC_VERSION-env-var} | |
|
190 | stdin_open: true | |
|
191 | tty: true | |
|
192 | restart: unless-stopped | |
|
193 | command: [ | |
|
194 | "/var/opt/rhodecode_bin/bin/celery", | |
|
195 | "beat", | |
|
196 | "--no-color", | |
|
197 | "--app=rhodecode.lib.celerylib.loader", | |
|
198 | "--scheduler=rhodecode.lib.celerylib.scheduler.RcScheduler", | |
|
199 | "--loglevel=DEBUG", | |
|
200 | "--ini=/etc/rhodecode/conf/compose/rhodecode.optimized.ini" | |
|
201 | ] | |
|
202 | ||
|
203 | environment: | |
|
204 | RC_APP_TYPE: rhodecode_beat | |
|
205 | RC_APP_PROC: 1 | |
|
206 | MAIN_INI_PATH: /etc/rhodecode/conf/compose/rhodecode.optimized.ini | |
|
207 | ENV_RC_BASE_URL: ${RHODECODE_BASE_URL} | |
|
208 | SSL_CERT_FILE: "/etc/rhodecode/conf/ca-bundle.crt" | |
|
209 | REQUESTS_CA_BUNDLE: "/etc/rhodecode/conf/ca-bundle.crt" | |
|
210 | GIT_SSL_CAINFO: "/etc/rhodecode/conf/ca-bundle.crt" | |
|
211 | ||
|
212 | depends_on: | |
|
213 | - database | |
|
214 | - redis | |
|
215 | ||
|
216 | volumes: | |
|
217 | - confvolume:/etc/rhodecode/conf | |
|
218 | - logvolume:/var/log/rhodecode | |
|
219 | - rhodecode_repos:/var/opt/rhodecode_repo_store | |
|
220 | - datavolume:/var/opt/rhodecode_data | |
|
221 | ||
|
222 | svn: | |
|
223 | networks: | |
|
224 | - rhodecode_network | |
|
225 | image: rhodecode/app:${RC_VERSION:?specify-RC_VERSION-env-var} | |
|
226 | stdin_open: true | |
|
227 | tty: true | |
|
228 | restart: unless-stopped | |
|
229 | command: ["apachectl", "-D", "FOREGROUND"] | |
|
230 | ||
|
231 | environment: | |
|
232 | RC_APP_TYPE: rhodecode_svn | |
|
233 | ||
|
234 | # SVN Specific | |
|
235 | MOD_DAV_SVN_PORT: 8090 | |
|
236 | APACHE_LOG_DIR: /var/log/rhodecode/svn | |
|
237 | MOD_DAV_SVN_CONF_FILE: /etc/rhodecode/conf/svn/mod_dav_svn.conf | |
|
238 | ||
|
239 | ports: | |
|
240 | - "127.0.0.1::8090" | |
|
241 | ||
|
242 | healthcheck: | |
|
243 | test: [ "CMD", "curl", "-A", "RhodeCode-Healthcheck", "-s", "-o", "/dev/null", "-w", "'%{http_code}'", "http://127.0.0.1:8090/_server_status" ] | |
|
244 | timeout: 30s | |
|
245 | interval: 60s | |
|
246 | retries: 10 | |
|
247 | ||
|
248 | volumes: | |
|
249 | - confvolume:/etc/rhodecode/conf | |
|
250 | - logvolume:/var/log/rhodecode | |
|
251 | - rhodecode_repos:/var/opt/rhodecode_repo_store | |
|
252 | ||
|
253 | sshd: | |
|
254 | networks: | |
|
255 | - rhodecode_network | |
|
256 | image: rhodecode/app:${RC_VERSION:?specify-RC_VERSION-env-var} | |
|
257 | stdin_open: true | |
|
258 | tty: true | |
|
259 | restart: unless-stopped | |
|
260 | command: ["/usr/sbin/sshd", "-f", "/etc/rhodecode/sshd_config", "-D", "-e"] | |
|
261 | ||
|
262 | environment: | |
|
263 | RC_APP_TYPE: rhodecode_sshd | |
|
264 | SSH_BOOTSTRAP: 1 | |
|
265 | ||
|
266 | ports: | |
|
267 | # set from .env file | |
|
268 | - "${RC_SSH_PORT:?must-specify-ssh-port}:22" | |
|
269 | ||
|
270 | depends_on: {} | |
|
271 | ||
|
272 | volumes: | |
|
273 | - confvolume:/etc/rhodecode/conf | |
|
274 | - logvolume:/var/log/rhodecode | |
|
275 | - rhodecode_repos:/var/opt/rhodecode_repo_store | |
|
276 | - datavolume:/var/opt/rhodecode_data | |
|
277 | ||
|
278 | elasticsearch: | |
|
279 | networks: | |
|
280 | - rhodecode_network | |
|
281 | image: docker.elastic.co/elasticsearch/elasticsearch:6.8.13 | |
|
282 | ||
|
283 | environment: | |
|
284 | - cluster.name=elasticsearch-cluster | |
|
285 | - network.host=0.0.0.0 | |
|
286 | - bootstrap.memory_lock=true | |
|
287 | - discovery.type=single-node | |
|
288 | - "ES_JAVA_OPTS=-Xms512m -Xmx512m" | |
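# Heap-sizing note (general Elasticsearch guidance, not from the original
# file): keep -Xms and -Xmx equal, and no more than ~50% of the memory
# available to this container, leaving the rest to the filesystem cache.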
|
289 | ||
|
290 | ulimits: | |
|
291 | memlock: | |
|
292 | soft: -1 | |
|
293 | hard: -1 | |
|
294 | ||
|
295 | volumes: | |
|
296 | - es_data:/usr/share/elasticsearch/data | |
|
297 | ||
|
298 | channelstream: | |
|
299 | networks: | |
|
300 | - rhodecode_network | |
|
301 | image: channelstream/channelstream:0.7.1 | |
|
302 | restart: unless-stopped | |
|
303 | ||
|
304 | ports: | |
|
305 | - "127.0.0.1:9800:9800" | |
|
306 | ||
|
307 | command: ["channelstream", "-i", "/etc/rhodecode/conf/compose/channelstream.ini"] | |
|
308 | ||
|
309 | environment: | |
|
310 | CHANNELSTREAM_ALLOW_POSTING_FROM: 0.0.0.0 | |
|
311 | ||
|
312 | healthcheck: | |
|
313 | test: [ "CMD", "curl", "-s", "-o", "/dev/null", "-w", "'%{http_code}'", "http://127.0.0.1:9800/admin/sign_in" ] | |
|
314 | timeout: 30s | |
|
315 | interval: 60s | |
|
316 | retries: 10 | |
|
317 | ||
|
318 | volumes: | |
|
319 | - confvolume:/etc/rhodecode/conf | |
|
320 | - logvolume:/var/log/rhodecode | |
|
321 | ||
|
322 | redis: | |
|
323 | networks: | |
|
324 | - rhodecode_network | |
|
325 | image: rhodecode/redis:6.0.10 | |
|
326 | build: | |
|
327 | context: . | |
|
328 | dockerfile: service/redis/rhodecode_redis.dockerfile | |
|
329 | network: rhodecode_network | |
|
330 | ||
|
331 | restart: unless-stopped | |
|
332 | ||
|
333 | ports: | |
|
334 | - "127.0.0.1::6379" | |
|
335 | ||
|
336 | volumes: | |
|
337 | - logvolume:/var/log/rhodecode | |
|
338 | ||
|
339 | database: | |
|
340 | networks: | |
|
341 | - rhodecode_network | |
|
342 | image: rhodecode/database:13.1 | |
|
343 | build: | |
|
344 | context: . | |
|
345 | dockerfile: service/database/rhodecode_database.dockerfile | |
|
346 | network: rhodecode_network | |
|
347 | restart: unless-stopped | |
|
348 | ||
|
349 | ports: | |
|
350 | - "127.0.0.1::5432" | |
|
351 | ||
|
352 | environment: | |
|
353 | POSTGRES_DB: ${POSTGRES_DB:?must-specify-db-name} | |
|
354 | POSTGRES_USER: rhodecode | |
|
355 | POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:?must-specify-db-password} | |
|
356 | ||
|
357 | volumes: | |
|
358 | # save the pg_data volume | |
|
359 | - pg_data:/var/lib/postgresql/data | |
|
360 | - logvolume:/var/log/rhodecode | |
|
361 | ||
|
362 | nginx: | |
|
363 | networks: | |
|
364 | - rhodecode_network | |
|
365 | image: rhodecode/nginx:1.19.6 | |
|
366 | build: | |
|
367 | context: . | |
|
368 | dockerfile: service/nginx/rhodecode_nginx.dockerfile | |
|
369 | network: rhodecode_network | |
|
370 | restart: unless-stopped | |
|
371 | ||
|
372 | ports: | |
|
373 | # set from .env file | |
|
374 | - "${RC_HTTP_PORT:?must-specify-http-port}:80" | |
|
375 | - "${RC_HTTPS_PORT:?must-specify-https-port}:443" | |
|
376 | ||
|
377 | healthcheck: | |
|
378 | # change port 80 to 443 when only using SSL | |
|
379 | test: [ "CMD", "curl", "-A", "RhodeCode-Healthcheck", "-s", "-o", "/dev/null", "-w", "'%{http_code}'", "http://127.0.0.1:80/_admin/ops/ping" ] | |
|
380 | timeout: 30s | |
|
381 | interval: 60s | |
|
382 | retries: 10 | |
|
383 | ||
|
384 | depends_on: | |
|
385 | - channelstream | |
|
386 | ||
|
387 | volumes: | |
|
388 | - confvolume:/etc/rhodecode/conf | |
|
389 | - logvolume:/var/log/rhodecode | |
|
390 | ||
|
391 | volumes_from: | |
|
392 | - rhodecode:ro |
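# note: volumes_from exists only in the v2 compose file format (it was removed
# in v3), so this stack presumably pins a 2.x "version:" earlier in the file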
@@ -0,0 +1,143 b'' | |||
|
1 | #!/usr/bin/env bash | |
|
2 | set -Eeo pipefail | |
|
3 | ||
|
4 | function config_copy() { | |
|
5 | # copy over the configs if they don't exist | |
|
6 | for f in /etc/rhodecode/conf_build/*; do | |
|
7 | fname=${f##*/} | |
|
8 | if [ ! -f "/etc/rhodecode/conf/$fname" ]; then | |
|
9 | echo "$fname does not exist, copying over default conf..." | 
|
10 | cp -v $f /etc/rhodecode/conf/$fname | |
|
11 | fi | |
|
12 | done | |
|
13 | ||
|
14 | } | |
|
15 | ||
|
16 | function db_upgrade() { | |
|
17 | echo 'ENTRYPOINT: Upgrading database.' | |
|
18 | /var/opt/rhodecode_bin/bin/rc-upgrade-db $MAIN_INI_PATH --force-yes | |
|
19 | } | |
|
20 | ||
|
21 | function db_init() { | |
|
22 | ||
|
23 | gosu $RC_USER \ | |
|
24 | /home/$RC_USER/.rccontrol/$RC_TYPE_ID/profile/bin/rc-setup-app \ | |
|
25 | $MAIN_INI_PATH \ | |
|
26 | --force-yes \ | |
|
27 | --skip-existing-db \ | |
|
28 | --user=$RHODECODE_USER_NAME \ | |
|
29 | --password=$RHODECODE_USER_PASS \ | |
|
30 | --email=$RHODECODE_USER_EMAIL \ | |
|
31 | --repos=$RHODECODE_REPO_DIR | |
|
32 | } | |
|
33 | ||
|
34 | function rhodecode_setup() { | |
|
35 | for f in /home/$RC_USER/.rccontrol/bootstrap/*.py; do | |
|
36 | fname=${f##*/} | |
|
37 | ||
|
38 | echo "Running script $fname on $RC_TYPE_ID" | |
|
39 | gosu $RC_USER /home/$RC_USER/.rccontrol/$RC_TYPE_ID/profile/bin/rc-ishell $MAIN_INI_PATH <<< "%run $f" | |
|
40 | ||
|
41 | done | |
|
42 | } | |
|
43 | ||
|
44 | function generate_ssh_keys() { | |
|
45 | keys_dir=/etc/rhodecode/conf/ssh | |
|
46 | ||
|
47 | if [[ ! -d $keys_dir ]]; then | |
|
48 | echo "Generating $keys_dir/ssh_host_rsa_key ..." | |
|
49 | gosu "$RC_USER" mkdir -p $keys_dir | |
|
50 | fi | |
|
51 | ||
|
52 | # Generate ssh host key for the first time | |
|
53 | if [[ ! -f $keys_dir/ssh_host_rsa_key ]]; then | |
|
54 | echo "Generating $keys_dir/ssh_host_rsa_key ..." | |
|
55 | gosu "$RC_USER" ssh-keygen -f $keys_dir/ssh_host_rsa_key -N '' -t rsa | |
|
56 | gosu "$RC_USER" chmod 0600 $keys_dir/ssh_host_rsa_key | |
|
57 | fi | |
|
58 | ||
|
59 | if [[ ! -f $keys_dir/ssh_host_ecdsa_key ]]; then | |
|
60 | echo "Generating $keys_dir/ssh_host_ecdsa_key ..." | |
|
61 | gosu "$RC_USER" ssh-keygen -f $keys_dir/ssh_host_ecdsa_key -N '' -t ecdsa | |
|
62 | gosu "$RC_USER" chmod 0600 $keys_dir/ssh_host_ecdsa_key | |
|
63 | fi | |
|
64 | ||
|
65 | if [[ ! -f $keys_dir/ssh_host_ed25519_key ]]; then | |
|
66 | echo "Generating $keys_dir/ssh_host_ed25519_key ..." | |
|
67 | gosu "$RC_USER" ssh-keygen -f $keys_dir/ssh_host_ed25519_key -N '' -t ed25519 | |
|
68 | gosu "$RC_USER" chmod 0600 $keys_dir/ssh_host_ed25519_key | |
|
69 | fi | |
|
70 | ||
|
71 | if [[ ! -f $keys_dir/authorized_keys ]]; then | |
|
72 | echo "Generating $keys_dir/authorized_keys..." | |
|
73 | gosu "$RC_USER" touch $keys_dir/authorized_keys | |
|
74 | fi | |
|
75 | ||
|
76 | sed -i "s/AllowUsers USER/AllowUsers $RC_USER/" $SSHD_CONF_FILE | |
|
77 | } | |
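# Illustrative note (not in the original script): the guards above only create
# missing files, so forcing fresh host keys means deleting the old ones from
# the conf volume and restarting, e.g.:
#   rm /etc/rhodecode/conf/ssh/ssh_host_*_key* && docker-compose restart sshd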
|
78 | ||
|
79 | ||
|
80 | echo "ENTRYPOINT: Running $RC_APP_TYPE with cmd '$1'" | |
|
81 | ||
|
82 | if [ "$SSH_BOOTSTRAP" = 1 ]; then | |
|
83 | # generate SSH keys | |
|
84 | generate_ssh_keys | |
|
85 | fi | |
|
86 | ||
|
87 | isLikelyWeb= | |
|
88 | case "$1" in | |
|
89 | supervisord | pserve | gunicorn ) isLikelyWeb=1 ;; | |
|
90 | esac | |
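# (isLikelyWeb is informational only in this script; the bootstrap logic below
# keys off RC_APP_TYPE instead)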
|
91 | ||
|
92 | if [[ $RC_APP_TYPE = "rhodecode_http" ]]; then | |
|
93 | ||
|
94 | DB_INIT_FILE=/var/opt/rhodecode_data/.dbinit_bootstrapped | |
|
95 | # Avoid running DB_INIT twice | 
|
96 | if [[ ! -e $DB_INIT_FILE ]]; then | |
|
97 | echo "ENTRYPOINT: Starting $RC_APP_TYPE initial db bootstrap" | |
|
98 | ||
|
99 | db_init | |
|
100 | ||
|
101 | gosu $RC_USER touch "$DB_INIT_FILE" | |
|
102 | echo "ENTRYPOINT: marked as db-bootstrapped at $DB_INIT_FILE" | |
|
103 | ||
|
104 | fi | |
|
105 | ||
|
106 | BOOTSTRAP_FILE=/var/opt/rhodecode_data/.setup_bootstrapped | |
|
107 | # Avoid re-running the bootstrap on a simple container start/stop | 
|
108 | if [[ ! -e $BOOTSTRAP_FILE ]]; then | |
|
109 | echo "ENTRYPOINT: Starting $RC_APP_TYPE initial bootstrap" | |
|
110 | ||
|
111 | # copy over default configuration files | |
|
112 | config_copy | |
|
113 | ||
|
114 | # setup application with specific options | |
|
115 | if [ "$SETUP_APP" = 1 ]; then | |
|
116 | rhodecode_setup | |
|
117 | fi | |
|
118 | ||
|
119 | gosu $RC_USER touch "$BOOTSTRAP_FILE" | |
|
120 | echo "ENTRYPOINT: marked as setup-bootstrapped at $BOOTSTRAP_FILE" | |
|
121 | ||
|
122 | fi | |
|
123 | ||
|
124 | if [ "$DB_UPGRADE" = 1 ]; then | |
|
125 | # run DB migrate | |
|
126 | db_upgrade | |
|
127 | fi | |
|
128 | ||
|
129 | fi | |
|
130 | ||
|
131 | ||
|
132 | if [ "$RC_APP_PROC" = 1 ]; then | |
|
133 | # Fix problem with zombie processes when using executables like supervisord/gunicorn | |
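# "set --" rebuilds the positional parameters, so the final `exec "$@"` at the
# bottom of this script effectively runs: gosu $RC_USER tini -- <original command>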
|
134 | set -- tini -- "$@" | |
|
135 | set -- gosu $RC_USER "$@" | |
|
136 | fi | |
|
137 | ||
|
138 | if [ "$RC_APP_TYPE" = "rhodecode_sshd" ]; then | |
|
139 | # Fix the "Missing privilege separation directory" error | 
|
140 | mkdir -p /run/sshd | |
|
141 | fi | |
|
142 | ||
|
143 | exec "$@" |
@@ -0,0 +1,49 b'' | |||
|
1 | #!/usr/bin/env bash | |
|
2 | set -Eeo pipefail | |
|
3 | ||
|
4 | ||
|
5 | function generate_ssh_keys() { | |
|
6 | keys_dir=/etc/rhodecode/conf/ssh | |
|
7 | ||
|
8 | if [[ ! -d $keys_dir ]]; then | |
|
9 | echo "Generating $keys_dir/ssh_host_rsa_key ..." | |
|
10 | mkdir -p $keys_dir | |
|
11 | fi | |
|
12 | ||
|
13 | # Generate ssh host key for the first time | |
|
14 | if [[ ! -f $keys_dir/ssh_host_rsa_key ]]; then | |
|
15 | echo "Generating $keys_dir/ssh_host_rsa_key ..." | |
|
16 | ssh-keygen -f $keys_dir/ssh_host_rsa_key -N '' -t rsa | |
|
17 | chmod 0600 $keys_dir/ssh_host_rsa_key | |
|
18 | fi | |
|
19 | ||
|
20 | if [[ ! -f $keys_dir/ssh_host_ecdsa_key ]]; then | |
|
21 | echo "Generating $keys_dir/ssh_host_ecdsa_key ..." | |
|
22 | ssh-keygen -f $keys_dir/ssh_host_ecdsa_key -N '' -t ecdsa | |
|
23 | chmod 0600 $keys_dir/ssh_host_ecdsa_key | |
|
24 | fi | |
|
25 | ||
|
26 | if [[ ! -f $keys_dir/ssh_host_ed25519_key ]]; then | |
|
27 | echo "Generating $keys_dir/ssh_host_ed25519_key ..." | |
|
28 | ssh-keygen -f $keys_dir/ssh_host_ed25519_key -N '' -t ed25519 | |
|
29 | chmod 0600 $keys_dir/ssh_host_ed25519_key | |
|
30 | fi | |
|
31 | ||
|
32 | if [[ ! -f $keys_dir/authorized_keys ]]; then | |
|
33 | echo "Generating $keys_dir/authorized_keys..." | |
|
34 | touch $keys_dir/authorized_keys | |
|
35 | fi | |
|
36 | ||
|
37 | sed -i "s/AllowUsers USER/AllowUsers $RC_USER/" $SSHD_CONF_FILE | |
|
38 | } | |
|
39 | ||
|
40 | echo "ENTRYPOINT: Running with cmd '$1'" | |
|
41 | ||
|
42 | ||
|
43 | if [ "$SSH_BOOTSTRAP" = 1 ]; then | |
|
44 | # generate SSH keys | |
|
45 | generate_ssh_keys | |
|
46 | fi | |
|
47 | ||
|
48 | mkdir -p /run/sshd | |
|
49 | exec "$@" |
@@ -0,0 +1,15 b'' | |||
|
1 | #!/usr/bin/env bash | |
|
2 | set -Eeo pipefail | |
|
3 | ||
|
4 | ||
|
5 | BOOTSTRAP_FILE=.bootstrapped | |
|
6 | ||
|
7 | # Avoid re-running the bootstrap on a simple container start/stop | 
|
8 | if [[ ! -e /$BOOTSTRAP_FILE ]]; then | 
|
9 | echo "ENTRYPOINT: Starting $RC_APP_TYPE bootstrap" | |
|
10 | ||
|
11 | touch $MOD_DAV_SVN_CONF_FILE | |
|
12 | touch /$BOOTSTRAP_FILE | |
|
13 | fi | |
|
14 | ||
|
15 | exec "$@" No newline at end of file |
|
1 | NO CONTENT: new file 100644 |
|
1 | NO CONTENT: new file 100644 |
@@ -0,0 +1,8 b'' | |||
|
1 | max_connections = 400 | |
|
2 | shared_buffers = 1GB | |
|
3 | effective_cache_size = 3GB | |
|
4 | work_mem = 2621kB | |
|
5 | maintenance_work_mem = 256MB | |
|
6 | checkpoint_completion_target = 0.9 | |
|
7 | wal_buffers = 16MB | |
|
8 | default_statistics_target = 100 |
@@ -0,0 +1,4 b'' | |||
|
1 | FROM library/postgres:13.1 | |
|
2 | ||
|
3 | COPY service/database/customized.conf /etc/conf.d/pg_customized.conf | |
|
4 | CMD ["postgres", "-c", "log_statement=ddl"] No newline at end of file |
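# "-c log_statement=ddl" hands a runtime server parameter straight to the
# postgres binary; it can be checked from a client session (illustrative):
#   psql -U rhodecode -d rhodecode -c 'SHOW log_statement;'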
@@ -0,0 +1,114 b'' | |||
|
1 | # read more here http://tautt.com/best-nginx-configuration-for-security/ | |
|
2 | ||
|
3 | # config that disallows the browser from rendering the page inside a frame or iframe | 
|
4 | # and avoid clickjacking http://en.wikipedia.org/wiki/Clickjacking | |
|
5 | # if you need to allow [i]frames, you can use SAMEORIGIN or set a specific URI with ALLOW-FROM uri | 
|
6 | # https://developer.mozilla.org/en-US/docs/HTTP/X-Frame-Options | |
|
7 | #add_header X-Frame-Options SAMEORIGIN; | |
|
8 | ||
|
9 | # when serving user-supplied content, include an X-Content-Type-Options: nosniff header along with the Content-Type: header, | 
|
10 | # to disable content-type sniffing on some browsers. | |
|
11 | # https://www.owasp.org/index.php/List_of_useful_HTTP_headers | |
|
12 | # currently supported in IE > 8 http://blogs.msdn.com/b/ie/archive/2008/09/02/ie8-security-part-vi-beta-2-update.aspx | 
|
13 | # http://msdn.microsoft.com/en-us/library/ie/gg622941(v=vs.85).aspx | |
|
14 | # 'soon' on Firefox https://bugzilla.mozilla.org/show_bug.cgi?id=471020 | |
|
15 | #add_header X-Content-Type-Options nosniff; | |
|
16 | ||
|
17 | # This header enables the Cross-site scripting (XSS) filter built into most recent web browsers. | |
|
18 | # It's usually enabled by default anyway, so the role of this header is to re-enable the filter for | |
|
19 | # this particular website if it was disabled by the user. | |
|
20 | # https://www.owasp.org/index.php/List_of_useful_HTTP_headers | |
|
21 | #add_header X-XSS-Protection "1; mode=block"; | |
|
22 | ||
|
23 | # with Content Security Policy (CSP) enabled (and a browser that supports it: http://caniuse.com/#feat=contentsecuritypolicy), | 
|
24 | # you can tell the browser that it can only download content from the domains you explicitly allow | |
|
25 | # http://www.html5rocks.com/en/tutorials/security/content-security-policy/ | |
|
26 | # https://www.owasp.org/index.php/Content_Security_Policy | |
|
27 | # you may need to change the application code to increase security by disabling the 'unsafe-inline' 'unsafe-eval' | 

28 | # directives for css and js (if you have inline css or js, you will need to keep them). | 
|
29 | # more: http://www.html5rocks.com/en/tutorials/security/content-security-policy/#inline-code-considered-harmful | |
|
30 | #add_header Content-Security-Policy "default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval' https://ssl.google-analytics.com https://assets.zendesk.com https://connect.facebook.net; img-src 'self' https://ssl.google-analytics.com https://s-static.ak.facebook.com https://assets.zendesk.com; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com https://assets.zendesk.com; font-src 'self' https://themes.googleusercontent.com; frame-src https://assets.zendesk.com https://www.facebook.com https://s-static.ak.facebook.com https://tautt.zendesk.com; object-src 'none'"; | |
|
31 | ||
|
32 | ## rate limiter for certain pages to prevent brute force attacks | |
|
33 | limit_req_zone $binary_remote_addr zone=http_req_limit:10m rate=1r/s; | |
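## sizing note: per the nginx docs, one megabyte of zone storage holds roughly
## 16k 64-byte $binary_remote_addr states, so a 10m zone tracks on the order of
## 160k client addresses, each limited to a sustained 1 request/second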
|
34 | ||
|
35 | ## custom log format | |
|
36 | log_format http_log_custom '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_time $upstream_response_time $pipe'; | |
|
37 | ||
|
38 | server { | |
|
39 | listen 80 default; | |
|
40 | # ensure we get the proper Docker DNS resolver for load balancing. | |
|
41 | resolver 127.0.0.11 ipv6=off; | |
|
42 | server_name localhost 127.0.0.1; | |
|
43 | access_log /var/log/rhodecode/nginx/rhodecode.http.access.log http_log_custom; | |
|
44 | error_log /var/log/rhodecode/nginx/rhodecode.http.error.log; | |
|
45 | ||
|
46 | # uncomment to redirect http traffic to https | |
|
47 | #return 301 https://$host$request_uri; | |
|
48 | ||
|
49 | client_body_buffer_size 128k; | |
|
50 | # maximum number and size of buffers for large headers to read from client request | |
|
51 | large_client_header_buffers 16 256k; | |
|
52 | ||
|
53 | ## serve static files by nginx, recommended | |
|
54 | location /_static/rhodecode { | |
|
55 | gzip on; | |
|
56 | gzip_min_length 500; | |
|
57 | gzip_proxied any; | |
|
58 | gzip_comp_level 4; | |
|
59 | gzip_types text/css text/javascript text/xml text/plain text/x-component application/javascript application/json application/xml application/rss+xml font/truetype font/opentype application/vnd.ms-fontobject image/svg+xml; | |
|
60 | gzip_vary on; | |
|
61 | gzip_disable "msie6"; | |
|
62 | expires 60d; | |
|
63 | ||
|
64 | alias /var/opt/rhodecode_data/static; | |
|
65 | } | |
|
66 | ||
|
67 | ## channelstream location handler, if channelstream live chat and notifications | |
|
68 | ## are enabled, this will proxy the requests to the channelstream websocket server | 
|
69 | location /_channelstream { | |
|
70 | rewrite /_channelstream/(.*) /$1 break; | |
|
71 | gzip off; | |
|
72 | tcp_nodelay off; | |
|
73 | ||
|
74 | proxy_connect_timeout 10; | |
|
75 | proxy_send_timeout 10m; | |
|
76 | proxy_read_timeout 10m; | |
|
77 | ||
|
78 | proxy_set_header Host $host; | |
|
79 | proxy_set_header X-Real-IP $remote_addr; | |
|
80 | proxy_set_header X-Url-Scheme $scheme; | |
|
81 | proxy_set_header X-Forwarded-Proto $scheme; | |
|
82 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; | |
|
83 | ||
|
84 | proxy_http_version 1.1; | |
|
85 | proxy_set_header Upgrade $http_upgrade; | |
|
86 | proxy_set_header Connection "upgrade"; | |
|
87 | ||
|
88 | proxy_pass http://channelstream:9800; | |
|
89 | } | |
|
90 | ||
|
91 | ## rate limit this endpoint to prevent login page brute-force attacks | |
|
92 | location /_admin/login { | |
|
93 | limit_req zone=http_req_limit burst=10 nodelay; | |
|
94 | try_files $uri @rhodecode_http; | |
|
95 | } | |
|
96 | ||
|
97 | location / { | |
|
98 | include /etc/nginx/proxy.conf; | |
|
99 | try_files $uri @rhodecode_http; | |
|
100 | } | |
|
101 | ||
|
102 | location @rhodecode_http { | |
|
103 | include /etc/nginx/proxy.conf; | |
|
104 | proxy_pass http://rhodecode:10020; | |
|
105 | } | |
|
106 | ||
|
107 | ## Custom 502 error page. | |
|
108 | ## Will be displayed while RhodeCode server is turned off | |
|
109 | error_page 502 /502.html; | |
|
110 | location = /502.html { | |
|
111 | root /var/opt/static; | |
|
112 | } | |
|
113 | ||
|
114 | } |
@@ -0,0 +1,30 b'' | |||
|
1 | user root; | |
|
2 | worker_processes 2; | |
|
3 | ||
|
4 | pid /var/run/nginx.pid; | |
|
5 | error_log /var/log/rhodecode/nginx/default.error.log warn; | |
|
6 | ||
|
7 | events { | |
|
8 | worker_connections 1024; | |
|
9 | # multi_accept on; | |
|
10 | } | |
|
11 | ||
|
12 | http { | |
|
13 | include /etc/nginx/mime.types; | |
|
14 | default_type application/octet-stream; | |
|
15 | ||
|
16 | log_format main '$remote_addr - $remote_user [$time_local] ' | |
|
17 | '"$request" $status $body_bytes_sent ' | |
|
18 | '"$http_referer" "$http_user_agent" ' | |
|
19 | '$request_time $upstream_response_time $pipe'; | |
|
20 | ||
|
21 | access_log /var/log/rhodecode/nginx/default.access.log main; | |
|
22 | ||
|
23 | sendfile on; | |
|
24 | tcp_nopush on; | |
|
25 | tcp_nodelay on; | |
|
26 | keepalive_timeout 65; | |
|
27 | types_hash_max_size 2048; | |
|
28 | ||
|
29 | include /etc/nginx/sites-enabled/*.conf; | |
|
30 | } |
@@ -0,0 +1,36 b'' | |||
|
1 | proxy_redirect off; | |
|
2 | proxy_set_header Host $http_host; | |
|
3 | ||
|
4 | ## If you use HTTPS make sure you disable gzip compression | |
|
5 | ## to be safe against BREACH attack. | |
|
6 | gzip off; | |
|
7 | ||
|
8 | # Don't buffer requests in NGINX; stream them using chunked encoding | 
|
9 | proxy_buffering off; | |
|
10 | ||
|
11 | ## This is also required for newer Git clients to use streaming. | 
|
12 | ## Works only for Nginx 1.7.11 and newer | |
|
13 | proxy_request_buffering off; | |
|
14 | proxy_http_version 1.1; | |
|
15 | ||
|
16 | ## Set this to a larger number if you experience timeouts | |
|
17 | ## or 413 Request Entity Too Large errors; 10GB is enough for most cases | 
|
18 | client_max_body_size 10240m; | |
|
19 | ||
|
20 | ## needed for container auth | |
|
21 | # proxy_set_header REMOTE_USER $remote_user; | |
|
22 | # proxy_set_header X-Forwarded-User $remote_user; | |
|
23 | ||
|
24 | proxy_set_header X-Url-Scheme $scheme; | |
|
25 | proxy_set_header X-Host $http_host; | |
|
26 | proxy_set_header X-Real-IP $remote_addr; | |
|
27 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; | |
|
28 | proxy_set_header Proxy-host $proxy_host; | |
|
29 | ||
|
30 | proxy_connect_timeout 7200; | |
|
31 | proxy_send_timeout 7200; | |
|
32 | proxy_read_timeout 7200; | |
|
33 | proxy_buffers 8 32k; | |
|
34 | ||
|
35 | add_header X-Frame-Options SAMEORIGIN; | |
|
36 | add_header Strict-Transport-Security "max-age=31536000; includeSubdomains;"; |
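# note: browsers only honour Strict-Transport-Security when it is received over
# an HTTPS response; on plain HTTP the header is ignored (RFC 6797)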
@@ -0,0 +1,13 b'' | |||
|
1 | FROM library/nginx:1.19.6 | |
|
2 | ||
|
3 | ENV NGINX_ENTRYPOINT_QUIET_LOGS=1 | |
|
4 | ||
|
5 | RUN mkdir -p /etc/nginx/sites-enabled/ | |
|
6 | RUN mkdir -p /var/log/rhodecode/nginx | |
|
7 | COPY service/nginx/nginx.conf /etc/nginx/nginx.conf | |
|
8 | COPY service/nginx/http.conf /etc/nginx/sites-enabled/http.conf | |
|
9 | COPY service/nginx/proxy.conf /etc/nginx/proxy.conf | |
|
10 | ||
|
11 | VOLUME /var/log/rhodecode | |
|
12 | ||
|
13 | #TODO enable amplify No newline at end of file |
@@ -0,0 +1,1865 b'' | |||
|
1 | # Redis configuration file example. | |
|
2 | # | |
|
3 | # Note that in order to read the configuration file, Redis must be | |
|
4 | # started with the file path as first argument: | |
|
5 | # | |
|
6 | # ./redis-server /path/to/redis.conf | |
|
7 | ||
|
8 | # Note on units: when memory size is needed, it is possible to specify | |
|
9 | # it in the usual form of 1k 5GB 4M and so forth: | |
|
10 | # | |
|
11 | # 1k => 1000 bytes | |
|
12 | # 1kb => 1024 bytes | |
|
13 | # 1m => 1000000 bytes | |
|
14 | # 1mb => 1024*1024 bytes | |
|
15 | # 1g => 1000000000 bytes | |
|
16 | # 1gb => 1024*1024*1024 bytes | |
|
17 | # | |
|
18 | # units are case insensitive so 1GB 1Gb 1gB are all the same. | |
|
19 | ||
|
20 | ################################## INCLUDES ################################### | |
|
21 | ||
|
22 | # Include one or more other config files here. This is useful if you | |
|
23 | # have a standard template that goes to all Redis servers but also need | |
|
24 | # to customize a few per-server settings. Include files can include | |
|
25 | # other files, so use this wisely. | |
|
26 | # | |
|
27 | # Note that option "include" won't be rewritten by command "CONFIG REWRITE" | |
|
28 | # from admin or Redis Sentinel. Since Redis always uses the last processed | |
|
29 | # line as value of a configuration directive, you'd better put includes | |
|
30 | # at the beginning of this file to avoid overwriting config change at runtime. | |
|
31 | # | |
|
32 | # If instead you are interested in using includes to override configuration | |
|
33 | # options, it is better to use include as the last line. | |
|
34 | # | |
|
35 | # include /path/to/local.conf | |
|
36 | # include /path/to/other.conf | |
|
37 | ||
|
38 | ################################## MODULES ##################################### | |
|
39 | ||
|
40 | # Load modules at startup. If the server is not able to load modules | |
|
41 | # it will abort. It is possible to use multiple loadmodule directives. | |
|
42 | # | |
|
43 | # loadmodule /path/to/my_module.so | |
|
44 | # loadmodule /path/to/other_module.so | |
|
45 | ||
|
46 | ################################## NETWORK ##################################### | |
|
47 | ||
|
48 | # By default, if no "bind" configuration directive is specified, Redis listens | |
|
49 | # for connections from all available network interfaces on the host machine. | |
|
50 | # It is possible to listen to just one or multiple selected interfaces using | |
|
51 | # the "bind" configuration directive, followed by one or more IP addresses. | |
|
52 | # | |
|
53 | # Examples: | |
|
54 | # | |
|
55 | # bind 192.168.1.100 10.0.0.1 | |
|
56 | # bind 127.0.0.1 ::1 | |
|
57 | # | |
|
58 | # ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the | |
|
59 | # internet, binding to all the interfaces is dangerous and will expose the | |
|
60 | # instance to everybody on the internet. So by default we uncomment the | |
|
61 | # following bind directive, that will force Redis to listen only on the | |
|
62 | # IPv4 loopback interface address (this means Redis will only be able to | |
|
63 | # accept client connections from the same host that it is running on). | |
|
64 | # | |
|
65 | # IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES | |
|
66 | # JUST COMMENT OUT THE FOLLOWING LINE. | |
|
67 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |
|
68 | #bind 127.0.0.1 | |
|
69 | ||
|
70 | # Protected mode is a layer of security protection, in order to avoid that | |
|
71 | # Redis instances left open on the internet are accessed and exploited. | |
|
72 | # | |
|
73 | # When protected mode is on and if: | |
|
74 | # | |
|
75 | # 1) The server is not binding explicitly to a set of addresses using the | |
|
76 | # "bind" directive. | |
|
77 | # 2) No password is configured. | |
|
78 | # | |
|
79 | # The server only accepts connections from clients connecting from the | |
|
80 | # IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain | |
|
81 | # sockets. | |
|
82 | # | |
|
83 | # By default protected mode is enabled. You should disable it only if | |
|
84 | # you are sure you want clients from other hosts to connect to Redis | |
|
85 | # even if no authentication is configured, nor a specific set of interfaces | |
|
86 | # are explicitly listed using the "bind" directive. | |
|
87 | protected-mode no | |
|
88 | ||
|
89 | # Accept connections on the specified port, default is 6379 (IANA #815344). | |
|
90 | # If port 0 is specified Redis will not listen on a TCP socket. | |
|
91 | port 6379 | |
|
92 | ||
|
93 | # TCP listen() backlog. | |
|
94 | # | |
|
95 | # In high requests-per-second environments you need a high backlog in order | |
|
96 | # to avoid slow clients connection issues. Note that the Linux kernel | |
|
97 | # will silently truncate it to the value of /proc/sys/net/core/somaxconn so | |
|
98 | # make sure to raise both the value of somaxconn and tcp_max_syn_backlog | |
|
99 | # in order to get the desired effect. | |
|
100 | tcp-backlog 511 | |
|
101 | ||
|
102 | # Unix socket. | |
|
103 | # | |
|
104 | # Specify the path for the Unix socket that will be used to listen for | |
|
105 | # incoming connections. There is no default, so Redis will not listen | |
|
106 | # on a unix socket when not specified. | |
|
107 | # | |
|
108 | # unixsocket /tmp/redis.sock | |
|
109 | # unixsocketperm 700 | |
|
110 | ||
|
111 | # Close the connection after a client is idle for N seconds (0 to disable) | |
|
112 | timeout 0 | |
|
113 | ||
|
114 | # TCP keepalive. | |
|
115 | # | |
|
116 | # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence | |
|
117 | # of communication. This is useful for two reasons: | |
|
118 | # | |
|
119 | # 1) Detect dead peers. | |
|
120 | # 2) Force network equipment in the middle to consider the connection to be | |
|
121 | # alive. | |
|
122 | # | |
|
123 | # On Linux, the specified value (in seconds) is the period used to send ACKs. | |
|
124 | # Note that to close the connection the double of the time is needed. | |
|
125 | # On other kernels the period depends on the kernel configuration. | |
|
126 | # | |
|
127 | # A reasonable value for this option is 300 seconds, which is the new | |
|
128 | # Redis default starting with Redis 3.2.1. | |
|
129 | tcp-keepalive 300 | |
|
130 | ||
|
131 | ################################# TLS/SSL ##################################### | |
|
132 | ||
|
133 | # By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration | |
|
134 | # directive can be used to define TLS-listening ports. To enable TLS on the | |
|
135 | # default port, use: | |
|
136 | # | |
|
137 | # port 0 | |
|
138 | # tls-port 6379 | |
|
139 | ||
|
140 | # Configure a X.509 certificate and private key to use for authenticating the | |
|
141 | # server to connected clients, masters or cluster peers. These files should be | |
|
142 | # PEM formatted. | |
|
143 | # | |
|
144 | # tls-cert-file redis.crt | |
|
145 | # tls-key-file redis.key | |
|
146 | ||
|
147 | # Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange: | |
|
148 | # | |
|
149 | # tls-dh-params-file redis.dh | |
|
150 | ||
|
151 | # Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL | |
|
152 | # clients and peers. Redis requires an explicit configuration of at least one | |
|
153 | # of these, and will not implicitly use the system wide configuration. | |
|
154 | # | |
|
155 | # tls-ca-cert-file ca.crt | |
|
156 | # tls-ca-cert-dir /etc/ssl/certs | |
|
157 | ||
|
158 | # By default, clients (including replica servers) on a TLS port are required | |
|
159 | # to authenticate using valid client side certificates. | |
|
160 | # | |
|
161 | # If "no" is specified, client certificates are not required and not accepted. | |
|
162 | # If "optional" is specified, client certificates are accepted and must be | |
|
163 | # valid if provided, but are not required. | |
|
164 | # | |
|
165 | # tls-auth-clients no | |
|
166 | # tls-auth-clients optional | |
|
167 | ||
|
168 | # By default, a Redis replica does not attempt to establish a TLS connection | |
|
169 | # with its master. | |
|
170 | # | |
|
171 | # Use the following directive to enable TLS on replication links. | |
|
172 | # | |
|
173 | # tls-replication yes | |
|
174 | ||
|
175 | # By default, the Redis Cluster bus uses a plain TCP connection. To enable | |
|
176 | # TLS for the bus protocol, use the following directive: | |
|
177 | # | |
|
178 | # tls-cluster yes | |
|
179 | ||
|
180 | # Explicitly specify TLS versions to support. Allowed values are case insensitive | |
|
181 | # and include "TLSv1", "TLSv1.1", "TLSv1.2", "TLSv1.3" (OpenSSL >= 1.1.1) or | |
|
182 | # any combination. To enable only TLSv1.2 and TLSv1.3, use: | |
|
183 | # | |
|
184 | # tls-protocols "TLSv1.2 TLSv1.3" | |
|
185 | ||
|
186 | # Configure allowed ciphers. See the ciphers(1ssl) manpage for more information | |
|
187 | # about the syntax of this string. | |
|
188 | # | |
|
189 | # Note: this configuration applies only to <= TLSv1.2. | |
|
190 | # | |
|
191 | # tls-ciphers DEFAULT:!MEDIUM | |
|
192 | ||
|
193 | # Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more | |
|
194 | # information about the syntax of this string, and specifically for TLSv1.3 | |
|
195 | # ciphersuites. | |
|
196 | # | |
|
197 | # tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256 | |
|
198 | ||
|
199 | # When choosing a cipher, use the server's preference instead of the client | |
|
200 | # preference. By default, the server follows the client's preference. | |
|
201 | # | |
|
202 | # tls-prefer-server-ciphers yes | |
|
203 | ||
|
204 | # By default, TLS session caching is enabled to allow faster and less expensive | |
|
205 | # reconnections by clients that support it. Use the following directive to disable | |
|
206 | # caching. | |
|
207 | # | |
|
208 | # tls-session-caching no | |
|
209 | ||
|
210 | # Change the default number of TLS sessions cached. A zero value sets the cache | |
|
211 | # to unlimited size. The default size is 20480. | |
|
212 | # | |
|
213 | # tls-session-cache-size 5000 | |
|
214 | ||
|
215 | # Change the default timeout of cached TLS sessions. The default timeout is 300 | |
|
216 | # seconds. | |
|
217 | # | |
|
218 | # tls-session-cache-timeout 60 | |
|
219 | ||
|
220 | ################################# GENERAL ##################################### | |
|
221 | ||
|
222 | # By default Redis does not run as a daemon. Use 'yes' if you need it. | |
|
223 | # Note that Redis will write a pid file in /var/run/redis.pid when daemonized. | |
|
224 | daemonize no | |
|
225 | ||
|
226 | # If you run Redis from upstart or systemd, Redis can interact with your | |
|
227 | # supervision tree. Options: | |
|
228 | # supervised no - no supervision interaction | |
|
229 | # supervised upstart - signal upstart by putting Redis into SIGSTOP mode | |
|
230 | # requires "expect stop" in your upstart job config | |
|
231 | # supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET | |
|
232 | # supervised auto - detect upstart or systemd method based on | |
|
233 | # UPSTART_JOB or NOTIFY_SOCKET environment variables | |
|
234 | # Note: these supervision methods only signal "process is ready." | |
|
235 | # They do not enable continuous pings back to your supervisor. | |
|
236 | supervised no | |
|
237 | ||
|
238 | # If a pid file is specified, Redis writes it where specified at startup | |
|
239 | # and removes it at exit. | |
|
240 | # | |
|
241 | # When the server runs non daemonized, no pid file is created if none is | |
|
242 | # specified in the configuration. When the server is daemonized, the pid file | |
|
243 | # is used even if not specified, defaulting to "/var/run/redis.pid". | |
|
244 | # | |
|
245 | # Creating a pid file is best effort: if Redis is not able to create it | |
|
246 | # nothing bad happens, the server will start and run normally. | |
|
247 | pidfile /var/run/redis_6379.pid | |
|
248 | ||
|
249 | # Specify the server verbosity level. | |
|
250 | # This can be one of: | |
|
251 | # debug (a lot of information, useful for development/testing) | |
|
252 | # verbose (many rarely useful info, but not a mess like the debug level) | |
|
253 | # notice (moderately verbose, what you want in production probably) | |
|
254 | # warning (only very important / critical messages are logged) | |
|
255 | loglevel notice | |
|
256 | ||
|
257 | # Specify the log file name. Also the empty string can be used to force | |
|
258 | # Redis to log on the standard output. Note that if you use standard | |
|
259 | # output for logging but daemonize, logs will be sent to /dev/null | |
|
260 | logfile "" | |
|
261 | ||
|
262 | # To enable logging to the system logger, just set 'syslog-enabled' to yes, | |
|
263 | # and optionally update the other syslog parameters to suit your needs. | |
|
264 | # syslog-enabled no | |
|
265 | ||
|
266 | # Specify the syslog identity. | |
|
267 | # syslog-ident redis | |
|
268 | ||
|
269 | # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. | |
|
270 | # syslog-facility local0 | |
|
271 | ||
|
272 | # Set the number of databases. The default database is DB 0, you can select | |
|
273 | # a different one on a per-connection basis using SELECT <dbid> where | |
|
274 | # dbid is a number between 0 and 'databases'-1 | |
|
275 | databases 16 | |
|
276 | ||
|
277 | # By default Redis shows an ASCII art logo only when started to log to the | |
|
278 | # standard output and if the standard output is a TTY. Basically this means | |
|
279 | # that normally a logo is displayed only in interactive sessions. | |
|
280 | # | |
|
281 | # However it is possible to force the pre-4.0 behavior and always show a | |
|
282 | # ASCII art logo in startup logs by setting the following option to yes. | |
|
283 | always-show-logo yes | |
|
284 | ||
|
285 | ################################ SNAPSHOTTING ################################ | |
|
286 | # | |
|
287 | # Save the DB on disk: | |
|
288 | # | |
|
289 | # save <seconds> <changes> | |
|
290 | # | |
|
291 | # Will save the DB if both the given number of seconds and the given | |
|
292 | # number of write operations against the DB occurred. | |
|
293 | # | |
|
294 | # In the example below the behavior will be to save: | |
|
295 | # after 900 sec (15 min) if at least 1 key changed | |
|
296 | # after 300 sec (5 min) if at least 10 keys changed | |
|
297 | # after 60 sec if at least 10000 keys changed | |
|
298 | # | |
|
299 | # Note: you can disable saving completely by commenting out all "save" lines. | |
|
300 | # | |
|
301 | # It is also possible to remove all the previously configured save | |
|
302 | # points by adding a save directive with a single empty string argument | |
|
303 | # like in the following example: | |
|
304 | # | |
|
305 | # save "" | |
|
306 | ||
|
307 | save 900 1 | |
|
308 | save 300 10 | |
|
309 | save 60 10000 | |
|
310 | ||
|
311 | # By default Redis will stop accepting writes if RDB snapshots are enabled | |
|
312 | # (at least one save point) and the latest background save failed. | |
|
313 | # This will make the user aware (in a hard way) that data is not persisting | |
|
314 | # on disk properly, otherwise chances are that no one will notice and some | |
|
315 | # disaster will happen. | |
|
316 | # | |
|
317 | # If the background saving process will start working again Redis will | |
|
318 | # automatically allow writes again. | |
|
319 | # | |
|
320 | # However if you have setup your proper monitoring of the Redis server | |
|
321 | # and persistence, you may want to disable this feature so that Redis will | |
|
322 | # continue to work as usual even if there are problems with disk, | |
|
323 | # permissions, and so forth. | |
|
324 | stop-writes-on-bgsave-error yes | |
|
325 | ||
|
326 | # Compress string objects using LZF when dump .rdb databases? | |
|
327 | # By default compression is enabled as it's almost always a win. | |
|
328 | # If you want to save some CPU in the saving child set it to 'no' but | |
|
329 | # the dataset will likely be bigger if you have compressible values or keys. | |
|
330 | rdbcompression yes | |
|
331 | ||
|
332 | # Since version 5 of RDB a CRC64 checksum is placed at the end of the file. | |
|
333 | # This makes the format more resistant to corruption but there is a performance | |
|
334 | # hit to pay (around 10%) when saving and loading RDB files, so you can disable it | |
|
335 | # for maximum performances. | |
|
336 | # | |
|
337 | # RDB files created with checksum disabled have a checksum of zero that will | |
|
338 | # tell the loading code to skip the check. | |
|
339 | rdbchecksum yes | |
|
340 | ||
|
341 | # The filename where to dump the DB | |
|
342 | dbfilename dump.rdb | |
|
343 | ||
|
344 | # Remove RDB files used by replication in instances without persistence | |
|
345 | # enabled. By default this option is disabled, however there are environments | |
|
346 | # where for regulations or other security concerns, RDB files persisted on | |
|
347 | # disk by masters in order to feed replicas, or stored on disk by replicas | |
|
348 | # in order to load them for the initial synchronization, should be deleted | |
|
349 | # ASAP. Note that this option ONLY WORKS in instances that have both AOF | |
|
350 | # and RDB persistence disabled, otherwise it is completely ignored. | 
|
351 | # | |
|
352 | # An alternative (and sometimes better) way to obtain the same effect is | |
|
353 | # to use diskless replication on both master and replicas instances. However | |
|
354 | # in the case of replicas, diskless is not always an option. | |
|
355 | rdb-del-sync-files no | |
|
356 | ||
|
357 | # The working directory. | |
|
358 | # | |
|
359 | # The DB will be written inside this directory, with the filename specified | |
|
360 | # above using the 'dbfilename' configuration directive. | |
|
361 | # | |
|
362 | # The Append Only File will also be created inside this directory. | |
|
363 | # | |
|
364 | # Note that you must specify a directory here, not a file name. | |
|
365 | dir ./ | |
|
366 | ||
|
367 | ################################# REPLICATION ################################# | |
|
368 | ||
|
369 | # Master-Replica replication. Use replicaof to make a Redis instance a copy of | |
|
370 | # another Redis server. A few things to understand ASAP about Redis replication. | |
|
371 | # | |
|
372 | # +------------------+ +---------------+ | |
|
373 | # | Master | ---> | Replica | | |
|
374 | # | (receive writes) | | (exact copy) | | |
|
375 | # +------------------+ +---------------+ | |
|
376 | # | |
|
377 | # 1) Redis replication is asynchronous, but you can configure a master to | |
|
378 | # stop accepting writes if it appears to be not connected with at least | |
|
379 | # a given number of replicas. | |
|
380 | # 2) Redis replicas are able to perform a partial resynchronization with the | |
|
381 | # master if the replication link is lost for a relatively small amount of | |
|
382 | # time. You may want to configure the replication backlog size (see the next | |
|
383 | # sections of this file) with a sensible value depending on your needs. | |
|
384 | # 3) Replication is automatic and does not need user intervention. After a | |
|
385 | # network partition replicas automatically try to reconnect to masters | |
|
386 | # and resynchronize with them. | |
|
387 | # | |
|
388 | # replicaof <masterip> <masterport> | |
|
389 | ||
|
390 | # If the master is password protected (using the "requirepass" configuration | |
|
391 | # directive below) it is possible to tell the replica to authenticate before | |
|
392 | # starting the replication synchronization process, otherwise the master will | |
|
393 | # refuse the replica request. | |
|
394 | # | |
|
395 | # masterauth <master-password> | |
|
396 | # | |
|
397 | # However this is not enough if you are using Redis ACLs (for Redis version | |
|
398 | # 6 or greater), and the default user is not capable of running the PSYNC | |
|
399 | # command and/or other commands needed for replication. In this case it's | |
|
400 | # better to configure a special user to use with replication, and specify the | |
|
401 | # masteruser configuration as such: | |
|
402 | # | |
|
403 | # masteruser <username> | |
|
404 | # | |
|
405 | # When masteruser is specified, the replica will authenticate against its | |
|
406 | # master using the new AUTH form: AUTH <username> <password>. | |
|
407 | ||
|
408 | # When a replica loses its connection with the master, or when the replication | |
|
409 | # is still in progress, the replica can act in two different ways: | |
|
410 | # | |
|
411 | # 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will | |
|
412 | # still reply to client requests, possibly with out of date data, or the | |
|
413 | # data set may just be empty if this is the first synchronization. | |
|
414 | # | |
|
415 | # 2) If replica-serve-stale-data is set to 'no' the replica will reply with | |
|
416 | # an error "SYNC with master in progress" to all commands except: | |
|
417 | # INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE, | |
|
418 | # UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST, | |
|
419 | # HOST and LATENCY. | |
|
420 | # | |
|
421 | replica-serve-stale-data yes | |
|
422 | ||
|
423 | # You can configure a replica instance to accept writes or not. Writing against | |
|
424 | # a replica instance may be useful to store some ephemeral data (because data | |
|
425 | # written on a replica will be easily deleted after resync with the master) but | |
|
426 | # may also cause problems if clients are writing to it because of a | |
|
427 | # misconfiguration. | |
|
428 | # | |
|
429 | # Since Redis 2.6 by default replicas are read-only. | |
|
430 | # | |
|
431 | # Note: read only replicas are not designed to be exposed to untrusted clients | |
|
432 | # on the internet. It's just a protection layer against misuse of the instance. | |
|
433 | # Still a read only replica exports by default all the administrative commands | |
|
434 | # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve | |
|
435 | # security of read only replicas using 'rename-command' to shadow all the | |
|
436 | # administrative / dangerous commands. | |
|
437 | replica-read-only yes | |
|
438 | ||
|
439 | # Replication SYNC strategy: disk or socket. | |
|
440 | # | |
|
441 | # New replicas and reconnecting replicas that are not able to continue the | |
|
442 | # replication process just receiving differences, need to do what is called a | |
|
443 | # "full synchronization". An RDB file is transmitted from the master to the | |
|
444 | # replicas. | |
|
445 | # | |
|
446 | # The transmission can happen in two different ways: | |
|
447 | # | |
|
448 | # 1) Disk-backed: The Redis master creates a new process that writes the RDB | |
|
449 | # file on disk. Later the file is transferred by the parent | |
|
450 | # process to the replicas incrementally. | |
|
451 | # 2) Diskless: The Redis master creates a new process that directly writes the | |
|
452 | # RDB file to replica sockets, without touching the disk at all. | |
|
453 | # | |
|
454 | # With disk-backed replication, while the RDB file is generated, more replicas | |
|
455 | # can be queued and served with the RDB file as soon as the current child | |
|
456 | # producing the RDB file finishes its work. With diskless replication instead | |
|
457 | # once the transfer starts, new replicas arriving will be queued and a new | |
|
458 | # transfer will start when the current one terminates. | |
|
459 | # | |
|
460 | # When diskless replication is used, the master waits a configurable amount of | |
|
461 | # time (in seconds) before starting the transfer in the hope that multiple | |
|
462 | # replicas will arrive and the transfer can be parallelized. | |
|
463 | # | |
|
464 | # With slow disks and fast (large bandwidth) networks, diskless replication | |
|
465 | # works better. | |
|
466 | repl-diskless-sync no | |
|
467 | ||
|
468 | # When diskless replication is enabled, it is possible to configure the delay | |
|
469 | # the server waits in order to spawn the child that transfers the RDB via socket | |
|
470 | # to the replicas. | |
|
471 | # | |
|
472 | # This is important since once the transfer starts, it is not possible to serve | |
|
473 | # new replicas arriving, that will be queued for the next RDB transfer, so the | |
|
474 | # server waits a delay in order to let more replicas arrive. | |
|
475 | # | |
|
476 | # The delay is specified in seconds, and by default is 5 seconds. To disable | |
|
477 | # it entirely just set it to 0 seconds and the transfer will start ASAP. | |
|
478 | repl-diskless-sync-delay 5 | |
|
479 | ||
|
480 | # ----------------------------------------------------------------------------- | |
|
481 | # WARNING: RDB diskless load is experimental. Since in this setup the replica | |
|
482 | # does not immediately store an RDB on disk, it may cause data loss during | |
|
483 | # failovers. RDB diskless load + Redis modules not handling I/O reads may also | |
|
484 | # cause Redis to abort in case of I/O errors during the initial synchronization | |
|
485 | # stage with the master. Use only if you know what you are doing. | 
|
486 | # ----------------------------------------------------------------------------- | |
|
487 | # | |
|
488 | # Replica can load the RDB it reads from the replication link directly from the | |
|
489 | # socket, or store the RDB to a file and read that file after it was completely | |
|
490 | # received from the master. | |
|
491 | # | |
|
492 | # In many cases the disk is slower than the network, and storing and loading | |
|
493 | # the RDB file may increase replication time (and even increase the master's | |
|
494 | # Copy on Write memory and replica buffers). | 
|
495 | # However, parsing the RDB file directly from the socket may mean that we have | |
|
496 | # to flush the contents of the current database before the full rdb was | |
|
497 | # received. For this reason we have the following options: | |
|
498 | # | |
|
499 | # "disabled" - Don't use diskless load (store the rdb file to the disk first) | |
|
500 | # "on-empty-db" - Use diskless load only when it is completely safe. | |
|
501 | # "swapdb" - Keep a copy of the current db contents in RAM while parsing | |
|
502 | # the data directly from the socket. note that this requires | |
|
503 | # sufficient memory, if you don't have it, you risk an OOM kill. | |
|
504 | repl-diskless-load disabled | |
|
505 | ||
|
506 | # Replicas send PINGs to server in a predefined interval. It's possible to | |
|
507 | # change this interval with the repl_ping_replica_period option. The default | |
|
508 | # value is 10 seconds. | |
|
509 | # | |
|
510 | # repl-ping-replica-period 10 | |
|
511 | ||
|
512 | # The following option sets the replication timeout for: | |
|
513 | # | |
|
514 | # 1) Bulk transfer I/O during SYNC, from the point of view of replica. | |
|
515 | # 2) Master timeout from the point of view of replicas (data, pings). | |
|
516 | # 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). | |
|
517 | # | |
|
518 | # It is important to make sure that this value is greater than the value | |
|
519 | # specified for repl-ping-replica-period otherwise a timeout will be detected | |
|
520 | # every time there is low traffic between the master and the replica. The default | |
|
521 | # value is 60 seconds. | |
|
522 | # | |
|
523 | # repl-timeout 60 | |
|
524 | ||
|
525 | # Disable TCP_NODELAY on the replica socket after SYNC? | |
|
526 | # | |
|
527 | # If you select "yes" Redis will use a smaller number of TCP packets and | |
|
528 | # less bandwidth to send data to replicas. But this can add a delay for | |
|
529 | # the data to appear on the replica side, up to 40 milliseconds with | |
|
530 | # Linux kernels using a default configuration. | |
|
531 | # | |
|
532 | # If you select "no" the delay for data to appear on the replica side will | |
|
533 | # be reduced but more bandwidth will be used for replication. | |
|
534 | # | |
|
535 | # By default we optimize for low latency, but in very high traffic conditions | |
|
536 | # or when the master and replicas are many hops away, turning this to "yes" may | |
|
537 | # be a good idea. | |
|
538 | repl-disable-tcp-nodelay no | |
|
539 | ||
|
540 | # Set the replication backlog size. The backlog is a buffer that accumulates | |
|
541 | # replica data when replicas are disconnected for some time, so that when a | |
|
542 | # replica wants to reconnect again, often a full resync is not needed, but a | |
|
543 | # partial resync is enough, just passing the portion of data the replica | |
|
544 | # missed while disconnected. | |
|
545 | # | |
|
546 | # The bigger the replication backlog, the longer the replica can endure the | |
|
547 | # disconnect and later be able to perform a partial resynchronization. | |
|
548 | # | |
|
549 | # The backlog is only allocated if there is at least one replica connected. | |
|
550 | # | |
|
551 | # repl-backlog-size 1mb | |
|
552 | ||
|
553 | # After a master has no connected replicas for some time, the backlog will be | |
|
554 | # freed. The following option configures the amount of seconds that need to | |
|
555 | # elapse, starting from the time the last replica disconnected, for the backlog | |
|
556 | # buffer to be freed. | |
|
557 | # | |
|
558 | # Note that replicas never free the backlog for timeout, since they may be | |
|
559 | # promoted to masters later, and should be able to correctly "partially | |
|
560 | # resynchronize" with other replicas: hence they should always accumulate backlog. | |
|
561 | # | |
|
562 | # A value of 0 means to never release the backlog. | |
|
563 | # | |
|
564 | # repl-backlog-ttl 3600 | |
|
565 | ||
|
566 | # The replica priority is an integer number published by Redis in the INFO | |
|
567 | # output. It is used by Redis Sentinel in order to select a replica to promote | |
|
568 | # into a master if the master is no longer working correctly. | |
|
569 | # | |
|
570 | # A replica with a low priority number is considered better for promotion, so | |
|
571 | # for instance if there are three replicas with priority 10, 100, 25 Sentinel | |
|
572 | # will pick the one with priority 10, that is the lowest. | |
|
573 | # | |
|
574 | # However a special priority of 0 marks the replica as not able to perform the | |
|
575 | # role of master, so a replica with priority of 0 will never be selected by | |
|
576 | # Redis Sentinel for promotion. | |
|
577 | # | |
|
578 | # By default the priority is 100. | |
|
579 | replica-priority 100 | |
|
580 | ||
|
581 | # It is possible for a master to stop accepting writes if there are less than | |
|
582 | # N replicas connected, having a lag less than or equal to M seconds. | 
|
583 | # | |
|
584 | # The N replicas need to be in "online" state. | |
|
585 | # | |
|
586 | # The lag in seconds, that must be <= the specified value, is calculated from | |
|
587 | # the last ping received from the replica, that is usually sent every second. | |
|
588 | # | |
|
589 | # This option does not GUARANTEE that N replicas will accept the write, but | |
|
590 | # will limit the window of exposure for lost writes in case not enough replicas | |
|
591 | # are available, to the specified number of seconds. | |
|
592 | # | |
|
593 | # For example to require at least 3 replicas with a lag <= 10 seconds use: | |
|
594 | # | |
|
595 | # min-replicas-to-write 3 | |
|
596 | # min-replicas-max-lag 10 | |
|
597 | # | |
|
598 | # Setting one or the other to 0 disables the feature. | |
|
599 | # | |
|
600 | # By default min-replicas-to-write is set to 0 (feature disabled) and | |
|
601 | # min-replicas-max-lag is set to 10. | |
|
602 | ||
|
603 | # A Redis master is able to list the address and port of the attached | |
|
604 | # replicas in different ways. For example the "INFO replication" section | |
|
605 | # offers this information, which is used, among other tools, by | |
|
606 | # Redis Sentinel in order to discover replica instances. | |
|
607 | # Another place where this info is available is in the output of the | |
|
608 | # "ROLE" command of a master. | |
|
609 | # | |
|
610 | # The listed IP address and port normally reported by a replica is | |
|
611 | # obtained in the following way: | |
|
612 | # | |
|
613 | # IP: The address is auto detected by checking the peer address | |
|
614 | # of the socket used by the replica to connect with the master. | |
|
615 | # | |
|
616 | # Port: The port is communicated by the replica during the replication | |
|
617 | # handshake, and is normally the port that the replica is using to | |
|
618 | # listen for connections. | |
|
619 | # | |
|
620 | # However when port forwarding or Network Address Translation (NAT) is | |
|
621 | # used, the replica may actually be reachable via different IP and port | |
|
622 | # pairs. The following two options can be used by a replica in order to | |
|
623 | # report to its master a specific set of IP and port, so that both INFO | |
|
624 | # and ROLE will report those values. | |
|
625 | # | |
|
626 | # There is no need to use both the options if you need to override just | |
|
627 | # the port or the IP address. | |
|
628 | # | |
|
629 | # replica-announce-ip 5.5.5.5 | |
|
630 | # replica-announce-port 1234 | |
|
631 | ||
|
632 | ############################### KEYS TRACKING ################################# | |
|
633 | ||
|
634 | # Redis implements server assisted support for client side caching of values. | |
|
635 | # This is implemented using an invalidation table that remembers, using | |
|
636 | # 16 million slots, which clients may have cached certain subsets of keys. In turn | 
|
637 | # this is used in order to send invalidation messages to clients. Please | |
|
638 | # check this page to understand more about the feature: | |
|
639 | # | |
|
640 | # https://redis.io/topics/client-side-caching | |
|
641 | # | |
|
642 | # When tracking is enabled for a client, all the read only queries are assumed | |
|
643 | # to be cached: this will force Redis to store information in the invalidation | |
|
644 | # table. When keys are modified, such information is flushed away, and | |
|
645 | # invalidation messages are sent to the clients. However if the workload is | |
|
646 | # heavily dominated by reads, Redis could use more and more memory in order | |
|
647 | # to track the keys fetched by many clients. | |
|
648 | # | |
|
649 | # For this reason it is possible to configure a maximum fill value for the | |
|
650 | # invalidation table. By default it is set to 1M of keys, and once this limit | |
|
651 | # is reached, Redis will start to evict keys in the invalidation table | |
|
652 | # even if they were not modified, just to reclaim memory: this will in turn | |
|
653 | # force the clients to invalidate the cached values. Basically the table | |
|
654 | # maximum size is a trade off between the memory you want to spend server | |
|
655 | # side to track information about who cached what, and the ability of clients | |
|
656 | # to retain cached objects in memory. | |
|
657 | # | |
|
658 | # If you set the value to 0, it means there are no limits, and Redis will | |
|
659 | # retain as many keys as needed in the invalidation table. | |
|
660 | # In the "stats" INFO section, you can find information about the number of | |
|
661 | # keys in the invalidation table at every given moment. | |
|
662 | # | |
|
663 | # Note: when key tracking is used in broadcasting mode, no memory is used | |
|
664 | # on the server side, so this setting has no effect. | |
|
665 | # | |
|
666 | # tracking-table-max-keys 1000000 | |
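#
# A minimal sketch of the tracking handshake described above, as seen from
# redis-cli in RESP3 mode (requires Redis 6 or newer; the key name is made up):
#
#   redis-cli -3
#   > CLIENT TRACKING on     # server starts remembering what this client reads
#   > GET user:1000          # this key now occupies a slot in the table
#
# Any later write to user:1000 makes the server push an invalidation message
# to this client.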
|
667 | ||
|
668 | ################################## SECURITY ################################### | |
|
669 | ||
|
670 | # Warning: since Redis is pretty fast, an outside user can try up to | |
|
671 | # 1 million passwords per second against a modern box. This means that you | |
|
672 | # should use very strong passwords, otherwise they will be very easy to break. | |
|
673 | # Note that because the password is really a shared secret between the client | |
|
674 | # and the server, and should not be memorized by any human, the password | |
|
675 | # can easily be a long string from /dev/urandom or whatever, so by using a | |
|
676 | # long and unguessable password no brute force attack will be possible. | |
|
677 | ||
|
678 | # Redis ACL users are defined in the following format: | |
|
679 | # | |
|
680 | # user <username> ... acl rules ... | |
|
681 | # | |
|
682 | # For example: | |
|
683 | # | |
|
684 | # user worker +@list +@connection ~jobs:* on >ffa9203c493aa99 | |
|
685 | # | |
|
686 | # The special username "default" is used for new connections. If this user | |
|
687 | # has the "nopass" rule, then new connections will be immediately authenticated | |
|
688 | # as the "default" user without the need of any password provided via the | |
|
689 | # AUTH command. Otherwise if the "default" user is not flagged with "nopass" | |
|
690 | # the connections will start in a not-authenticated state, and will require | |
|
691 | # AUTH (or the HELLO command AUTH option) in order to be authenticated and | |
|
692 | # start working. | |
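#
# For illustration, a locked-down "default" user could be declared like this
# (the password below is a placeholder, not a value used by this setup):
#
# user default on >change-me-to-a-long-random-secret ~* +@all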
|
693 | # | |
|
694 | # The ACL rules that describe what a user can do are the following: | |
|
695 | # | |
|
696 | # on Enable the user: it is possible to authenticate as this user. | |
|
697 | # off Disable the user: it's no longer possible to authenticate | |
|
698 | # with this user, however the already authenticated connections | |
|
699 | # will still work. | |
|
700 | # +<command> Allow the execution of that command | |
|
701 | # -<command> Disallow the execution of that command | |
|
702 | # +@<category> Allow the execution of all the commands in such a category, | |
|
703 | # with valid categories being @admin, @set, @sortedset, ... | |
|
704 | # and so forth, see the full list in the server.c file where | |
|
705 | # the Redis command table is described and defined. | |
|
706 | # The special category @all means all the commands, both those currently | |
|
707 | # present in the server and those that will be loaded in the future | |
|
708 | # via modules. | |
|
709 | # +<command>|subcommand Allow a specific subcommand of an otherwise | |
|
710 | # disabled command. Note that this form is not | |
|
711 | # allowed as negative like -DEBUG|SEGFAULT, but | |
|
712 | # only additive starting with "+". | |
|
713 | # allcommands Alias for +@all. Note that it implies the ability to execute | |
|
714 | # all the future commands loaded via the modules system. | |
|
715 | # nocommands Alias for -@all. | |
|
716 | # ~<pattern> Add a pattern of keys that can be mentioned as part of | |
|
717 | # commands. For instance ~* allows all the keys. The pattern | |
|
718 | # is a glob-style pattern like the one of KEYS. | |
|
719 | # It is possible to specify multiple patterns. | |
|
720 | # allkeys Alias for ~* | |
|
721 | # resetkeys Flush the list of allowed keys patterns. | |
|
722 | # ><password> Add this password to the list of valid passwords for the user. | |
|
723 | # For example >mypass will add "mypass" to the list. | |
|
724 | # This directive clears the "nopass" flag (see later). | |
|
725 | # <<password> Remove this password from the list of valid passwords. | |
|
726 | # nopass All the set passwords of the user are removed, and the user | |
|
727 | # is flagged as requiring no password: it means that every | |
|
728 | # password will work against this user. If this directive is | |
|
729 | # used for the default user, every new connection will be | |
|
730 | # immediately authenticated with the default user without | |
|
731 | # any explicit AUTH command required. Note that the "resetpass" | |
|
732 | # directive will clear this condition. | |
|
733 | # resetpass Flush the list of allowed passwords. Moreover removes the | |
|
734 | # "nopass" status. After "resetpass" the user has no associated | |
|
735 | # passwords and there is no way to authenticate without adding | |
|
736 | # some password (or setting it as "nopass" later). | |
|
737 | # reset Performs the following actions: resetpass, resetkeys, off, | |
|
738 | # -@all. The user returns to the same state it had immediately | |
|
739 | # after its creation. | |
|
740 | # | |
|
741 | # ACL rules can be specified in any order: for instance you can start with | |
|
742 | # passwords, then flags, or key patterns. However note that the additive | |
|
743 | # and subtractive rules will CHANGE MEANING depending on the ordering. | |
|
744 | # For instance see the following example: | |
|
745 | # | |
|
746 | # user alice on +@all -DEBUG ~* >somepassword | |
|
747 | # | |
|
748 | # This will allow "alice" to use all the commands with the exception of the | |
|
749 | # DEBUG command, since +@all added all the commands to the set of the commands | |
|
750 | # alice can use, and later DEBUG was removed. However if we invert the order | |
|
751 | # of two ACL rules the result will be different: | |
|
752 | # | |
|
753 | # user alice on -DEBUG +@all ~* >somepassword | |
|
754 | # | |
|
755 | # Now DEBUG was removed when alice did not yet have any commands in the set of | |
|
756 | # allowed commands; later all the commands are added, so the user will be able to | |
|
757 | # execute everything. | |
|
758 | # | |
|
759 | # Basically ACL rules are processed left-to-right. | |
|
760 | # | |
|
761 | # For more information about ACL configuration please refer to | |
|
762 | # the Redis web site at https://redis.io/topics/acl | |
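#
# The same rules can also be managed at runtime, for example with redis-cli
# (a sketch mirroring the "worker" example above; the password is made up):
#
#   ACL SETUSER worker on >ffa9203c493aa99 ~jobs:* +@list +@connection
#   ACL LIST              # show all configured users and their rules
#   ACL GETUSER worker    # inspect a single user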
|
763 | ||
|
764 | # ACL LOG | |
|
765 | # | |
|
766 | # The ACL Log tracks failed commands and authentication events associated | |
|
767 | # with ACLs. The ACL Log is useful to troubleshoot failed commands blocked | |
|
768 | # by ACLs. The ACL Log is stored in memory. You can reclaim memory with | |
|
769 | # ACL LOG RESET. Define the maximum entry length of the ACL Log below. | |
|
770 | acllog-max-len 128 | |
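#
# The log itself is inspected and cleared at runtime, e.g. with redis-cli:
#
#   ACL LOG 10      # show the last 10 entries
#   ACL LOG RESET   # discard all entries and reclaim the memory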
|
771 | ||
|
772 | # Using an external ACL file | |
|
773 | # | |
|
774 | # Instead of configuring users here in this file, it is possible to use | |
|
775 | # a stand-alone file just listing users. The two methods cannot be mixed: | |
|
776 | # if you configure users here and at the same time you activate the external | |
|
777 | # ACL file, the server will refuse to start. | |
|
778 | # | |
|
779 | # The format of the external ACL user file is exactly the same as the | |
|
780 | # format that is used inside redis.conf to describe users. | |
|
781 | # | |
|
782 | # aclfile /etc/redis/users.acl | |
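#
# The external file simply contains one "user" line per account, in the same
# format shown above. For example (hypothetical contents of /etc/redis/users.acl):
#
#   user default off
#   user admin on >a-long-random-secret ~* +@all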
|
783 | ||
|
784 | # IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility | |
|
785 | # layer on top of the new ACL system. The option effect will be just setting | |
|
786 | # the password for the default user. Clients will still authenticate using | |
|
787 | # AUTH <password> as usual, or more explicitly with AUTH default <password> | |
|
788 | # if they follow the new protocol: both will work. | |
|
789 | # | |
|
790 | # requirepass foobared | |
|
791 | ||
|
792 | # Command renaming (DEPRECATED). | |
|
793 | # | |
|
794 | # ------------------------------------------------------------------------ | |
|
795 | # WARNING: avoid using this option if possible. Instead use ACLs to remove | |
|
796 | # commands from the default user, and put them only in some admin user you | |
|
797 | # create for administrative purposes. | |
|
798 | # ------------------------------------------------------------------------ | |
|
799 | # | |
|
800 | # It is possible to change the name of dangerous commands in a shared | |
|
801 | # environment. For instance the CONFIG command may be renamed into something | |
|
802 | # hard to guess so that it will still be available for internal-use tools | |
|
803 | # but not available for general clients. | |
|
804 | # | |
|
805 | # Example: | |
|
806 | # | |
|
807 | # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 | |
|
808 | # | |
|
809 | # It is also possible to completely kill a command by renaming it into | |
|
810 | # an empty string: | |
|
811 | # | |
|
812 | # rename-command CONFIG "" | |
|
813 | # | |
|
814 | # Please note that changing the name of commands that are logged into the | |
|
815 | # AOF file or transmitted to replicas may cause problems. | |
|
816 | ||
|
817 | ################################### CLIENTS #################################### | |
|
818 | ||
|
819 | # Set the max number of connected clients at the same time. By default | |
|
820 | # this limit is set to 10000 clients, however if the Redis server is not | |
|
821 | # able to configure the process file limit to allow for the specified limit | |
|
822 | # the max number of allowed clients is set to the current file limit | |
|
823 | # minus 32 (as Redis reserves a few file descriptors for internal uses). | |
|
824 | # | |
|
825 | # Once the limit is reached Redis will close all new connections, sending | |
|
826 | # an error 'max number of clients reached'. | |
|
827 | # | |
|
828 | # IMPORTANT: When Redis Cluster is used, the max number of connections is also | |
|
829 | # shared with the cluster bus: every node in the cluster will use two | |
|
830 | # connections, one incoming and another outgoing. It is important to size the | |
|
831 | # limit accordingly in case of very large clusters. | |
|
832 | # | |
|
833 | # maxclients 10000 | |
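#
# Worked example (illustrative numbers): with "maxclients 10000" but a process
# file limit of only 4096 descriptors, the effective limit becomes
# 4096 - 32 = 4064 clients.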
|
834 | ||
|
835 | ############################## MEMORY MANAGEMENT ################################ | |
|
836 | ||
|
837 | # Set a memory usage limit to the specified amount of bytes. | |
|
838 | # When the memory limit is reached Redis will try to remove keys | |
|
839 | # according to the eviction policy selected (see maxmemory-policy). | |
|
840 | # | |
|
841 | # If Redis can't remove keys according to the policy, or if the policy is | |
|
842 | # set to 'noeviction', Redis will start to reply with errors to commands | |
|
843 | # that would use more memory, like SET, LPUSH, and so on, and will continue | |
|
844 | # to reply to read-only commands like GET. | |
|
845 | # | |
|
846 | # This option is usually useful when using Redis as an LRU or LFU cache, or to | |
|
847 | # set a hard memory limit for an instance (using the 'noeviction' policy). | |
|
848 | # | |
|
849 | # WARNING: If you have replicas attached to an instance with maxmemory on, | |
|
850 | # the size of the output buffers needed to feed the replicas is subtracted | |
|
851 | # from the used memory count, so that network problems / resyncs will | |
|
852 | # not trigger a loop where keys are evicted, and in turn the output | |
|
853 | # buffer of replicas is full with DELs of keys evicted triggering the deletion | |
|
854 | # of more keys, and so forth until the database is completely emptied. | |
|
855 | # | |
|
856 | # In short... if you have replicas attached it is suggested that you set a lower | |
|
857 | # limit for maxmemory so that there is some free RAM on the system for replica | |
|
858 | # output buffers (but this is not needed if the policy is 'noeviction'). | |
|
859 | # | |
|
860 | maxmemory 8192mb | |
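# The limit can also be changed at runtime without a restart, e.g.:
#
#   redis-cli CONFIG SET maxmemory 4gb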
|
861 | ||
|
862 | # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory | |
|
863 | # is reached. You can select one from the following behaviors: | |
|
864 | # | |
|
865 | # volatile-lru -> Evict using approximated LRU, only keys with an expire set. | |
|
866 | # allkeys-lru -> Evict any key using approximated LRU. | |
|
867 | # volatile-lfu -> Evict using approximated LFU, only keys with an expire set. | |
|
868 | # allkeys-lfu -> Evict any key using approximated LFU. | |
|
869 | # volatile-random -> Remove a random key having an expire set. | |
|
870 | # allkeys-random -> Remove a random key, any key. | |
|
871 | # volatile-ttl -> Remove the key with the nearest expire time (minor TTL) | |
|
872 | # noeviction -> Don't evict anything, just return an error on write operations. | |
|
873 | # | |
|
874 | # LRU means Least Recently Used | |
|
875 | # LFU means Least Frequently Used | |
|
876 | # | |
|
877 | # LRU, LFU and volatile-ttl are all implemented using approximated | |
|
878 | # randomized algorithms. | |
|
879 | # | |
|
880 | # Note: with any of the above policies, Redis will return an error on write | |
|
881 | # operations, when there are no suitable keys for eviction. | |
|
882 | # | |
|
883 | # At the date of writing these commands are: set setnx setex append | |
|
884 | # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd | |
|
885 | # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby | |
|
886 | # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby | |
|
887 | # getset mset msetnx exec sort | |
|
888 | # | |
|
889 | # The upstream default is noeviction; this configuration uses: | |
|
890 | # | |
|
891 | maxmemory-policy volatile-lru | |
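#
# Note on this choice: volatile-lru only evicts keys that carry a TTL; for a
# pure cache where every key is disposable, allkeys-lru is the usual pick.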
|
892 | ||
|
893 | # LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated | |
|
894 | # algorithms (in order to save memory), so you can tune it for speed or | |
|
895 | # accuracy. By default Redis will check five keys and pick the one that was | |
|
896 | # used least recently; you can change the sample size using the following | |
|
897 | # configuration directive. | |
|
898 | # | |
|
899 | # The default of 5 produces good enough results. 10 approximates true LRU | |
|
900 | # very closely but costs more CPU. 3 is faster but not very accurate. | |
|
901 | # | |
|
902 | maxmemory-samples 5 | |
|
903 | ||
|
904 | # Starting from Redis 5, by default a replica will ignore its maxmemory setting | |
|
905 | # (unless it is promoted to master after a failover or manually). It means | |
|
906 | # that the eviction of keys will be just handled by the master, sending the | |
|
907 | # DEL commands to the replica as keys evict in the master side. | |
|
908 | # | |
|
909 | # This behavior ensures that masters and replicas stay consistent, and is usually | |
|
910 | # what you want, however if your replica is writable, or you want the replica | |
|
911 | # to have a different memory setting, and you are sure all the writes performed | |
|
912 | # to the replica are idempotent, then you may change this default (but be sure | |
|
913 | # to understand what you are doing). | |
|
914 | # | |
|
915 | # Note that since the replica by default does not evict, it may end up using more | |
|
916 | # memory than the one set via maxmemory (there are certain buffers that may | |
|
917 | # be larger on the replica, or data structures may sometimes take more memory | |
|
918 | # and so forth). So make sure you monitor your replicas and make sure they | |
|
919 | # have enough memory to never hit a real out-of-memory condition before the | |
|
920 | # master hits the configured maxmemory setting. | |
|
921 | # | |
|
922 | # replica-ignore-maxmemory yes | |
|
923 | ||
|
924 | # Redis reclaims expired keys in two ways: upon access when those keys are | |
|
925 | # found to be expired, and also in background, in what is called the | |
|
926 | # "active expire key". The key space is slowly and interactively scanned | |
|
927 | # looking for expired keys to reclaim, so that it is possible to free memory | |
|
928 | # of keys that are expired and will never be accessed again in a short time. | |
|
929 | # | |
|
930 | # The default effort of the expire cycle will try to avoid having more than | |
|
931 | # ten percent of expired keys still in memory, and will try to avoid consuming | |
|
932 | # more than 25% of total memory and to avoid adding latency to the system. However | |
|
933 | # it is possible to increase the expire "effort" that is normally set to | |
|
934 | # "1", to a greater value, up to the value "10". At its maximum value the | |
|
935 | # system will use more CPU, longer cycles (and technically may introduce | |
|
936 | # more latency), and will tolerate fewer already expired keys still present | |
|
937 | # in the system. It's a tradeoff between memory, CPU and latency. | |
|
938 | # | |
|
939 | # active-expire-effort 1 | |
|
940 | ||
|
941 | ############################# LAZY FREEING #################################### | |
|
942 | ||
|
943 | # Redis has two primitives to delete keys. One is called DEL and is a blocking | |
|
944 | # deletion of the object. It means that the server stops processing new commands | |
|
945 | # in order to reclaim all the memory associated with an object in a synchronous | |
|
946 | # way. If the key deleted is associated with a small object, the time needed | |
|
947 | # in order to execute the DEL command is very small and comparable to most other | |
|
948 | # O(1) or O(log_N) commands in Redis. However if the key is associated with an | |
|
949 | # aggregated value containing millions of elements, the server can block for | |
|
950 | # a long time (even seconds) in order to complete the operation. | |
|
951 | # | |
|
952 | # For the above reasons Redis also offers non blocking deletion primitives | |
|
953 | # such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and | |
|
954 | # FLUSHDB commands, in order to reclaim memory in background. Those commands | |
|
955 | # are executed in constant time. Another thread will incrementally free the | |
|
956 | # object in the background as fast as possible. | |
|
957 | # | |
|
958 | # DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. | |
|
959 | # It's up to the design of the application to understand when it is a good | |
|
960 | # idea to use one or the other. However the Redis server sometimes has to | |
|
961 | # delete keys or flush the whole database as a side effect of other operations. | |
|
962 | # Specifically Redis deletes objects independently of a user call in the | |
|
963 | # following scenarios: | |
|
964 | # | |
|
965 | # 1) On eviction, because of the maxmemory and maxmemory policy configurations, | |
|
966 | # in order to make room for new data, without going over the specified | |
|
967 | # memory limit. | |
|
968 | # 2) Because of expire: when a key with an associated time to live (see the | |
|
969 | # EXPIRE command) must be deleted from memory. | |
|
970 | # 3) Because of a side effect of a command that stores data on a key that may | |
|
971 | # already exist. For example the RENAME command may delete the old key | |
|
972 | # content when it is replaced with another one. Similarly SUNIONSTORE | |
|
973 | # or SORT with STORE option may delete existing keys. The SET command | |
|
974 | # itself removes any old content of the specified key in order to replace | |
|
975 | # it with the specified string. | |
|
976 | # 4) During replication, when a replica performs a full resynchronization with | |
|
977 | # its master, the content of the whole database is removed in order to | |
|
978 | # load the RDB file just transferred. | |
|
979 | # | |
|
980 | # In all the above cases the default is to delete objects in a blocking way, | |
|
981 | # like if DEL was called. However you can configure each case specifically | |
|
982 | # in order to instead release memory in a non-blocking way like if UNLINK | |
|
983 | # was called, using the following configuration directives. | |
|
984 | ||
|
985 | lazyfree-lazy-eviction no | |
|
986 | lazyfree-lazy-expire no | |
|
987 | lazyfree-lazy-server-del no | |
|
988 | replica-lazy-flush no | |
|
989 | ||
|
990 | # It is also possible, for cases where replacing the application's DEL calls | |
|
991 | # with UNLINK calls is not easy, to modify the default behavior of the DEL | |
|
992 | # command to act exactly like UNLINK, using the following configuration | |
|
993 | # directive: | |
|
994 | ||
|
995 | lazyfree-lazy-user-del no | |
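#
# For reference, the non blocking primitives mentioned above are ordinary
# commands, e.g. via redis-cli (the key name is made up):
#
#   UNLINK big:hash     # like DEL, but frees the value in a background thread
#   FLUSHALL ASYNC      # flush the whole dataset without blocking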
|
996 | ||
|
997 | ################################ THREADED I/O ################################# | |
|
998 | ||
|
999 | # Redis is mostly single threaded, however there are certain threaded | |
|
1000 | # operations such as UNLINK, slow I/O accesses and other things that are | |
|
1001 | # performed on side threads. | |
|
1002 | # | |
|
1003 | # Now it is also possible to handle Redis client socket reads and writes | |
|
1004 | # in different I/O threads. Since writing in particular is slow, Redis users | |
|
1005 | # normally use pipelining in order to speed up Redis performance per | |
|
1006 | # core, and spawn multiple instances in order to scale further. Using I/O | |
|
1007 | # threads it is possible to easily speed Redis up by about two times without | |
|
1008 | # resorting to pipelining or sharding the instance. | |
|
1009 | # | |
|
1010 | # By default threading is disabled; we suggest enabling it only on machines | |
|
1011 | # that have at least 4 cores, leaving at least one spare core. | |
|
1012 | # Using more than 8 threads is unlikely to help much. We also recommend using | |
|
1013 | # threaded I/O only if you actually have performance problems, with Redis | |
|
1014 | # instances being able to use a quite big percentage of CPU time, otherwise | |
|
1015 | # there is no point in using this feature. | |
|
1016 | # | |
|
1017 | # So for instance if you have a four core box, try to use 2 or 3 I/O | |
|
1018 | # threads; if you have 8 cores, try to use 6 threads. In order to | |
|
1019 | # enable I/O threads use the following configuration directive: | |
|
1020 | # | |
|
1021 | # io-threads 4 | |
|
1022 | # | |
|
1023 | # Setting io-threads to 1 will just use the main thread as usual. | |
|
1024 | # When I/O threads are enabled, we only use threads for writes, that is | |
|
1025 | # to thread the write(2) syscall and transfer the client buffers to the | |
|
1026 | # socket. However it is also possible to enable threading of reads and | |
|
1027 | # protocol parsing using the following configuration directive, by setting | |
|
1028 | # it to yes: | |
|
1029 | # | |
|
1030 | # io-threads-do-reads no | |
|
1031 | # | |
|
1032 | # Usually threading reads doesn't help much. | |
|
1033 | # | |
|
1034 | # NOTE 1: This configuration directive cannot be changed at runtime via | |
|
1035 | # CONFIG SET. Also, this feature currently does not work when SSL is | |
|
1036 | # enabled. | |
|
1037 | # | |
|
1038 | # NOTE 2: If you want to test the Redis speedup using redis-benchmark, make | |
|
1039 | # sure you also run the benchmark itself in threaded mode, using the | |
|
1040 | # --threads option to match the number of Redis threads, otherwise you'll not | |
|
1041 | # be able to notice the improvements. | |
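#
# For example, with "io-threads 4" configured above, a matching benchmark run
# (illustrative parameters) would be:
#
#   redis-benchmark --threads 4 -t set,get -n 1000000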
|
1042 | ||
|
1043 | ############################ KERNEL OOM CONTROL ############################## | |
|
1044 | ||
|
1045 | # On Linux, it is possible to hint the kernel OOM killer on what processes | |
|
1046 | # should be killed first when out of memory. | |
|
1047 | # | |
|
1048 | # Enabling this feature makes Redis actively control the oom_score_adj value | |
|
1049 | # for all its processes, depending on their role. The default scores will | |
|
1050 | # attempt to have background child processes killed before all others, and | |
|
1051 | # replicas killed before masters. | |
|
1052 | ||
|
1053 | oom-score-adj no | |
|
1054 | ||
|
1055 | # When oom-score-adj is used, this directive controls the specific values used | |
|
1056 | # for master, replica and background child processes. Values range -1000 to | |
|
1057 | # 1000 (higher means more likely to be killed). | |
|
1058 | # | |
|
1059 | # Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities) | |
|
1060 | # can freely increase their value, but not decrease it below its initial | |
|
1061 | # settings. | |
|
1062 | # | |
|
1063 | # Values are used relative to the initial value of oom_score_adj when the server | |
|
1064 | # starts. Because typically the initial value is 0, they will often match the | |
|
1065 | # absolute values. | |
|
1066 | ||
|
1067 | oom-score-adj-values 0 200 800 | |
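#
# With the line above, and per the order described earlier, the values map to:
#   master = 0, replica = 200, background child = 800
# so a forked saving child is the most likely candidate for the OOM killer.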
|
1068 | ||
|
1069 | ############################## APPEND ONLY MODE ############################### | |
|
1070 | ||
|
1071 | # By default Redis asynchronously dumps the dataset on disk. This mode is | |
|
1072 | # good enough in many applications, but an issue with the Redis process or | |
|
1073 | # a power outage may result in a few minutes of lost writes (depending on | |
|
1074 | # the configured save points). | |
|
1075 | # | |
|
1076 | # The Append Only File is an alternative persistence mode that provides | |
|
1077 | # much better durability. For instance using the default data fsync policy | |
|
1078 | # (see later in the config file) Redis can lose just one second of writes in a | |
|
1079 | # dramatic event like a server power outage, or a single write if something | |
|
1080 | # goes wrong with the Redis process itself while the operating system is | |
|
1081 | # still running correctly. | |
|
1082 | # | |
|
1083 | # AOF and RDB persistence can be enabled at the same time without problems. | |
|
1084 | # If the AOF is enabled on startup Redis will load the AOF, that is the file | |
|
1085 | # with the best durability guarantees. | |
|
1086 | # | |
|
1087 | # Please check http://redis.io/topics/persistence for more information. | |
|
1088 | ||
|
1089 | appendonly no | |
|
1090 | ||
|
1091 | # The name of the append only file (default: "appendonly.aof") | |
|
1092 | ||
|
1093 | appendfilename "appendonly.aof" | |
|
1094 | ||
|
1095 | # The fsync() call tells the Operating System to actually write data on disk | |
|
1096 | # instead of waiting for more data in the output buffer. Some OSes really flush | |
|
1097 | # data to disk, while others just try to do it ASAP. | |
|
1098 | # | |
|
1099 | # Redis supports three different modes: | |
|
1100 | # | |
|
1101 | # no: don't fsync, just let the OS flush the data when it wants. Faster. | |
|
1102 | # always: fsync after every write to the append only log. Slow, Safest. | |
|
1103 | # everysec: fsync only one time every second. Compromise. | |
|
1104 | # | |
|
1105 | # The default is "everysec", as that's usually the right compromise between | |
|
1106 | # speed and data safety. It's up to you to understand if you can relax this to | |
|
1107 | # "no" that will let the operating system flush the output buffer when | |
|
1108 | # it wants, for better performances (but if you can live with the idea of | |
|
1109 | # some data loss consider the default persistence mode that's snapshotting), | |
|
1110 | # or on the contrary, use "always" that's very slow but a bit safer than | |
|
1111 | # everysec. | |
|
1112 | # | |
|
1113 | # For more details please check the following article: | |
|
1114 | # http://antirez.com/post/redis-persistence-demystified.html | |
|
1115 | # | |
|
1116 | # If unsure, use "everysec". | |
|
1117 | ||
|
1118 | # appendfsync always | |
|
1119 | appendfsync everysec | |
|
1120 | # appendfsync no | |
|
1121 | ||
|
1122 | # When the AOF fsync policy is set to always or everysec, and a background | |
|
1123 | # saving process (a background save or AOF log background rewriting) is | |
|
1124 | # performing a lot of I/O against the disk, in some Linux configurations | |
|
1125 | # Redis may block too long on the fsync() call. Note that there is no fix for | |
|
1126 | # this currently, as even performing fsync in a different thread will block | |
|
1127 | # our synchronous write(2) call. | |
|
1128 | # | |
|
1129 | # In order to mitigate this problem it's possible to use the following option | |
|
1130 | # that will prevent fsync() from being called in the main process while a | |
|
1131 | # BGSAVE or BGREWRITEAOF is in progress. | |
|
1132 | # | |
|
1133 | # This means that while another child is saving, the durability of Redis is | |
|
1134 | # the same as "appendfsync none". In practical terms, this means that it is | |
|
1135 | # possible to lose up to 30 seconds of log in the worst scenario (with the | |
|
1136 | # default Linux settings). | |
|
1137 | # | |
|
1138 | # If you have latency problems turn this to "yes". Otherwise leave it as | |
|
1139 | # "no" that is the safest pick from the point of view of durability. | |
|
1140 | ||
|
1141 | no-appendfsync-on-rewrite no | |
|
1142 | ||
|
1143 | # Automatic rewrite of the append only file. | |
|
1144 | # Redis is able to automatically rewrite the log file implicitly calling | |
|
1145 | # BGREWRITEAOF when the AOF log size grows by the specified percentage. | |
|
1146 | # | |
|
1147 | # This is how it works: Redis remembers the size of the AOF file after the | |
|
1148 | # latest rewrite (if no rewrite has happened since the restart, the size of | |
|
1149 | # the AOF at startup is used). | |
|
1150 | # | |
|
1151 | # This base size is compared to the current size. If the current size | |
|
1152 | # exceeds the base size by the specified percentage, the rewrite is triggered. | |
|
1153 | # You also need to specify a minimum size for the AOF file to be rewritten; this | |
|
1154 | # is useful to avoid rewriting the AOF file even if the percentage increase | |
|
1155 | # is reached but the file is still quite small. | |
|
1156 | # | |
|
1157 | # Specify a percentage of zero in order to disable the automatic AOF | |
|
1158 | # rewrite feature. | |
|
1159 | ||
|
1160 | auto-aof-rewrite-percentage 100 | |
|
1161 | auto-aof-rewrite-min-size 64mb | |
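#
# Worked example (illustrative numbers): if the AOF measured 80mb after the
# last rewrite, the 100% setting triggers the next BGREWRITEAOF once the file
# grows past 160mb. With a base of 20mb the percentage threshold would be
# 40mb, but nothing happens until the 64mb minimum size is also exceeded.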
|
1162 | ||
|
1163 | # An AOF file may be found to be truncated at the end during the Redis | |
|
1164 | # startup process, when the AOF data gets loaded back into memory. | |
|
1165 | # This may happen when the system where Redis is running | |
|
1166 | # crashes, especially when an ext4 filesystem is mounted without the | |
|
1167 | # data=ordered option (however this can't happen when Redis itself | |
|
1168 | # crashes or aborts but the operating system still works correctly). | |
|
1169 | # | |
|
1170 | # Redis can either exit with an error when this happens, or load as much | |
|
1171 | # data as possible (the default now) and start if the AOF file is found | |
|
1172 | # to be truncated at the end. The following option controls this behavior. | |
|
1173 | # | |
|
1174 | # If aof-load-truncated is set to yes, a truncated AOF file is loaded and | |
|
1175 | # the Redis server starts emitting a log to inform the user of the event. | |
|
1176 | # Otherwise if the option is set to no, the server aborts with an error | |
|
1177 | # and refuses to start. When the option is set to no, the user is required | |
|
1178 | # to fix the AOF file using the "redis-check-aof" utility before restarting | |
|
1179 | # the server. | |
|
1180 | # | |
|
1181 | # Note that if the AOF file is found to be corrupted in the middle, | |
|
1182 | # the server will still exit with an error. This option only applies when | |
|
1183 | # Redis will try to read more data from the AOF file but not enough bytes | |
|
1184 | # will be found. | |
|
1185 | aof-load-truncated yes | |
|
1186 | ||
|
1187 | # When rewriting the AOF file, Redis is able to use an RDB preamble in the | |
|
1188 | # AOF file for faster rewrites and recoveries. When this option is turned | |
|
1189 | # on the rewritten AOF file is composed of two different stanzas: | |
|
1190 | # | |
|
1191 | # [RDB file][AOF tail] | |
|
1192 | # | |
|
1193 | # When loading, Redis recognizes that the AOF file starts with the "REDIS" | |
|
1194 | # string and loads the prefixed RDB file, then continues loading the AOF | |
|
1195 | # tail. | |
|
1196 | aof-use-rdb-preamble yes | |
|
1197 | ||
|
1198 | ################################ LUA SCRIPTING ############################### | |
|
1199 | ||
|
1200 | # Max execution time of a Lua script in milliseconds. | |
|
1201 | # | |
|
1202 | # If the maximum execution time is reached Redis will log that a script is | |
|
1203 | # still in execution after the maximum allowed time and will start to | |
|
1204 | # reply to queries with an error. | |
|
1205 | # | |
|
1206 | # When a long running script exceeds the maximum execution time only the | |
|
1207 | # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be | |
|
1208 | # used to stop a script that did not yet call any write commands. The second | |
|
1209 | # is the only way to shut down the server in the case a write command was | |
|
1210 | # already issued by the script but the user doesn't want to wait for the natural | |
|
1211 | # termination of the script. | |
|
1212 | # | |
|
1213 | # Set it to 0 or a negative value for unlimited execution without warnings. | |
|
1214 | lua-time-limit 5000 | |
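#
# When the limit is hit, the escape hatches mentioned above are plain
# commands, e.g. via redis-cli:
#
#   SCRIPT KILL        # stops the script if it has not written anything yet
#   SHUTDOWN NOSAVE    # last resort once the script has already written data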
|
1215 | ||
|
1216 | ################################ REDIS CLUSTER ############################### | |
|
1217 | ||
|
1218 | # Normal Redis instances can't be part of a Redis Cluster; only nodes that are | |
|
1219 | # started as cluster nodes can. In order to start a Redis instance as a | |
|
1220 | # cluster node, enable cluster support by uncommenting the following: | |
|
1221 | # | |
|
1222 | # cluster-enabled yes | |
|
1223 | ||
|
1224 | # Every cluster node has a cluster configuration file. This file is not | |
|
1225 | # intended to be edited by hand. It is created and updated by Redis nodes. | |
|
1226 | # Every Redis Cluster node requires a different cluster configuration file. | |
|
1227 | # Make sure that instances running in the same system do not have | |
|
1228 | # overlapping cluster configuration file names. | |
|
1229 | # | |
|
1230 | # cluster-config-file nodes-6379.conf | |
|
1231 | ||
|
1232 | # Cluster node timeout is the amount of milliseconds a node must be unreachable | |
|
1233 | # for it to be considered in failure state. | |
|
1234 | # Most other internal time limits are a multiple of the node timeout. | |
|
1235 | # | |
|
1236 | # cluster-node-timeout 15000 | |
|
1237 | ||
|
1238 | # A replica of a failing master will avoid starting a failover if its data | |
|
1239 | # looks too old. | |
|
1240 | # | |
|
1241 | # There is no simple way for a replica to actually have an exact measure of | |
|
1242 | # its "data age", so the following two checks are performed: | |
|
1243 | # | |
|
1244 | # 1) If there are multiple replicas able to failover, they exchange messages | |
|
1245 | # in order to try to give an advantage to the replica with the best | |
|
1246 | # replication offset (more data from the master processed). | |
|
1247 | # Replicas will try to get their rank by offset, and apply to the start | |
|
1248 | # of the failover a delay proportional to their rank. | |
|
1249 | # | |
|
1250 | # 2) Every single replica computes the time of the last interaction with | |
|
1251 | # its master. This can be the last ping or command received (if the master | |
|
1252 | # is still in the "connected" state), or the time that elapsed since the | |
|
1253 | # disconnection with the master (if the replication link is currently down). | |
|
1254 | # If the last interaction is too old, the replica will not try to failover | |
|
1255 | # at all. | |
|
1256 | # | |
|
1257 | # The point "2" can be tuned by user. Specifically a replica will not perform | |
|
1258 | # the failover if, since the last interaction with the master, the time | |
|
1259 | # elapsed is greater than: | |
|
1260 | # | |
|
1261 | # (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period | |
|
1262 | # | |
|
1263 | # So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor | |
|
1264 | # is 10, and assuming a default repl-ping-replica-period of 10 seconds, the | |
|
1265 | # replica will not try to failover if it was not able to talk with the master | |
|
1266 | # for longer than 310 seconds. | |
|
1267 | # | |
|
1268 | # A large cluster-replica-validity-factor may allow replicas with too old data to fail over | |
|
1269 | # a master, while too small a value may prevent the cluster from being able to | |
|
1270 | # elect a replica at all. | |
|
1271 | # | |
|
1272 | # For maximum availability, it is possible to set the cluster-replica-validity-factor | |
|
1273 | # to a value of 0, which means, that replicas will always try to failover the | |
|
1274 | # master regardless of the last time they interacted with the master. | |
|
1275 | # (However they'll always try to apply a delay proportional to their | |
|
1276 | # offset rank). | |
|
1277 | # | |
|
1278 | # Zero is the only value able to guarantee that when all the partitions heal | |
|
1279 | # the cluster will always be able to continue. | |
|
1280 | # | |
|
1281 | # cluster-replica-validity-factor 10 | |
|
1282 | ||
|
1283 | # Cluster replicas are able to migrate to orphaned masters, that is, masters | |
|
1284 | # left without working replicas. This improves the cluster's ability | |
|
1285 | # to resist failures, as otherwise an orphaned master can't be failed over | |
|
1286 | # if it fails while having no working replicas. | |
|
1287 | # | |
|
1288 | # Replicas migrate to orphaned masters only if there are still at least a | |
|
1289 | # given number of other working replicas for their old master. This number | |
|
1290 | # is the "migration barrier". A migration barrier of 1 means that a replica | |
|
1291 | # will migrate only if there is at least 1 other working replica for its master | |
|
1292 | # and so forth. It usually reflects the number of replicas you want for every | |
|
1293 | # master in your cluster. | |
|
1294 | # | |
|
1295 | # Default is 1 (replicas migrate only if their masters remain with at least | |
|
1296 | # one replica). To disable migration just set it to a very large value. | |
|
1297 | # A value of 0 can be set but is useful only for debugging and dangerous | |
|
1298 | # in production. | |
|
1299 | # | |
|
1300 | # cluster-migration-barrier 1 | |
|
1301 | ||
|
1302 | # By default Redis Cluster nodes stop accepting queries if they detect there | |
|
1303 | # is at least one hash slot uncovered (no available node is serving it). | |
|
1304 | # This way if the cluster is partially down (for example a range of hash slots | |
|
1305 | # is no longer covered) the whole cluster eventually becomes unavailable. | |
|
1306 | # It automatically becomes available again as soon as all the slots are covered. | |
|
1307 | # | |
|
1308 | # However sometimes you want the subset of the cluster which is working | |
|
1309 | # to continue to accept queries for the part of the key space that is still | |
|
1310 | # covered. In order to do so, just set the cluster-require-full-coverage | |
|
1311 | # option to no. | |
|
1312 | # | |
|
1313 | # cluster-require-full-coverage yes | |
|
1314 | ||
|
1315 | # This option, when set to yes, prevents a replica from trying to fail over its | |
|
1316 | # master during master failures. However the master can still perform a | |
|
1317 | # manual failover, if forced to do so. | |
|
1318 | # | |
|
1319 | # This is useful in different scenarios, especially in the case of multiple | |
|
1320 | # data center operations, where we want one side to never be promoted except | |
|
1321 | # in the case of a total DC failure. | |
|
1322 | # | |
|
1323 | # cluster-replica-no-failover no | |
|
1324 | ||
|
1325 | # This option, when set to yes, allows nodes to serve read traffic while | |
|
1326 | # the cluster is in a down state, as long as it believes it owns the slots. | |
|
1327 | # | |
|
1328 | # This is useful for two cases. The first case is for when an application | |
|
1329 | # doesn't require consistency of data during node failures or network partitions. | |
|
1330 | # One example of this is a cache, where as long as the node has the data it | |
|
1331 | # should be able to serve it. | |
|
1332 | # | |
|
1333 | # The second use case is for configurations that don't meet the recommended | |
|
1334 | # three shards but want to enable cluster mode and scale later. A | |
|
1335 | # master outage in a 1 or 2 shard configuration causes a read/write outage to the | |
|
1336 | # entire cluster without this option set, with it set there is only a write outage. | |
|
1337 | # Without a quorum of masters, slot ownership will not change automatically. | |
|
1338 | # | |
|
1339 | # cluster-allow-reads-when-down no | |
|
1340 | ||
|
1341 | # In order to set up your cluster, make sure to read the documentation | |
|
1342 | # available at the http://redis.io web site. | |
|
1343 | ||
|
1344 | ########################## CLUSTER DOCKER/NAT support ######################## | |
|
1345 | ||
|
1346 | # In certain deployments, Redis Cluster nodes address discovery fails, because | |
|
1347 | # addresses are NAT-ted or because ports are forwarded (the typical case is | |
|
1348 | # Docker and other containers). | |
|
1349 | # | |
|
1350 | # In order to make Redis Cluster work in such environments, a static | |
|
1351 | # configuration where each node knows its public address is needed. The | |
|
1352 | # following three options are used for this purpose: | |
|
1353 | # | |
|
1354 | # * cluster-announce-ip | |
|
1355 | # * cluster-announce-port | |
|
1356 | # * cluster-announce-bus-port | |
|
1357 | # | |
|
1358 | # Each instructs the node about its address, client port, and cluster message | |
|
1359 | # bus port. The information is then published in the header of the bus packets | |
|
1360 | # so that other nodes will be able to correctly map the address of the node | |
|
1361 | # publishing the information. | |
|
1362 | # | |
|
1363 | # If the above options are not used, the normal Redis Cluster auto-detection | |
|
1364 | # will be used instead. | |
|
1365 | # | |
|
1366 | # Note that when remapped, the bus port may not be at the fixed offset of | |
|
1367 | # client port + 10000, so you can specify any port and bus-port depending | |
|
1368 | # on how they get remapped. If the bus-port is not set, a fixed offset of | |
|
1369 | # 10000 will be used as usual. | |
|
1370 | # | |
|
1371 | # Example: | |
|
1372 | # | |
|
1373 | # cluster-announce-ip 10.1.1.5 | |
|
1374 | # cluster-announce-port 6379 | |
|
1375 | # cluster-announce-bus-port 6380 | |
|
1376 | ||
|
1377 | ################################## SLOW LOG ################################### | |
|
1378 | ||
|
1379 | # The Redis Slow Log is a system to log queries that exceeded a specified | |
|
1380 | # execution time. The execution time does not include the I/O operations | |
|
1381 | # like talking with the client, sending the reply and so forth, | |
|
1382 | # but just the time needed to actually execute the command (this is the only | |
|
1383 | # stage of command execution where the thread is blocked and can not serve | |
|
1384 | # other requests in the meantime). | |
|
1385 | # | |
|
1386 | # You can configure the slow log with two parameters: one tells Redis | |
|
1387 | # the execution time, in microseconds, that a command must exceed in order | |
|
1388 | # to get logged, and the other parameter is the length of the | |
|
1389 | # slow log. When a new command is logged the oldest one is removed from the | |
|
1390 | # queue of logged commands. | |
|
1391 | ||
|
1392 | # The following time is expressed in microseconds, so 1000000 is equivalent | |
|
1393 | # to one second. Note that a negative number disables the slow log, while | |
|
1394 | # a value of zero forces the logging of every command. | |
|
1395 | slowlog-log-slower-than 10000 | |
|
1396 | ||
|
1397 | # There is no limit to this length. Just be aware that it will consume memory. | |
|
1398 | # You can reclaim memory used by the slow log with SLOWLOG RESET. | |
|
1399 | slowlog-max-len 128 | |
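#
# The slow log is queried at runtime, e.g. with redis-cli:
#
#   SLOWLOG GET 10   # fetch the 10 most recent entries
#   SLOWLOG LEN      # number of entries currently stored
#   SLOWLOG RESET    # discard all entries and reclaim the memory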
|
1400 | ||
|
1401 | ################################ LATENCY MONITOR ############################## | |
|
1402 | ||
|
1403 | # The Redis latency monitoring subsystem samples different operations | |
|
1404 | # at runtime in order to collect data related to possible sources of | |
|
1405 | # latency of a Redis instance. | |
|
1406 | # | |
|
1407 | # Via the LATENCY command this information is available to the user that can | |
|
1408 | # print graphs and obtain reports. | |
|
1409 | # | |
|
1410 | # The system only logs operations that were performed in a time equal to or | |
|
1411 | # greater than the amount of milliseconds specified via the | |
|
1412 | # latency-monitor-threshold configuration directive. When its value is set | |
|
1413 | # to zero, the latency monitor is turned off. | |
|
1414 | # | |
|
1415 | # By default latency monitoring is disabled since it is mostly not needed | |
|
1416 | # if you don't have latency issues, and collecting data has a performance | |
|
1417 | # impact that, while very small, can be measured under heavy load. Latency | |
|
1418 | # monitoring can easily be enabled at runtime using the command | |
|
1419 | # "CONFIG SET latency-monitor-threshold <milliseconds>" if needed. | |
|
1420 | latency-monitor-threshold 0 | |
|
1421 | ||
|
1422 | ############################# EVENT NOTIFICATION ############################## | |
|
1423 | ||
|
1424 | # Redis can notify Pub/Sub clients about events happening in the key space. | |
|
1425 | # This feature is documented at http://redis.io/topics/notifications | |
|
1426 | # | |
|
1427 | # For instance if keyspace events notification is enabled, and a client | |
|
1428 | # performs a DEL operation on key "foo" stored in the Database 0, two | |
|
1429 | # messages will be published via Pub/Sub: | |
|
1430 | # | |
|
1431 | # PUBLISH __keyspace@0__:foo del | |
|
1432 | # PUBLISH __keyevent@0__:del foo | |
|
1433 | # | |
|
1434 | # It is possible to select the events that Redis will notify among a set | |
|
1435 | # of classes. Every class is identified by a single character: | |
|
1436 | # | |
|
1437 | # K Keyspace events, published with __keyspace@<db>__ prefix. | |
|
1438 | # E Keyevent events, published with __keyevent@<db>__ prefix. | |
|
1439 | # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... | |
|
1440 | # $ String commands | |
|
1441 | # l List commands | |
|
1442 | # s Set commands | |
|
1443 | # h Hash commands | |
|
1444 | # z Sorted set commands | |
|
1445 | # x Expired events (events generated every time a key expires) | |
|
1446 | # e Evicted events (events generated when a key is evicted for maxmemory) | |
|
1447 | # t Stream commands | |
|
1448 | # m Key-miss events (Note: It is not included in the 'A' class) | |
|
1449 | # A Alias for g$lshzxet, so that the "AKE" string means all the events | |
|
1450 | # (Except key-miss events which are excluded from 'A' due to their | |
|
1451 | # unique nature). | |
|
1452 | # | |
|
1453 | # The "notify-keyspace-events" takes as argument a string that is composed | |
|
1454 | # of zero or multiple characters. The empty string means that notifications | |
|
1455 | # are disabled. | |
|
1456 | # | |
|
1457 | # Example: to enable list and generic events, from the point of view of the | |
|
1458 | # event name, use: | |
|
1459 | # | |
|
1460 | # notify-keyspace-events Elg | |
|
1461 | # | |
|
1462 | # Example 2: to get the stream of the expired keys subscribing to channel | |
|
1463 | # name __keyevent@0__:expired use: | |
|
1464 | # | |
|
1465 | # notify-keyspace-events Ex | |
|
1466 | # | |
|
1467 | # By default all notifications are disabled because most users don't need | |
|
1468 | # this feature and the feature has some overhead. Note that if you don't | |
|
1469 | # specify at least one of K or E, no events will be delivered. | |
|
1470 | notify-keyspace-events "" | |
|
1471 | ||
|
1472 | ############################### GOPHER SERVER ################################# | |
|
1473 | ||
|
1474 | # Redis contains an implementation of the Gopher protocol, as specified in | |
|
1475 | # the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt). | |
|
1476 | # | |
|
1477 | # The Gopher protocol was very popular in the late '90s. It is an alternative | |
|
1478 | # to the web, and the implementation, both server and client side, is so simple | |
|
1479 | # that the Redis server needs just 100 lines of code to implement this | |
|
1480 | # support. | |
|
1481 | # | |
|
1482 | # What do you do with Gopher nowadays? Well Gopher never *really* died, and | |
|
1483 | # lately there is a movement to resurrect Gopher's more hierarchical content, | |
|
1484 | # composed of just plain text documents. Some want a simpler | |
|
1485 | # internet, others believe that the mainstream internet became too much | |
|
1486 | # controlled, and it's cool to create an alternative space for people that | |
|
1487 | # want a bit of fresh air. | |
|
1488 | # | |
|
1489 | # Anyway, for the 10th birthday of Redis, we gave it the Gopher protocol | |
|
1490 | # as a gift. | |
|
1491 | # | |
|
1492 | # --- HOW IT WORKS --- | |
|
1493 | # | |
|
1494 | # The Redis Gopher support uses the inline protocol of Redis, and specifically | |
|
1495 | # two kinds of inline requests that were illegal anyway: an empty request | |
|
1496 | # or any request that starts with "/" (there are no Redis commands starting | |
|
1497 | # with such a slash). Normal RESP2/RESP3 requests are completely out of the | |
|
1498 | # path of the Gopher protocol implementation and are served as usual as well. | |
|
1499 | # | |
|
1500 | # If you open a connection to Redis when Gopher is enabled and send it | |
|
1501 | # a string like "/foo", if there is a key named "/foo" it is served via the | |
|
1502 | # Gopher protocol. | |
|
1503 | # | |
|
1504 | # In order to create a real Gopher "hole" (the name of a Gopher site in Gopher | |
|
1505 | # talking), you likely need a script like the following: | |
|
1506 | # | |
|
1507 | # https://github.com/antirez/gopher2redis | |
|
1508 | # | |
|
1509 | # --- SECURITY WARNING --- | |
|
1510 | # | |
|
1511 | # If you plan to put Redis on the internet in a publicly accessible address | |
|
1512 | # to serve Gopher pages, MAKE SURE TO SET A PASSWORD for the instance. | |
|
1513 | # Once a password is set: | |
|
1514 | # | |
|
1515 | # 1. The Gopher server (when enabled, not by default) will still serve | |
|
1516 | # content via Gopher. | |
|
1517 | # 2. However other commands cannot be called before the client | |
|
1518 | # authenticates. | |
|
1519 | # | |
|
1520 | # So use the 'requirepass' option to protect your instance. | |
|
1521 | # | |
|
1522 | # Note that Gopher is not currently supported when 'io-threads-do-reads' | |
|
1523 | # is enabled. | |
|
1524 | # | |
|
1525 | # To enable Gopher support, uncomment the following line and set the option | |
|
1526 | # from no (the default) to yes. | |
|
1527 | # | |
|
1528 | # gopher-enabled no | |
|
1529 | ||
|
1530 | ############################### ADVANCED CONFIG ############################### | |
|
1531 | ||
|
1532 | # Hashes are encoded using a memory efficient data structure when they have a | |
|
1533 | # small number of entries, and the biggest entry does not exceed a given | |
|
1534 | # threshold. These thresholds can be configured using the following directives. | |
|
1535 | hash-max-ziplist-entries 512 | |
|
1536 | hash-max-ziplist-value 64 | |
|
1537 | ||
|
1538 | # Lists are also encoded in a special way to save a lot of space. | |
|
1539 | # The number of entries allowed per internal list node can be specified | |
|
1540 | # as a fixed maximum size or a maximum number of elements. | |
|
1541 | # For a fixed maximum size, use -5 through -1, meaning: | |
|
1542 | # -5: max size: 64 Kb <-- not recommended for normal workloads | |
|
1543 | # -4: max size: 32 Kb <-- not recommended | |
|
1544 | # -3: max size: 16 Kb <-- probably not recommended | |
|
1545 | # -2: max size: 8 Kb <-- good | |
|
1546 | # -1: max size: 4 Kb <-- good | |
|
1547 | # Positive numbers mean store up to _exactly_ that number of elements | |
|
1548 | # per list node. | |
|
1549 | # The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), | |
|
1550 | # but if your use case is unique, adjust the settings as necessary. | |
|
1551 | list-max-ziplist-size -2 | |
|
1552 | ||
|
1553 | # Lists may also be compressed. | |
|
1554 | # Compress depth is the number of quicklist ziplist nodes from *each* side of | |
|
1555 | # the list to *exclude* from compression. The head and tail of the list | |
|
1556 | # are always uncompressed for fast push/pop operations. Settings are: | |
|
1557 | # 0: disable all list compression | |
|
1558 | # 1: depth 1 means "don't start compressing until after 1 node into the list, | |
|
1559 | # going from either the head or tail" | |
|
1560 | # So: [head]->node->node->...->node->[tail] | |
|
1561 | # [head], [tail] will always be uncompressed; inner nodes will compress. | |
|
1562 | # 2: [head]->[next]->node->node->...->node->[prev]->[tail] | |
|
1563 | # 2 here means: don't compress head or head->next or tail->prev or tail, | |
|
1564 | # but compress all nodes between them. | |
|
1565 | # 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] | |
|
1566 | # etc. | |
|
1567 | list-compress-depth 0 | |
|
1568 | ||
|
1569 | # Sets have a special encoding in just one case: when a set is composed | |
|
1570 | # of just strings that happen to be integers in radix 10 in the range | |
|
1571 | # of 64 bit signed integers. | |
|
1572 | # The following configuration setting sets the limit in the size of the | |
|
1573 | # set in order to use this special memory saving encoding. | |
|
1574 | set-max-intset-entries 512 | |
|
1575 | ||
|
1576 | # Similarly to hashes and lists, sorted sets are also specially encoded in | |
|
1577 | # order to save a lot of space. This encoding is only used when the length and | |
|
1578 | # elements of a sorted set are below the following limits: | |
|
1579 | zset-max-ziplist-entries 128 | |
|
1580 | zset-max-ziplist-value 64 | |
|
1581 | ||
|
1582 | # HyperLogLog sparse representation bytes limit. The limit includes the | |
|
1583 | # 16 bytes header. When a HyperLogLog using the sparse representation crosses | |
|
1584 | # this limit, it is converted into the dense representation. | |
|
1585 | # | |
|
1586 | # A value greater than 16000 is totally useless, since at that point the | |
|
1587 | # dense representation is more memory efficient. | |
|
1588 | # | |
|
1589 | # The suggested value is ~ 3000 in order to have the benefits of | |
|
1590 | # the space efficient encoding without slowing down PFADD too much, | |
|
1591 | # which is O(N) with the sparse encoding. The value can be raised to | |
|
1592 | # ~ 10000 when CPU is not a concern, but space is, and the data set is | |
|
1593 | # composed of many HyperLogLogs with cardinality in the 0 - 15000 range. | |
|
1594 | hll-sparse-max-bytes 3000 | |
|
1595 | ||
|
1596 | # Streams macro node max size / items. The stream data structure is a radix | |
|
1597 | # tree of big nodes that encode multiple items inside. Using this configuration | |
|
1598 | # it is possible to configure how big a single node can be in bytes, and the | |
|
1599 | # maximum number of items it may contain before switching to a new node when | |
|
1600 | # appending new stream entries. If any of the following settings are set to | |
|
1601 | # zero, the limit is ignored, so for instance it is possible to set just a | |
|
1602 | # max entries limit by setting max-bytes to 0 and max-entries to the desired | |
|
1603 | # value. | |
|
1604 | stream-node-max-bytes 4096 | |
|
1605 | stream-node-max-entries 100 | |
|
1606 | ||
|
1607 | # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in | |
|
1608 | # order to help rehashing the main Redis hash table (the one mapping top-level | |
|
1609 | # keys to values). The hash table implementation Redis uses (see dict.c) | |
|
1610 | # performs a lazy rehashing: the more operations you run against a hash table | |
|
1611 | # that is rehashing, the more rehashing "steps" are performed, so if the | |
|
1612 | # server is idle the rehashing is never completed and some more memory is used | |
|
1613 | # by the hash table. | |
|
1614 | # | |
|
1615 | # The default is to use this millisecond 10 times every second in order to | |
|
1616 | # actively rehash the main dictionaries, freeing memory when possible. | |
|
1617 | # | |
|
1618 | # If unsure: | |
|
1619 | # use "activerehashing no" if you have hard latency requirements and it is | |
|
1620 | # not a good thing in your environment that Redis can reply from time to time | |
|
1621 | # to queries with a 2 millisecond delay. | |
|
1622 | # | |
|
1623 | # use "activerehashing yes" if you don't have such hard requirements but | |
|
1624 | # want to free memory asap when possible. | |
|
1625 | activerehashing yes | |
|
1626 | ||
|
1627 | # The client output buffer limits can be used to force disconnection of clients | |
|
1628 | # that are not reading data from the server fast enough for some reason (a | |
|
1629 | # common reason is that a Pub/Sub client can't consume messages as fast as the | |
|
1630 | # publisher can produce them). | |
|
1631 | # | |
|
1632 | # The limit can be set differently for the three different classes of clients: | |
|
1633 | # | |
|
1634 | # normal -> normal clients including MONITOR clients | |
|
1635 | # replica -> replica clients | |
|
1636 | # pubsub -> clients subscribed to at least one pubsub channel or pattern | |
|
1637 | # | |
|
1638 | # The syntax of every client-output-buffer-limit directive is the following: | |
|
1639 | # | |
|
1640 | # client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds> | |
|
1641 | # | |
|
1642 | # A client is immediately disconnected once the hard limit is reached, or if | |
|
1643 | # the soft limit is reached and remains reached for the specified number of | |
|
1644 | # seconds (continuously). | |
|
1645 | # So for instance if the hard limit is 32 megabytes and the soft limit is | |
|
1646 | # 16 megabytes / 10 seconds, the client will get disconnected immediately | |
|
1647 | # if the size of the output buffers reach 32 megabytes, but will also get | |
|
1648 | # disconnected if the client reaches 16 megabytes and continuously overcomes | |
|
1649 | # the limit for 10 seconds. | |
|
1650 | # | |
|
1651 | # By default normal clients are not limited because they don't receive data | |
|
1652 | # without asking (in a push way), but just after a request, so only | |
|
1653 | # asynchronous clients may create a scenario where data is requested faster | |
|
1654 | # than they can read it. | |
|
1655 | # | |
|
1656 | # Instead there is a default limit for pubsub and replica clients, since | |
|
1657 | # subscribers and replicas receive data in a push fashion. | |
|
1658 | # | |
|
1659 | # Both the hard and the soft limit can be disabled by setting them to zero. | |
|
1660 | client-output-buffer-limit normal 0 0 0 | |
|
1661 | client-output-buffer-limit replica 256mb 64mb 60 | |
|
1662 | client-output-buffer-limit pubsub 32mb 8mb 60 | |
|
1663 | ||
|
1664 | # Client query buffers accumulate new commands. They are limited to a fixed | |
|
1665 | # amount by default in order to avoid a protocol desynchronization (for | |

1666 | # instance due to a bug in the client) leading to unbound memory usage in | |
|
1667 | # the query buffer. However you can configure it here if you have very special | |
|
1668 | # needs, such as huge multi/exec requests or the like. | |
|
1669 | # | |
|
1670 | # client-query-buffer-limit 1gb | |
|
1671 | ||
|
1672 | # In the Redis protocol, bulk requests, that is, elements representing single | |
|
1673 | # strings, are normally limited to 512 mb. However you can change this limit | |
|
1674 | # here; the value must be 1mb or greater. | |
|
1675 | # | |
|
1676 | # proto-max-bulk-len 512mb | |
|
1677 | ||
|
1678 | # Redis calls an internal function to perform many background tasks, like | |
|
1679 | # closing connections of clients in timeout, purging expired keys that are | |
|
1680 | # never requested, and so forth. | |
|
1681 | # | |
|
1682 | # Not all tasks are performed with the same frequency, but Redis checks for | |
|
1683 | # tasks to perform according to the specified "hz" value. | |
|
1684 | # | |
|
1685 | # By default "hz" is set to 10. Raising the value will use more CPU when | |
|
1686 | # Redis is idle, but at the same time will make Redis more responsive when | |
|
1687 | # there are many keys expiring at the same time, and timeouts may be | |
|
1688 | # handled with more precision. | |
|
1689 | # | |
|
1690 | # The range is between 1 and 500, however a value over 100 is usually not | |
|
1691 | # a good idea. Most users should use the default of 10 and raise this up to | |
|
1692 | # 100 only in environments where very low latency is required. | |
|
1693 | hz 10 | |
|
1694 | ||
|
1695 | # Normally it is useful to have an HZ value which is proportional to the | |
|
1696 | # number of clients connected. This is useful, for instance, to avoid | |

1697 | # processing too many clients for each background task invocation and | |

1698 | # thus avoid latency spikes. | |
|
1699 | # | |
|
1700 | # Since the default HZ value is conservatively set to 10, Redis | |
|
1701 | # offers, and enables by default, the ability to use an adaptive HZ value | |
|
1702 | # which will temporarily raise when there are many connected clients. | |
|
1703 | # | |
|
1704 | # When dynamic HZ is enabled, the actual configured HZ will be used | |
|
1705 | # as a baseline, but multiples of the configured HZ value will be actually | |
|
1706 | # used as needed once more clients are connected. In this way an idle | |
|
1707 | # instance will use very little CPU time while a busy instance will be | |
|
1708 | # more responsive. | |
|
1709 | dynamic-hz yes | |
|
1710 | ||
|
1711 | # When a child rewrites the AOF file, if the following option is enabled | |
|
1712 | # the file will be fsync-ed every 32 MB of data generated. This is useful | |
|
1713 | # in order to commit the file to the disk more incrementally and avoid | |
|
1714 | # big latency spikes. | |
|
1715 | aof-rewrite-incremental-fsync yes | |
|
1716 | ||
|
1717 | # When redis saves RDB file, if the following option is enabled | |
|
1718 | # the file will be fsync-ed every 32 MB of data generated. This is useful | |
|
1719 | # in order to commit the file to the disk more incrementally and avoid | |
|
1720 | # big latency spikes. | |
|
1721 | rdb-save-incremental-fsync yes | |
|
1722 | ||
|
1723 | # Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good | |
|
1724 | # idea to start with the default settings and only change them after investigating | |
|
1725 | # how to improve the performances and how the keys LFU change over time, which | |
|
1726 | # is possible to inspect via the OBJECT FREQ command. | |
|
1727 | # | |
|
1728 | # There are two tunable parameters in the Redis LFU implementation: the | |
|
1729 | # counter logarithm factor and the counter decay time. It is important to | |
|
1730 | # understand what the two parameters mean before changing them. | |
|
1731 | # | |
|
1732 | # The LFU counter is just 8 bits per key; its maximum value is 255, so Redis | |
|
1733 | # uses a probabilistic increment with logarithmic behavior. Given the value | |
|
1734 | # of the old counter, when a key is accessed, the counter is incremented in | |
|
1735 | # this way: | |
|
1736 | # | |
|
1737 | # 1. A random number R between 0 and 1 is extracted. | |
|
1738 | # 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). | |
|
1739 | # 3. The counter is incremented only if R < P. | |
|
1740 | # | |
|
1741 | # The default lfu-log-factor is 10. This is a table of how the frequency | |
|
1742 | # counter changes with a different number of accesses with different | |
|
1743 | # logarithmic factors: | |
|
1744 | # | |
|
1745 | # +--------+------------+------------+------------+------------+------------+ | |
|
1746 | # | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | | |
|
1747 | # +--------+------------+------------+------------+------------+------------+ | |
|
1748 | # | 0 | 104 | 255 | 255 | 255 | 255 | | |
|
1749 | # +--------+------------+------------+------------+------------+------------+ | |
|
1750 | # | 1 | 18 | 49 | 255 | 255 | 255 | | |
|
1751 | # +--------+------------+------------+------------+------------+------------+ | |
|
1752 | # | 10 | 10 | 18 | 142 | 255 | 255 | | |
|
1753 | # +--------+------------+------------+------------+------------+------------+ | |
|
1754 | # | 100 | 8 | 11 | 49 | 143 | 255 | | |
|
1755 | # +--------+------------+------------+------------+------------+------------+ | |
|
1756 | # | |
|
1757 | # NOTE: The above table was obtained by running the following commands: | |
|
1758 | # | |
|
1759 | # redis-benchmark -n 1000000 incr foo | |
|
1760 | # redis-cli object freq foo | |
|
1761 | # | |
|
1762 | # NOTE 2: The counter initial value is 5 in order to give new objects a chance | |
|
1763 | # to accumulate hits. | |
|
1764 | # | |
|
1765 | # The counter decay time is the time, in minutes, that must elapse in order | |
|
1766 | # for the key counter to be divided by two (or decremented if it has a value | |
|
1767 | # less than or equal to 10). | |
|
1768 | # | |
|
1769 | # The default value for the lfu-decay-time is 1. A special value of 0 means to | |
|
1770 | # decay the counter every time it happens to be scanned. | |
|
1771 | # | |
|
1772 | # lfu-log-factor 10 | |
|
1773 | # lfu-decay-time 1 | |
|
1774 | ||
|
1775 | ########################### ACTIVE DEFRAGMENTATION ####################### | |
|
1776 | # | |
|
1777 | # What is active defragmentation? | |
|
1778 | # ------------------------------- | |
|
1779 | # | |
|
1780 | # Active (online) defragmentation allows a Redis server to compact the | |
|
1781 | # spaces left between small allocations and deallocations of data in memory, | |
|
1782 | # thus allowing memory to be reclaimed. | |
|
1783 | # | |
|
1784 | # Fragmentation is a natural process that happens with every allocator (but | |
|
1785 | # less so with Jemalloc, fortunately) and certain workloads. Normally a server | |
|
1786 | # restart is needed in order to lower the fragmentation, or at least to flush | |
|
1787 | # away all the data and create it again. However thanks to this feature | |
|
1788 | # implemented by Oran Agra for Redis 4.0 this process can happen at runtime | |
|
1789 | # in a "hot" way, while the server is running. | |
|
1790 | # | |
|
1791 | # Basically when the fragmentation is over a certain level (see the | |
|
1792 | # configuration options below) Redis will start to create new copies of the | |
|
1793 | # values in contiguous memory regions by exploiting certain specific Jemalloc | |
|
1794 | # features (in order to understand if an allocation is causing fragmentation | |
|
1795 | # and to allocate it in a better place), and at the same time, will release the | |
|
1796 | # old copies of the data. This process, repeated incrementally for all the keys | |
|
1797 | # will cause the fragmentation to drop back to normal values. | |
|
1798 | # | |
|
1799 | # Important things to understand: | |
|
1800 | # | |
|
1801 | # 1. This feature is disabled by default, and only works if you compiled Redis | |
|
1802 | # to use the copy of Jemalloc we ship with the source code of Redis. | |
|
1803 | # This is the default with Linux builds. | |
|
1804 | # | |
|
1805 | # 2. You never need to enable this feature if you don't have fragmentation | |
|
1806 | # issues. | |
|
1807 | # | |
|
1808 | # 3. Once you experience fragmentation, you can enable this feature when | |
|
1809 | # needed with the command "CONFIG SET activedefrag yes". | |
|
1810 | # | |
|
1811 | # The configuration parameters are able to fine tune the behavior of the | |
|
1812 | # defragmentation process. If you are not sure about what they mean it is | |
|
1813 | # a good idea to leave the defaults untouched. | |
|
1814 | ||
|
1815 | # Enable active defragmentation | |
|
1816 | # activedefrag no | |
|
1817 | ||
|
1818 | # Minimum amount of fragmentation waste to start active defrag | |
|
1819 | # active-defrag-ignore-bytes 100mb | |
|
1820 | ||
|
1821 | # Minimum percentage of fragmentation to start active defrag | |
|
1822 | # active-defrag-threshold-lower 10 | |
|
1823 | ||
|
1824 | # Maximum percentage of fragmentation at which we use maximum effort | |
|
1825 | # active-defrag-threshold-upper 100 | |
|
1826 | ||
|
1827 | # Minimal effort for defrag in CPU percentage, to be used when the lower | |
|
1828 | # threshold is reached | |
|
1829 | # active-defrag-cycle-min 1 | |
|
1830 | ||
|
1831 | # Maximal effort for defrag in CPU percentage, to be used when the upper | |
|
1832 | # threshold is reached | |
|
1833 | # active-defrag-cycle-max 25 | |
|
1834 | ||
|
1835 | # Maximum number of set/hash/zset/list fields that will be processed from | |
|
1836 | # the main dictionary scan | |
|
1837 | # active-defrag-max-scan-fields 1000 | |
|
1838 | ||
|
1839 | # Jemalloc background thread for purging will be enabled by default | |
|
1840 | jemalloc-bg-thread yes | |
|
1841 | ||
|
1842 | # It is possible to pin different threads and processes of Redis to specific | |
|
1843 | # CPUs in your system, in order to maximize the performances of the server. | |
|
1844 | # This is useful both in order to pin different Redis threads in different | |
|
1845 | # CPUs, but also in order to make sure that multiple Redis instances running | |
|
1846 | # in the same host will be pinned to different CPUs. | |
|
1847 | # | |
|
1848 | # Normally you can do this using the "taskset" command, however it is also | |
|
1849 | # possible to do this via Redis configuration directly, both on Linux and FreeBSD. | |
|
1850 | # | |
|
1851 | # You can pin the server/IO threads, bio threads, aof rewrite child process, and | |
|
1852 | # the bgsave child process. The syntax to specify the cpu list is the same as | |
|
1853 | # the taskset command: | |
|
1854 | # | |
|
1855 | # Set redis server/io threads to cpu affinity 0,2,4,6: | |
|
1856 | # server_cpulist 0-7:2 | |
|
1857 | # | |
|
1858 | # Set bio threads to cpu affinity 1,3: | |
|
1859 | # bio_cpulist 1,3 | |
|
1860 | # | |
|
1861 | # Set aof rewrite child process to cpu affinity 8,9,10,11: | |
|
1862 | # aof_rewrite_cpulist 8-11 | |
|
1863 | # | |
|
1864 | # Set bgsave child process to cpu affinity 1,10,11 | |
|
1865 | # bgsave_cpulist 1,10-11 No newline at end of file |
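As a worked example of the LFU increment described above: with the default lfu-log-factor of 10, a key whose counter sits at 100 is incremented with probability P = 1/(100*10+1) ≈ 0.001, which is why it takes on the order of a million hits to saturate the 8-bit counter (see the table above). A quick sanity check that the tuned values in this file were picked up by the running server — the container name here is an assumption, adjust it to your compose project:

```shell
# ask the running Redis server for a few of the values set in this file
docker exec -it rc_cluster_redis_1 redis-cli config get hll-sparse-max-bytes
docker exec -it rc_cluster_redis_1 redis-cli config get stream-node-max-entries
docker exec -it rc_cluster_redis_1 redis-cli config get activerehashing
```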
@@ -0,0 +1,3 b'' | |||
|
1 | FROM library/redis:6.0.9 | |
|
2 | COPY service/redis/redis.conf /etc/redis/redis-rc.conf | |
|
3 | CMD ["redis-server", "/etc/redis/redis-rc.conf"] |
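This image only swaps in the tuned config above. A minimal sketch of building and running it on its own, assuming the file is saved as `service/redis/redis.dockerfile` and the build runs from the repository root (the `COPY` path requires that); docker-compose normally does this for you:

```shell
docker build -t rc-redis -f service/redis/redis.dockerfile .
docker run --rm -p 6379:6379 rc-redis
```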
@@ -0,0 +1,31 b'' | |||
|
1 | """ | |
|
2 | echo "%run path/create_docs_repo.py" | rc-ishell .dev/dev.ini | |
|
3 | """ | |
|
4 | ||
|
5 | import os | |
|
6 | from rhodecode.model.db import Session | |
|
7 | ||
|
8 | LICENSE_FILE_NAME = 'rhodecode_enterprise.license' | |
|
9 | ||
|
10 | ||
|
11 | def main(): | |
|
12 | license_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), LICENSE_FILE_NAME) | |
|
13 | ||
|
14 | if not os.path.isfile(license_file): | |
|
15 | print('No license file at {}'.format(license_file)) | |
|
16 | return | |
|
17 | ||
|
18 | try: | |
|
19 | from rc_license.models import apply_license | |
|
20 | except ImportError: | |
|
21 | print('Cannot import apply_license') | |
|
22 | return | |
|
23 | ||
|
24 | with open(license_file, 'r') as f: | |
|
25 | license_data = f.read() | |
|
26 | ||
|
27 | apply_license(license_data) | |
|
28 | Session().commit() | |
|
29 | ||
|
30 | ||
|
31 | main() |
@@ -0,0 +1,24 b'' | |||
|
1 | """ | |
|
2 | echo "%run path/enable_svn_proxy.py" | RC_SETTING='{"vcs_svn_proxy_http_requests_enabled":true, "vcs_svn_proxy_http_server_url": "http://localhost:8090"}' rc-ishell .dev/dev.ini | |
|
3 | """ | |
|
4 | ||
|
5 | import os | |
|
6 | import json | |
|
7 | from rhodecode.model.db import Session | |
|
8 | from rhodecode.model.settings import VcsSettingsModel | |
|
9 | ||
|
10 | defaults = json.dumps({ | |
|
11 | 'vcs_svn_proxy_http_requests_enabled': True, | |
|
12 | 'vcs_svn_proxy_http_server_url': 'http://svn:8090' | |
|
13 | }) | |
|
14 | ||
|
15 | ||
|
16 | def main(json_args): | |
|
17 | model = VcsSettingsModel() | |
|
18 | model.create_or_update_global_svn_settings(json_args) | |
|
19 | Session().commit() | |
|
20 | print('ok') | |
|
21 | ||
|
22 | ||
|
23 | args = json.loads(os.environ.get('RC_SETTING') or defaults) | |
|
24 | main(args) |
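The script takes its settings from the `RC_SETTING` environment variable and falls back to the `defaults` above. A sketch of overriding the SVN proxy URL at run time (the script path is a placeholder, as in the docstring):

```shell
echo "%run path/enable_svn_proxy.py" | \
  RC_SETTING='{"vcs_svn_proxy_http_requests_enabled": true, "vcs_svn_proxy_http_server_url": "http://svn:8090"}' \
  rc-ishell .dev/dev.ini
```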
@@ -0,0 +1,30 b'' | |||
|
1 | """ | |
|
2 | echo "%run path/enable_diff_cache.py" | RC_SETTING='{"rhodecode_git_close_branch_before_merging": false, "rhodecode_pr_merge_enabled": true, "rhodecode_hg_close_branch_before_merging": false, "rhodecode_use_outdated_comments": true, "rhodecode_git_use_rebase_for_merging": false, "rhodecode_diff_cache": true, "rhodecode_hg_use_rebase_for_merging": false}' rc-ishell .dev/dev.ini | |
|
3 | """ | |
|
4 | ||
|
5 | import os | |
|
6 | import json | |
|
7 | from rhodecode.model.db import Session | |
|
8 | from rhodecode.model.settings import VcsSettingsModel | |
|
9 | ||
|
10 | ||
|
11 | defaults = json.dumps({ | |
|
12 | 'rhodecode_diff_cache': True, | |
|
13 | 'rhodecode_git_close_branch_before_merging': False, | |
|
14 | 'rhodecode_git_use_rebase_for_merging': False, | |
|
15 | 'rhodecode_hg_close_branch_before_merging': False, | |
|
16 | 'rhodecode_hg_use_rebase_for_merging': False, | |
|
17 | 'rhodecode_pr_merge_enabled': True, | |
|
18 | 'rhodecode_use_outdated_comments': True | |
|
19 | }) | |
|
20 | ||
|
21 | ||
|
22 | def main(json_args): | |
|
23 | model = VcsSettingsModel() | |
|
24 | model.create_or_update_global_pr_settings(json_args) | |
|
25 | Session().commit() | |
|
26 | print('ok') | |
|
27 | ||
|
28 | ||
|
29 | args = json.loads(os.environ.get('RC_SETTING') or defaults) | |
|
30 | main(args) |
@@ -0,0 +1,22 b'' | |||
|
1 | """ | |
|
2 | echo "%run path/generate_svn_apache_conf.py" | RC_SETTING='{"setting1":"key1"}' rc-ishell .dev/dev.ini | |
|
3 | """ | |
|
4 | ||
|
5 | import os | |
|
6 | import json | |
|
7 | from rhodecode.apps.svn_support.utils import generate_mod_dav_svn_config | |
|
8 | from rhodecode.lib.base import bootstrap_request | |
|
9 | ||
|
10 | defaults = json.dumps({ | |
|
11 | ||
|
12 | }) | |
|
13 | ||
|
14 | ||
|
15 | def main(json_args): | |
|
16 | request = bootstrap_request() | |
|
17 | generate_mod_dav_svn_config(request.registry) | |
|
18 | print('ok') | |
|
19 | ||
|
20 | ||
|
21 | args = json.loads(os.environ.get('RC_SETTING') or defaults) | |
|
22 | main(args) |
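After regenerating the mod_dav_svn config, Apache in the SVN container has to re-read it. A sketch, assuming the compose-generated container name `rc_cluster_svn_1` (adjust to your project):

```shell
echo "%run path/generate_svn_apache_conf.py" | rc-ishell .dev/dev.ini
docker exec rc_cluster_svn_1 apachectl -k graceful
```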
@@ -0,0 +1,24 b'' | |||
|
1 | """ | |
|
2 | echo "%run path/set_default_renderer_to_md.py" | RC_SETTING='[["markup_renderer", "markdown", "unicode"]]' rc-ishell .dev/dev.ini | |
|
3 | """ | |
|
4 | ||
|
5 | import os | |
|
6 | import json | |
|
7 | from rhodecode.model.db import Session | |
|
8 | from rhodecode.model.settings import SettingsModel | |
|
9 | ||
|
10 | defaults = json.dumps([ | |
|
11 | ('markup_renderer', 'markdown', 'unicode') | |
|
12 | ]) | |
|
13 | ||
|
14 | ||
|
15 | def main(json_args): | |
|
16 | model = SettingsModel() | |
|
17 | for setting_name, value, type_ in json_args: | |
|
18 | model.create_or_update_setting(setting_name, value, type_) | |
|
19 | Session().commit() | |
|
20 | print('ok') | |
|
21 | ||
|
22 | ||
|
23 | args = json.loads(os.environ.get('RC_SETTING') or defaults) | |
|
24 | main(args) |
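The same `[name, value, type]` triple works for any supported renderer value; a sketch switching the default back to reStructuredText:

```shell
echo "%run path/set_default_renderer_to_md.py" | \
  RC_SETTING='[["markup_renderer", "rst", "unicode"]]' rc-ishell .dev/dev.ini
```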
@@ -0,0 +1,245 b'' | |||
|
1 | FROM ubuntu:groovy | |
|
2 | MAINTAINER RhodeCode Inc. <support@rhodecode.com> | |
|
3 | ||
|
4 | ARG TZ="UTC" | |
|
5 | ARG LOCALE_TYPE=en_US.UTF-8 | |
|
6 | ARG RHODECODE_TYPE=Enterprise | |
|
7 | ARG RHODECODE_VERSION=4.24.1 | |
|
8 | ARG RHODECODE_DB=sqlite | |
|
9 | ARG RHODECODE_USER_NAME=admin | |
|
10 | ARG RHODECODE_USER_PASS=secret4 | |
|
11 | ARG RHODECODE_USER_EMAIL=support@rhodecode.com | |
|
12 | ||
|
13 | # env are runtime | |
|
14 | ENV \ | |
|
15 | TZ=${TZ} \ | |
|
16 | LOCALE_TYPE=${LOCALE_TYPE} \ | |
|
17 | \ | |
|
18 | ## Define type we build, and the instance we'll create | |
|
19 | RHODECODE_TYPE=${RHODECODE_TYPE} \ | |
|
20 | RC_TYPE_ID=enterprise-1 \ | |
|
21 | \ | |
|
22 | ## SETUP ARGS FOR INSTALLATION ## | |
|
23 | ## set version we build on, get from .env or set default ver | |
|
24 | RHODECODE_VERSION=${RHODECODE_VERSION} \ | |
|
25 | \ | |
|
26 | ## set DB, default sqlite | |
|
27 | RHODECODE_DB=${RHODECODE_DB} \ | |
|
28 | \ | |
|
29 | ## set app bootstrap required data | |
|
30 | RHODECODE_USER_NAME=${RHODECODE_USER_NAME} \ | |
|
31 | RHODECODE_USER_PASS=${RHODECODE_USER_PASS} \ | |
|
32 | RHODECODE_USER_EMAIL=${RHODECODE_USER_EMAIL} \ | |
|
33 | \ | |
|
34 | RC_USER=rhodecode \ | |
|
35 | \ | |
|
36 | # SVN CONFIG | |
|
37 | MOD_DAV_SVN_CONF_FILE=/etc/rhodecode/conf/svn/mod_dav_svn.conf \ | |
|
38 | MOD_DAV_SVN_PORT=8090 \ | |
|
39 | \ | |
|
40 | # SSHD CONFIG | |
|
41 | SSHD_CONF_FILE=/etc/rhodecode/sshd_config \ | |
|
42 | \ | |
|
43 | BUILD_CONF=/etc/rhodecode/conf_build \ | |
|
44 | BUILD_BIN_DIR=/var/opt/rhodecode_bin \ | |
|
45 | RHODECODE_DATA_DIR=/var/opt/rhodecode_data \ | |
|
46 | RHODECODE_REPO_DIR=/var/opt/rhodecode_repo_store \ | |
|
47 | RHODECODE_HTTP_PORT=10020 \ | |
|
48 | RHODECODE_VCS_PORT=10010 \ | |
|
49 | RHODECODE_HOST=0.0.0.0 \ | |
|
50 | RHODECODE_VCS_HOST=127.0.0.1 | |
|
51 | ||
|
52 | ENV \ | |
|
53 | RCCONTROL=/home/$RC_USER/.rccontrol-profile/bin/rccontrol \ | |
|
54 | SUPERVISOR_CONF=/home/$RC_USER/.rccontrol/supervisor/supervisord.ini \ | |
|
55 | # make application scripts visible | |
|
56 | PATH=$PATH:/home/$RC_USER/.rccontrol-profile/bin | |
|
57 | ||
|
58 | ENV SVN_LOCALE_DEPS apache2 apache2-utils libapache2-mod-svn | |
|
59 | ENV SSH_LOCALE_DEPS openssh-server | |
|
60 | ENV PYTHON_DEPS python2 | |
|
61 | ||
|
62 | RUN \ | |
|
63 | echo "** install base packages **" && \ | |
|
64 | set -eux; \ | |
|
65 | \ | |
|
66 | savedAptMark="$(apt-mark showmanual)"; \ | |
|
67 | apt-get update; \ | |
|
68 | DEBIAN_FRONTEND="noninteractive" \ | |
|
69 | apt-get install -y --no-install-recommends \ | |
|
70 | tini \ | |
|
71 | bash \ | |
|
72 | binutils \ | |
|
73 | tzdata \ | |
|
74 | locales \ | |
|
75 | openssl \ | |
|
76 | curl \ | |
|
77 | sudo \ | |
|
78 | gosu \ | |
|
79 | $PYTHON_DEPS \ | |
|
80 | $SSH_LOCALE_DEPS \ | |
|
81 | $SVN_LOCALE_DEPS \ | |
|
82 | ; \ | |
|
83 | rm -rf /var/lib/apt/lists/*; | |
|
84 | ||
|
85 | RUN \ | |
|
86 | echo "** Configure the python executable for py2/3 compat **" && \ | |
|
87 | ISPY=$(which python3 || which python2) && \ | |
|
88 | if [ -n "$ISPY" ] ; then ln -s "$ISPY" /usr/bin/python ; fi | |
|
89 | ||
|
90 | RUN \ | |
|
91 | echo "** Configure the locales **" && \ | |
|
92 | sed -i "s/^# ${LOCALE_TYPE}/${LOCALE_TYPE}/g" /etc/locale.gen && \ | |
|
93 | locale-gen | |
|
94 | ||
|
95 | # locale-archive is a fix so locales are available for the old nix glibc 2.26 | |
|
96 | ENV \ | |
|
97 | LOCALE_ARCHIVE=/var/opt/locale-archive \ | |
|
98 | LANG=${LOCALE_TYPE} \ | |
|
99 | LANGUAGE=${LOCALE_TYPE} \ | |
|
100 | LC_ALL=${LOCALE_TYPE} | |
|
101 | ||
|
102 | # configure the system user | |
|
103 | # explicitly set uid/gid to guarantee that it won't change in the future | |
|
104 | # the values 999:999 are identical to the current user/group id assigned | |
|
105 | RUN \ | |
|
106 | echo "** Create system user $RC_USER **" && \ | |
|
107 | groupadd --system --gid 999 $RC_USER && \ | |
|
108 | useradd --system --gid $RC_USER --uid 999 --shell /bin/bash $RC_USER | |
|
109 | ||
|
110 | # set the default bash shell | |
|
111 | SHELL ["/bin/bash", "-c"] | |
|
112 | ||
|
113 | # Fix and set a timezone | |
|
114 | RUN \ | |
|
115 | echo "** configure the timezone **" && \ | |
|
116 | rm /etc/localtime && cp /usr/share/zoneinfo/$TZ /etc/localtime && \ | |
|
117 | echo $TZ > /etc/timezone | |
|
118 | ||
|
119 | ||
|
120 | RUN \ | |
|
121 | echo "** prepare rhodecode store and cache **" && \ | |
|
122 | install -d -m 0755 -o $RC_USER -g $RC_USER /opt/rhodecode && \ | |
|
123 | install -d -m 0755 -o $RC_USER -g $RC_USER /var/opt/rhodecode_bin && \ | |
|
124 | install -d -m 0755 -o $RC_USER -g $RC_USER $RHODECODE_REPO_DIR && \ | |
|
125 | install -d -m 0755 -o $RC_USER -g $RC_USER $RHODECODE_DATA_DIR && \ | |
|
126 | install -d -m 0755 -o $RC_USER -g $RC_USER $BUILD_CONF && \ | |
|
127 | install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/ && \ | |
|
128 | install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/.rccontrol && \ | |
|
129 | install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/.rccontrol/cache && \ | |
|
130 | install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/.rccontrol/bootstrap && \ | |
|
131 | install -d -m 0700 -o $RC_USER -g $RC_USER /home/$RC_USER/.ssh && \ | |
|
132 | install -d -m 0700 -o $RC_USER -g $RC_USER /home/$RC_USER/.rhoderc | |
|
133 | ||
|
134 | # expose our custom sshd config | |
|
135 | COPY service/sshd/sshd_config $SSHD_CONF_FILE | |
|
136 | ||
|
137 | # Apache SVN setup | |
|
138 | RUN \ | |
|
139 | echo "**** Apache config cleanup ****" && \ | |
|
140 | rm -f /etc/apache2/conf.d/info.conf \ | |
|
141 | /etc/apache2/conf.d/mpm.conf \ | |
|
142 | /etc/apache2/conf.d/userdir.conf && \ | |
|
143 | rm -f /etc/apache2/sites-enabled/* && \ | |
|
144 | rm -f /etc/apache2/sites-available/* | |
|
145 | ||
|
146 | # custom SVN virtualhost | |
|
147 | COPY service/svn/virtualhost.conf /etc/apache2/sites-enabled/ | |
|
148 | ||
|
149 | RUN \ | |
|
150 | echo "**** Apache config ****" && \ | |
|
151 | echo $(strings /usr/lib/apache2/modules/mod_dav_svn.so | grep 'Powered by') > /var/opt/dav.version && \ | |
|
152 | mkdir -p /run/apache2 && \ | |
|
153 | mkdir -p /var/opt/www && \ | |
|
154 | echo "unset HOME" > /etc/apache2/envvars && \ | |
|
155 | echo "export APACHE_RUN_USER=${RC_USER}" >> /etc/apache2/envvars && \ | |
|
156 | echo "export APACHE_PID_FILE=/var/run/apache2/apache2.pid" >> /etc/apache2/envvars && \ | |
|
157 | echo "export APACHE_RUN_DIR=/var/run/apache2" >> /etc/apache2/envvars && \ | |
|
158 | echo "export APACHE_LOCK_DIR=/var/lock/apache2" >> /etc/apache2/envvars && \ | |
|
159 | echo "export APACHE_RUN_USER=${RC_USER}" >> /etc/apache2/envvars && \ | |
|
160 | echo "export APACHE_RUN_GROUP=${RC_USER}" >> /etc/apache2/envvars && \ | |
|
161 | sed -i "s/Listen 80/Listen ${MOD_DAV_SVN_PORT}/g" /etc/apache2/ports.conf | |
|
162 | ||
|
163 | ||
|
164 | # Copy artifacts | |
|
165 | COPY --chown=$RC_USER:$RC_USER .cache/* /home/$RC_USER/.rccontrol/cache/ | |
|
166 | COPY --chown=$RC_USER:$RC_USER service/rhodecode/bootstrap/* /home/$RC_USER/.rccontrol/bootstrap/ | |
|
167 | COPY --chown=$RC_USER:$RC_USER config/compose/rhodecode_enterprise.license /home/$RC_USER/.rccontrol/bootstrap/ | |
|
168 | ||
|
169 | RUN \ | |
|
170 | echo "**** locale-archive path ****" && \ | |
|
171 | mv -v /home/$RC_USER/.rccontrol/cache/locale-archive /var/opt/locale-archive | |
|
172 | ||
|
173 | # change to non-root user for RUN commands | |
|
174 | USER $RC_USER | |
|
175 | WORKDIR /home/$RC_USER | |
|
176 | ||
|
177 | RUN \ | |
|
178 | echo "** install rhodecode control **" && \ | |
|
179 | cd /home/$RC_USER/.rccontrol/cache && \ | |
|
180 | INSTALLER=$(ls -Art /home/$RC_USER/.rccontrol/cache/RhodeCode-installer-* | tail -n 1) && \ | |
|
181 | chmod +x ${INSTALLER} && \ | |
|
182 | ${INSTALLER} --accept-license && \ | |
|
183 | ${RCCONTROL} self-init && \ | |
|
184 | cp -v /home/$RC_USER/.rccontrol-profile/etc/ca-bundle.crt $BUILD_CONF/ | |
|
185 | ||
|
186 | RUN \ | |
|
187 | echo "** install vcsserver ${RHODECODE_VERSION} **" && \ | |
|
188 | ${RCCONTROL} install VCSServer --version ${RHODECODE_VERSION} --start-at-boot=yes --accept-license --offline \ | |
|
189 | '{"host":"'"$RHODECODE_VCS_HOST"'", "port":"'"$RHODECODE_VCS_PORT"'"}' && \ | |
|
190 | VCSSERVER_PATH=/home/$RC_USER/.rccontrol/vcsserver-1 && \ | |
|
191 | cp -v ${VCSSERVER_PATH}/vcsserver.ini $BUILD_CONF/ | |
|
192 | ||
|
193 | RUN \ | |
|
194 | echo "** install rhodecode ${RHODECODE_TYPE} ${RHODECODE_VERSION} **" && \ | |
|
195 | RHODECODE_DB_INIT=sqlite && \ | |
|
196 | ${RCCONTROL} install ${RHODECODE_TYPE} --version ${RHODECODE_VERSION} --start-at-boot=yes --accept-license --offline \ | |
|
197 | '{"host":"'"$RHODECODE_HOST"'", "port":"'"$RHODECODE_HTTP_PORT"'", "username":"'"$RHODECODE_USER_NAME"'", "password":"'"$RHODECODE_USER_PASS"'", "email":"'"$RHODECODE_USER_EMAIL"'", "repo_dir":"'"$RHODECODE_REPO_DIR"'", "database": "'"$RHODECODE_DB_INIT"'", "skip_existing_db": "1"}' && \ | |
|
198 | RHODECODE_PATH=/home/$RC_USER/.rccontrol/${RC_TYPE_ID} && \ | |
|
199 | cp -v ${RHODECODE_PATH}/rhodecode.ini $BUILD_CONF/ && \ | |
|
200 | cp -v ${RHODECODE_PATH}/search_mapping.ini $BUILD_CONF/ && \ | |
|
201 | cp -v ${RHODECODE_PATH}/gunicorn_conf.py $BUILD_CONF/ && \ | |
|
202 | rm -rf $BUILD_BIN_DIR/bin && ln -s ${RHODECODE_PATH}/profile/bin $BUILD_BIN_DIR && \ | |
|
203 | mkdir -p $RHODECODE_DATA_DIR/static && cp -r ${RHODECODE_PATH}/public/* $RHODECODE_DATA_DIR/static/ && \ | |
|
204 | rm ${RHODECODE_PATH}/rhodecode.db | |
|
205 | ||
|
206 | ||
|
207 | RUN \ | |
|
208 | echo "** configure supervisord **" && \ | |
|
209 | cp -v ${SUPERVISOR_CONF} $BUILD_CONF/ && \ | |
|
210 | sed -i "s/self_managed_supervisor = False/self_managed_supervisor = True/g" /home/$RC_USER/.rccontrol.ini | |
|
211 | ||
|
212 | USER root | |
|
213 | ||
|
214 | ||
|
215 | RUN \ | |
|
216 | echo "**** cleanup ****" && \ | |
|
217 | apt-get remove -y $PYTHON_DEPS && \ | |
|
218 | apt-get autoclean -y && \ | |
|
219 | rm -f /tmp/* && \ | |
|
220 | rm -f /home/$RC_USER/.rccontrol/cache/RhodeCode-installer-* && \ | |
|
221 | rm -f /home/$RC_USER/.rccontrol/cache/*.bz2 && \ | |
|
222 | rm -rf /var/lib/apt/lists/* && \ | |
|
223 | rm -rf /var/cache/apk/* && \ | |
|
224 | rm ${SUPERVISOR_CONF} | |
|
225 | ||
|
226 | # copy entrypoints | |
|
227 | COPY entrypoints.d/entrypoint.sh /opt/entrypoints.d/entrypoint.sh | |
|
228 | RUN chmod +x /opt/entrypoints.d/entrypoint.sh | |
|
229 | ||
|
230 | # config volume | |
|
231 | VOLUME /etc/rhodecode/conf | |
|
232 | ||
|
233 | # repo store volume | |
|
234 | VOLUME /var/opt/rhodecode_repo_store | |
|
235 | ||
|
236 | # data volume | |
|
237 | VOLUME /var/opt/rhodecode_data | |
|
238 | ||
|
239 | # logs volume | |
|
240 | VOLUME /var/log/rhodecode | |
|
241 | ||
|
242 | ENTRYPOINT ["/opt/entrypoints.d/entrypoint.sh"] | |
|
243 | ||
|
244 | # compose can override this | |
|
245 | CMD ["supervisord", "--nodaemon", "-c", "/etc/rhodecode/conf/supervisord.ini"] |
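All of the `ARG` values at the top of this Dockerfile can be overridden at build time. A minimal standalone-build sketch (in the full setup docker-compose passes these for you; the values here are examples only):

```shell
docker build -f rhodecode.dockerfile \
  --build-arg RHODECODE_VERSION=4.24.1 \
  --build-arg RHODECODE_TYPE=Enterprise \
  --build-arg RHODECODE_USER_PASS=changeme \
  -t rhodecode:4.24.1 .
```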
@@ -0,0 +1,76 b'' | |||
|
1 | FROM debian:buster | |
|
2 | MAINTAINER RhodeCode Inc. <support@rhodecode.com> | |
|
3 | ||
|
4 | # env are runtime/build | |
|
5 | ENV \ | |
|
6 | TZ="UTC" \ | |
|
7 | RC_USER=rhodecode \ | |
|
8 | RHODECODE_USER_NAME=rhodecode \ | |
|
9 | SSHD_CONF_FILE=/etc/rhodecode/sshd_config | |
|
10 | ||
|
11 | RUN \ | |
|
12 | echo "** install base packages **" && \ | |
|
13 | set -eux; \ | |
|
14 | \ | |
|
15 | savedAptMark="$(apt-mark showmanual)"; \ | |
|
16 | apt-get update; \ | |
|
17 | apt-get install -y --no-install-recommends \ | |
|
18 | bash \ | |
|
19 | tzdata \ | |
|
20 | vim \ | |
|
21 | openssl \ | |
|
22 | openssh-server \ | |
|
23 | ; \ | |
|
24 | rm -rf /var/lib/apt/lists/*; | |
|
25 | ||
|
26 | # # reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies | |
|
27 | # apt-mark auto '.*' > /dev/null; \ | |
|
28 | # [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; \ | |
|
29 | # apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false | |
|
30 | ||
|
31 | # configure the system user | |
|
32 | # explicitly set uid/gid to guarantee that it won't change in the future | |
|
33 | # the values 999:999 are identical to the current user/group id assigned | |
|
34 | RUN \ | |
|
35 | echo "** Create system user $RC_USER **" && \ | |
|
36 | groupadd --system --gid 999 $RC_USER && \ | |
|
37 | useradd --system --gid $RC_USER --uid 999 --shell /bin/bash $RC_USER | |
|
38 | ||
|
39 | ||
|
40 | RUN \ | |
|
41 | echo "** prepare rhodecode store and cache **" && \ | |
|
42 | install -d -m 0700 -o $RC_USER -g $RC_USER /home/$RC_USER/.ssh | |
|
43 | ||
|
44 | # set the default bash shell | |
|
45 | SHELL ["/bin/bash", "-c"] | |
|
46 | ||
|
47 | # Fix and set a timezone | |
|
48 | RUN \ | |
|
49 | echo "** configure the timezone **" && \ | |
|
50 | echo $TZ > /etc/timezone | |
|
51 | ||
|
52 | # expose our custom sshd config | |
|
53 | COPY service/sshd/sshd_config $SSHD_CONF_FILE | |
|
54 | ||
|
55 | USER root | |
|
56 | ||
|
57 | RUN \ | |
|
58 | echo "**** cleanup ****" && \ | |
|
59 | rm -f /tmp/* && \ | |
|
60 | rm -rf /var/lib/apt/lists/* && \ | |
|
61 | rm -rf /var/cache/apk/* | |
|
62 | ||
|
63 | # copy entrypoints | |
|
64 | COPY entrypoints.d/ssh-entrypoint.sh /opt/entrypoints.d/ssh-entrypoint.sh | |
|
65 | RUN chmod +x /opt/entrypoints.d/ssh-entrypoint.sh | |
|
66 | ||
|
67 | # config volume | |
|
68 | VOLUME /etc/rhodecode/conf | |
|
69 | ||
|
70 | # logs volume | |
|
71 | VOLUME /var/log/rhodecode | |
|
72 | ||
|
73 | ENTRYPOINT ["/opt/entrypoints.d/ssh-entrypoint.sh"] | |
|
74 | ||
|
75 | # compose can override this | |
|
76 | CMD ["/usr/sbin/sshd", "-f", "/etc/rhodecode/sshd_config", "-D", "-e"] |
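A quick connectivity check once the stack is up, assuming compose maps the container's port 22 to host port 9022 (check your `.env` for the actual mapping); only key-based auth is accepted per the sshd_config that follows:

```shell
ssh -p 9022 rhodecode@localhost
```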
@@ -0,0 +1,22 b'' | |||
|
1 | Port 22 | |
|
2 | ChallengeResponseAuthentication no | |
|
3 | ||
|
4 | HostKey /etc/rhodecode/conf/ssh/ssh_host_rsa_key | |
|
5 | HostKey /etc/rhodecode/conf/ssh/ssh_host_ecdsa_key | |
|
6 | HostKey /etc/rhodecode/conf/ssh/ssh_host_ed25519_key | |
|
7 | ||
|
8 | Protocol 2 | |
|
9 | PermitRootLogin no | |
|
10 | PasswordAuthentication no | |
|
11 | MaxStartups 100:30:200 | |
|
12 | AllowUsers rhodecode | |
|
13 | PrintMotd no | |
|
14 | PubkeyAuthentication yes | |
|
15 | AuthorizedKeysFile /etc/rhodecode/conf/ssh/authorized_keys /etc/rhodecode/conf/ssh/authorized_keys_rhodecode | |
|
16 | ||
|
17 | # Fixes: User USERNAME not allowed because account is locked | |
|
18 | # With "UsePAM yes", even if the account gets locked because of the password, key auth still works | |
|
19 | UsePAM yes | |
|
20 | ||
|
21 | # Disable UseDNS since DNS lookups tend to slow down connecting | |
|
22 | UseDNS no |
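The three `HostKey` entries above must exist in the config volume before sshd will start. A generation sketch using the exact paths from this file:

```shell
ssh-keygen -t rsa     -f /etc/rhodecode/conf/ssh/ssh_host_rsa_key     -N ''
ssh-keygen -t ecdsa   -f /etc/rhodecode/conf/ssh/ssh_host_ecdsa_key   -N ''
ssh-keygen -t ed25519 -f /etc/rhodecode/conf/ssh/ssh_host_ed25519_key -N ''
```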
@@ -0,0 +1,70 b'' | |||
|
1 | FROM alpine:3.12 | |
|
2 | MAINTAINER RhodeCode Inc. <support@rhodecode.com> | |
|
3 | ||
|
4 | ENV \ | |
|
5 | RC_USER=rhodecode \ | |
|
6 | MOD_DAV_SVN_CONF_FILE=/etc/rhodecode/conf/svn/mod_dav_svn.conf | |
|
7 | ||
|
8 | RUN \ | |
|
9 | echo "** install base svn packages **" && \ | |
|
10 | apk update && \ | |
|
11 | apk add --no-cache \ | |
|
12 | tini \ | |
|
13 | bash \ | |
|
14 | curl \ | |
|
15 | apache2 \ | |
|
16 | apache2-utils \ | |
|
17 | apache2-webdav \ | |
|
18 | mod_dav_svn \ | |
|
19 | subversion | |
|
20 | ||
|
21 | # configure the system user | |
|
22 | # explicitly set uid/gid to guarantee that it won't change in the future | |
|
23 | # the values 999:999 are identical to the current user/group id assigned | |
|
24 | RUN \ | |
|
25 | echo "** Create system user $RC_USER **" && \ | |
|
26 | addgroup -S -g 999 $RC_USER && \ | |

27 | adduser -S -u 999 -G $RC_USER -s /bin/bash $RC_USER | |
|
28 | ||
|
29 | ||
|
30 | RUN \ | |
|
31 | echo "**** cleanup ****" && \ | |
|
32 | apk del tzdata python2 && \ | |
|
33 | rm -f /tmp/* && \ | |
|
34 | rm -rf /var/lib/apt/lists/* && \ | |
|
35 | rm -rf /var/cache/apk/* | |
|
36 | ||
|
37 | RUN \ | |
|
38 | echo "**** Apache config cleanup ****" && \ | |
|
39 | rm -f /etc/apache2/conf.d/info.conf \ | |
|
40 | /etc/apache2/conf.d/mpm.conf \ | |
|
41 | /etc/apache2/conf.d/userdir.conf | |
|
42 | ||
|
43 | ||
|
44 | COPY svn/virtualhost.conf /etc/apache2/conf.d/ | |
|
45 | ||
|
46 | # copy entrypoints | |
|
47 | COPY entrypoints.d/svn-entrypoint.sh /opt/entrypoints.d/svn-entrypoint.sh | |
|
48 | RUN chmod +x /opt/entrypoints.d/svn-entrypoint.sh | |
|
49 | ||
|
50 | RUN \ | |
|
51 | echo $(strings /usr/lib/apache2/mod_dav_svn.so | grep 'Powered by') > /var/opt/dav.version && \ | |
|
52 | mkdir -p /run/apache2 && \ | |
|
53 | mkdir -p /var/opt/www && \ | |
|
54 | echo "export APACHE_RUN_USER=${RC_USER}" > /etc/apache2/envvars && \ | |
|
55 | echo "export APACHE_RUN_GROUP=${RC_USER}" >> /etc/apache2/envvars && \ | |
|
56 | sed -i "s/User apache/User ${RC_USER}/g" /etc/apache2/httpd.conf && \ | |
|
57 | sed -i "s/Group apache/Group ${RC_USER}/g" /etc/apache2/httpd.conf | |
|
58 | ||
|
59 | # repo store volume | |
|
60 | VOLUME /var/opt/rhodecode_repo_store | |
|
61 | ||
|
62 | # config volume | |
|
63 | VOLUME /etc/rhodecode/conf | |
|
64 | ||
|
65 | # logs volume | |
|
66 | VOLUME /var/log/rhodecode | |
|
67 | ||
|
68 | ENTRYPOINT ["/opt/entrypoints.d/svn-entrypoint.sh"] | |
|
69 | ||
|
70 | CMD ["apachectl", "-D", "FOREGROUND"] |
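A sketch of building and probing this service on its own (the file name and tag are assumptions; in the full setup compose builds it and the virtualhost below listens on `MOD_DAV_SVN_PORT`):

```shell
docker build -t rc-svn -f svn.dockerfile .
# print the mod_dav_svn version string recorded during the build
docker run --rm rc-svn cat /var/opt/dav.version
```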
@@ -0,0 +1,22 b'' | |||
|
1 | LoadModule headers_module /usr/lib/apache2/modules/mod_headers.so | |
|
2 | LoadModule authn_anon_module /usr/lib/apache2/modules/mod_authn_anon.so | |
|
3 | LoadModule dav_svn_module /usr/lib/apache2/modules/mod_dav_svn.so | |
|
4 | ||
|
5 | <VirtualHost *:${MOD_DAV_SVN_PORT}> | |
|
6 | ServerAdmin admin@localhost | |
|
7 | DocumentRoot /var/opt/www | |
|
8 | ErrorLog ${APACHE_LOG_DIR}/svn_error.log | |
|
9 | CustomLog ${APACHE_LOG_DIR}/svn_access.log combined | |
|
10 | LogLevel info | |
|
11 | ||
|
12 | <Location /_server_status> | |
|
13 | Require ip 127 | |
|
14 | SetHandler server-status | |
|
15 | Require all granted | |
|
16 | </Location> | |
|
17 | ||
|
18 | # allows custom host names, prevents 400 errors on checkout | |
|
19 | HttpProtocolOptions Unsafe | |
|
20 | ||
|
21 | Include ${MOD_DAV_SVN_CONF_FILE} | |
|
22 | </VirtualHost> No newline at end of file |
@@ -1,28 +1,64 b'' | |||
|
1 | # RhodeCode |
|
|
1 | # RhodeCode Cluster | |
|
2 | ||
|
3 | RhodeCode Cluster is a multi-node, highly scalable setup to run RhodeCode and | |

4 | all its additional components in a single environment using Docker. | |
|
5 | ||
|
6 | Using docker-compose, this setup creates the following services for RhodeCode: | |
|
7 | ||
|
8 | - Nginx HTTP Server for load balancing and reverse proxy | |
|
9 | - RhodeCode HTTP | |
|
10 | - VCSServer for GIT/SVN/HG support | |
|
11 | - SSH Server for cloning over SSH | |
|
12 | - SVN webserver for SVN access over HTTP | |
|
13 | - Celery workers for asynchronous tasks | |
|
14 | - Celery beat for automation tasks | |
|
15 | - Redis Database for caching | |
|
16 | - Postgres database for persistent storage | |
|
17 | - Channelstream websocket server for live components | |
|
18 | ||
|
19 | ||
|
20 | ## Prerequisites | |
|
21 | ||
|
22 | Visit the Docker site and install Docker (min version 20.10) and docker-compose: | |
|
23 | ||
|
24 | - https://docs.docker.com/engine/install/ | |
|
25 | - https://docs.docker.com/compose/install/ | |
|
26 | ||
|
27 | ||
|
28 | # Installation steps | |
|
29 | Follow these steps to build and run the RhodeCode Cluster via docker-compose. | |
|
2 | 30 | |
|
3 | 31 | ## Download installer binaries |
|
4 | 32 | |
|
5 | 33 | First start by fetching required installer binaries. This is required to create both |
|
6 | 34 | the simple build and the full compose setup. |
|
7 | Download needed installer files, version can be adjusted in the download script | |
|
8 | Currently this is version 4.23.2, version can be adjusted in `.env` file | |
|
35 | Please check the `.env` file to adjust the version if needed. | |
|
9 | 36 | |
|
10 | 37 | `cd .boostrap/; ./download-artifacts.sh; cd ../` |
|
11 | 38 | |
|
12 | ## docker compose: | |
|
39 | This will download required files and put them into the `.cache` directory. | |
|
40 | This directory should look similar to this after the downloads have finished: | |
|
41 | ||
|
42 | ```shell | |
|
43 | drwxr-xr-x 8 rcdev rcdev 256B Feb 8 13:35 . | |
|
44 | drwxr-xr-x 14 rcdev rcdev 448B Feb 8 10:40 .. | |
|
45 | -rw-r--r-- 1 rcdev rcdev 0B Feb 8 20:44 .dirkeep | |
|
46 | -rwxr-xr-x 1 rcdev rcdev 241M Feb 8 13:35 RhodeCode-installer-linux-build20210208_0800 | |
|
47 | -rw-r--r-- 1 rcdev rcdev 156M Feb 8 13:35 RhodeCodeCommunity-4.24.1+x86_64-linux_build20210208_0800.tar.bz2 | |
|
48 | -rw-r--r-- 1 rcdev rcdev 171M Feb 8 13:35 RhodeCodeEnterprise-4.24.1+x86_64-linux_build20210208_0800.tar.bz2 | |
|
49 | -rw-r--r-- 1 rcdev rcdev 145M Feb 8 13:35 RhodeCodeVCSServer-4.24.1+x86_64-linux_build20210208_0800.tar.bz2 | |
|
50 | -rw-r--r-- 1 rcdev rcdev 109M Feb 8 13:35 locale-archive | |
|
51 | ``` | |
|
13 | 52 | |
|
14 | There's a more advanced high-performance setup using docker-compose. | |
|
15 | It bootstraps additional services for RhodeCode: | |
|
53 | ## Set License for EE version | |
|
16 | 54 | |
|
17 | - RhodeCode | |
|
18 | - VCSServer | |
|
19 | - SSH Server | |
|
20 | - Redis Database | |
|
21 | - Postgres database | |
|
22 | - Channelstream websocket server | |
|
23 | - Celery workers, and automation scheduler | |
|
24 | - SVN webserver for HTTP support | |
|
25 | - Nginx HTTP Server | |
|
55 | This setup uses a license provided in the file | |

56 | `config/compose/rhodecode_enterprise.license`. If you have a full or a trial license, | |

57 | please save the license data inside this file so it will be applied at creation. | |

58 | This file can also be left empty; the license can then be applied via the web interface. | |
|
59 | ||
|
60 | ||
|
61 | ## Run docker-compose build | |
|
26 | 62 | |
|
27 | 63 | To create a full stack we need to run the database container, so it's ready to |
|
28 | 64 | build the docker image. |
@@ -37,7 +73,8 b' build the docker image.' | |||
|
37 | 73 | We can now run the full installation. Database needs to be running for the next build command. |
|
38 | 74 | |
|
39 | 75 | ```shell |
|
40 | docker-compose build rhodecode |
|
|
76 | docker-compose build rhodecode | |
|
77 | docker-compose build | |
|
41 | 78 | ``` |
|
42 | 79 | |
|
43 | 80 | Once we build the rhodecode app, we can run the whole stack using `docker-compose up` |
@@ -56,6 +93,39 b' In case for bigger setups docker-compose can scale more rhodecode/vcsserver work' | |||
|
56 | 93 | docker-compose up --scale vcsserver=3 --scale rhodecode=3 |
|
57 | 94 | ``` |
|
58 | 95 | |
|
96 | ## Data structure | |
|
97 | ||
|
98 | There are 4 volumes defined: | |
|
99 | ||
|
100 | - `/var/log/rhodecode` # all logs from RhodeCode are saved in this volume | |
|
101 | - `/etc/rhodecode/conf` # storing configuration files for rhodecode, vcsserver and supervisord, and some cache data | |
|
102 | - `/var/opt/rhodecode_repo_store` # main repository storage where repositories would be stored | |
|
103 | - `/var/opt/rhodecode_data` # data dir for rhodecode cache/lock files, or user sessions (for file backend) | |
|
104 | ||
|
105 | ||
|
106 | ||
|
107 | ||
|
108 | ## Upgrade | |
|
109 | ||
|
110 | - pull the latest repo | |
|
111 | - check .env file for correct update version | |
|
112 | - re-build rhodecode | |
|
113 | - docker-compose build rhodecode | |
|
114 | - docker-compose stop | |
|
115 | - docker-compose up | |
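The steps above as one shell pass (a sketch; run from the repository checkout):

```shell
git pull
cat .env                       # confirm the version to deploy
docker-compose build rhodecode
docker-compose stop
docker-compose up
```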
|
116 | ||
|
59 | 129 | |
|
60 | 130 | |
|
61 | 131 | Logging is pushed to stdout from all services. |
@@ -68,12 +138,12 b' This is a fully running instance good for small use with 3-5 users.' | |||
|
68 | 138 | |
|
69 | 139 | ```shell |
|
70 | 140 | docker build -t rhodecode:4.23.2 -f rhodecode.dockerfile \ |
|
|
|
141 | --build-arg RHODECODE_TYPE=Community \ | |

142 | --build-arg RHODECODE_VERSION=4.23.2 \ | |

143 | --build-arg RHODECODE_DB=sqlite \ | |

144 | --build-arg RHODECODE_USER_NAME=admin \ | |

145 | --build-arg RHODECODE_USER_PASS=secret4 \ | |

146 | --build-arg RHODECODE_USER_EMAIL=support@rhodecode.com \ | |
|
77 | 147 | . |
|
78 | 148 | ``` |
|
79 | 149 | |
@@ -83,13 +153,6 b' To Build against existing running Postgres or MySQL you can specify:' | |||
|
83 | 153 | --build-arg RHODECODE_DB=postgresql://postgres:secret@database/rhodecode |
|
84 | 154 | --build-arg RHODECODE_DB=mysql://root:secret@localhost/rhodecode?charset=utf8 |
|
85 | 155 | |
|
86 | There are 4 volumes defined: | |
|
87 | ||
|
88 | - `/var/log/rhodecode` # all logs from RhodeCode are saved in this volume | |
|
89 | - `/etc/rhodecode/conf` # storing configuration files for rhodecode, vcsserver and supervisord, and some cache data | |
|
90 | - `/var/opt/rhodecode_repo_store` # main repository storage where repositories would be stored | |
|
91 | - `/var/opt/rhodecode_data` # data dir for rhodecode cache/lock files, or user sessions (for file backend) | |
|
92 | ||
|
93 | 156 | |
|
94 | 157 | To copy over the data into volumes use such command: |
|
95 | 158 | ```shell |