release of new installer/docker stack updates...
super-admin
@@ -0,0 +1,4 b''
1 .git/*
2
3 #.source/*
4 #.cache/* No newline at end of file
@@ -0,0 +1,19 b''
1 [security]
2 admin_user=admin
3 admin_password=admin
4
5 [users]
6 allow_sign_up=false
7
8 [smtp]
9 enabled=true
10 host=
11 user=
12 password=
13 from_address=alerting-grafana@grafana.localhost
14 from_name=[Grafana] Alerting
15
16 # subpath /_grafana
17 [server]
18 serve_from_sub_path=true
19 root_url=%(protocol)s://%(domain)s:%(http_port)s/_grafana/
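A quick sanity check once the metrics stack is up (a sketch; the exact host depends on your RHODECODE_HOSTNAME and router setup): Grafana should answer under the /_grafana prefix configured above.

    # expect a 200/302 from Grafana's login page behind the stack router
    curl -sI http://localhost/_grafana/login | head -n 1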
1 NO CONTENT: new file 100644
NO CONTENT: new file 100644
@@ -0,0 +1,3 b''
1 Store your certificate files here.
2 your-domain.crt
3 your-domain.key No newline at end of file
@@ -0,0 +1,42 b''
1 ## Static configuration, needs traefik reload
2
3 entryPoints:
4 http:
5 address: ":80"
6
7 https:
8 address: ":443"
9
10 # ssh:
11 # address: ":${RC_SSH_PORT:?must-specify-ssh-port}"
12
13 traefik:
14 address: ":7000"
15
16 api:
17 dashboard: false
18 insecure: false # change to true to expose dashboard
19
20 accessLog: {}
21
22 log:
23 # DEBUG / INFO
24 level: INFO
25
26 metrics:
27 prometheus: {}
28
29
30 providers:
31 docker:
32 exposedByDefault: false
33 swarmMode: false
34
35 file:
36 filename: "/etc/traefik/traefik_dynamic.yaml"
37 watch: true
38
39
40 serversTransport:
41 # this disables SSL certificate verification for backends that use legacy or self-signed certificates, e.g. NextCloud
42 insecureSkipVerify: true
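As the first line notes, this static configuration is only picked up when Traefik restarts. With the router stack defined later in this commit, a restart along these lines should be enough (a sketch; the stack command simply forwards extra arguments to docker compose):

    # reload Traefik's static configuration
    ./rccontrol stack router restart traefik
    # or recreate the whole router stack
    ./rccontrol stack router up -d --force-recreate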
@@ -0,0 +1,24 b''
1
2 ## Setting up the middleware for redirect to https ##
3 http:
4
5 middlewares:
6
7 https-redirect:
8 redirectScheme:
9 scheme: https
10 # permanent: true
11
12 middlewares-rate-limit:
13 rateLimit:
14 average: 100
15 burst: 50
16
17 # Dynamic configuration for a standard file-based SSL cert
18 # create a custom copy of the traefik config in .custom/ and remount it
19 # to enable custom certificates
20 #tls:
21 # certificates:
22 # # first certificate
23 # - certFile: /etc/traefik/certs/rhodecode-com.cert
24 # keyFile: /etc/traefik/certs/rhodecode-com.key
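To actually enable the commented tls section, the intent (per the comment above) is to keep an edited copy under .custom/ and remount it over the stock config; a minimal sketch, assuming the cert/key filenames used above and the ./config/traefik mount from the router compose file:

    # copy the stock config and uncomment/adjust the tls: block in the copy
    cp -r config/traefik .custom/traefik
    cp your-domain.crt .custom/traefik/certs/rhodecode-com.cert
    cp your-domain.key .custom/traefik/certs/rhodecode-com.key
    # then point the router override (RC_STACK_ROUTER_EXT) at a volume mapping
    # .custom/traefik to /etc/traefik in place of ./config/traefik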
@@ -0,0 +1,312 b''
1 version: '3.9'
2
3 x-logging: &custom-logging
4 # docker plugin install grafana/loki-docker-driver:latest --alias loki --grant-all-permissions
5 # NOTE: loki logging driver ONLY works for host type networks...
6 driver: loki
7 options:
8 loki-url: "http://127.0.0.1:3100/loki/api/v1/push"
9 #loki-url: "http://loki:3100/loki/api/v1/push"
10 loki-retries: "5"
11 loki-timeout: "1s"
12 loki-max-backoff: "800ms"
13
14 volumes:
15
16 # volume for RhodeCode repo-store, it's where the repositories will be stored
17 rhodecode_repos:
18 labels:
19 "keep": 1
20
21
22 services:
23
24 rhodecode:
25 networks:
26 - rhodecode_network
27 image: rhodecode/rhodecode-${RC_EDITION}:${RC_VERSION:?specify-RC_VERSION-env-var}
28 env_file:
29 - .custom/.runtime.env
30 stdin_open: true
31 tty: true
32 restart: always
33 command: [
34 "/usr/local/bin/rhodecode_bin/bin/gunicorn",
35 "--name=gunicorn-rhodecode-1",
36 "--error-logfile=-",
37 "--paster=/etc/rhodecode/conf/rhodecode.optimized.ini",
38 "--config=/etc/rhodecode/conf/gunicorn_conf.py"
39 ]
40
41 # ports:
42 # - "127.0.0.1::10020"
43
44 build:
45 context: .
46 dockerfile: service/rhodecode/rhodecode.dockerfile
47 args:
48 TZ: ${TZ}
49 RHODECODE_VERSION: ${RC_VERSION:?specify-RC_VERSION-env-var}
50 RHODECODE_DB: postgresql://rhodecode:${DB_PASSWORD:?must-specify-db-password}@database/${DB_NAME:?must-specify-db-name}
51 RHODECODE_USER_NAME: ${RHODECODE_USER_NAME}
52 RHODECODE_USER_PASS: ${RHODECODE_USER_PASS}
53 RHODECODE_USER_EMAIL: ${RHODECODE_USER_EMAIL}
54
55 environment:
56 RC_APP_TYPE: rhodecode_http
57 RC_APP_PROC: 1
58 SSL_CERT_FILE: "/etc/rhodecode/conf/ca-bundle.crt"
59 REQUESTS_CA_BUNDLE: "/etc/rhodecode/conf/ca-bundle.crt"
60 GIT_SSL_CAINFO: "/etc/rhodecode/conf/ca-bundle.crt"
61 GEVENT_RESOLVER: "ares"
62
63 DB_UPGRADE: 1 # run the DB upgrade
64 SETUP_APP: 1 # run the application default settings setup, can be turned off after initial run
65 #FORCE_DB_INIT_FILE: 1 # force the database init, warning: destroys old DB
66 #FORCE_RC_SETUP_APP: 1 # force running setup scripts for configuration/license application
67 MAIN_INI_PATH: /etc/rhodecode/conf/rhodecode.optimized.ini
68
69 # SVN Specific
70 MOD_DAV_SVN_PORT: 8090
71 APACHE_LOG_DIR: /var/log/rhodecode/svn
72 MOD_DAV_SVN_CONF_FILE: /etc/rhodecode/conf/svn/mod_dav_svn.conf
73
74 healthcheck:
75 test: [ "CMD", "curl", "-A", "RhodeCode-Healthcheck", "-s", "-o", "/dev/null", "-w", "'%{http_code}'", "http://127.0.0.1:10020/_admin/ops/ping" ]
76 timeout: 30s
77 interval: 60s
78 retries: 10
79
80 # depends_on:
81 # - database
82 # - redis
83 # - channelstream
84
85 volumes:
86 - confvolume:/etc/rhodecode/conf
87 - logvolume:/var/log/rhodecode
88 - rhodecode_repos:/var/opt/rhodecode_repo_store
89 - rc_datavolume:/var/opt/rhodecode_data
90
91 tmpfs:
92 - /data_ramdisk:size=${RC_DATA_RAMDISK_SIZE:?specify-RC_DATA_RAMDISK_SIZE-env-var}
93
94 logging:
95 *custom-logging
96
97 labels:
98 - "traefik.enable=false"
99
100 vcsserver:
101 networks:
102 - rhodecode_network
103 image: rhodecode/rhodecode-${RC_EDITION}:${RC_VERSION:?specify-RC_VERSION-env-var}
104 env_file:
105 - .custom/.runtime.env
106 stdin_open: true
107 tty: true
108 restart: always
109 command: [
110 "/home/rhodecode/.rccontrol/vcsserver-1/profile/bin/gunicorn",
111 "--name=gunicorn-vcsserver-1",
112 "--error-logfile=-",
113 "--paster=/etc/rhodecode/conf/vcsserver.optimized.ini",
114 "--config=/etc/rhodecode/conf/gunicorn_conf_vcs.py"
115 ]
116
117 # ports:
118 # - "127.0.0.1::10010"
119
120 healthcheck:
121 test: [ "CMD", "curl", "-A", "RhodeCode-Healthcheck", "-s", "-o", "/dev/null", "-w", "'%{http_code}'", "http://127.0.0.1:10010/status" ]
122 timeout: 30s
123 interval: 60s
124 retries: 10
125
126 environment:
127 RC_APP_TYPE: rhodecode_vcsserver
128 RC_APP_PROC: 1
129 MAIN_INI_PATH: /etc/rhodecode/conf/vcsserver.optimized.ini
130 SSL_CERT_FILE: "/etc/rhodecode/conf/ca-bundle.crt"
131 REQUESTS_CA_BUNDLE: "/etc/rhodecode/conf/ca-bundle.crt"
132 GIT_SSL_CAINFO: "/etc/rhodecode/conf/ca-bundle.crt"
133
134 # depends_on:
135 # - redis
136
137 volumes:
138 - confvolume:/etc/rhodecode/conf
139 - logvolume:/var/log/rhodecode
140 - rhodecode_repos:/var/opt/rhodecode_repo_store
141 - rc_datavolume:/var/opt/rhodecode_data
142
143 logging:
144 *custom-logging
145
146 celery:
147 networks:
148 - rhodecode_network
149 image: rhodecode/rhodecode-${RC_EDITION}:${RC_VERSION:?specify-RC_VERSION-env-var}
150 env_file:
151 - .custom/.runtime.env
152 stdin_open: true
153 tty: true
154 restart: always
155 command: [
156 "/usr/local/bin/rhodecode_bin/bin/celery",
157 "worker",
158 "--task-events",
159 "--autoscale=10,2",
160 "--no-color",
161 "--app=rhodecode.lib.celerylib.loader",
162 "--loglevel=DEBUG",
163 "--ini=/etc/rhodecode/conf/rhodecode.optimized.ini"
164 ]
165
166 environment:
167 RC_APP_TYPE: rhodecode_celery
168 RC_APP_PROC: 1
169 MAIN_INI_PATH: /etc/rhodecode/conf/rhodecode.optimized.ini
170 SSL_CERT_FILE: "/etc/rhodecode/conf/ca-bundle.crt"
171 REQUESTS_CA_BUNDLE: "/etc/rhodecode/conf/ca-bundle.crt"
172 GIT_SSL_CAINFO: "/etc/rhodecode/conf/ca-bundle.crt"
173
174 # depends_on:
175 # - database
176 # - redis
177
178 volumes:
179 - confvolume:/etc/rhodecode/conf
180 - logvolume:/var/log/rhodecode
181 - rhodecode_repos:/var/opt/rhodecode_repo_store
182 - rc_datavolume:/var/opt/rhodecode_data
183
184 logging:
185 *custom-logging
186
187 labels:
188 - "traefik.enable=false"
189
190 celery-beat:
191 # This service is not scalable
192 networks:
193 - rhodecode_network
194 image: rhodecode/rhodecode-${RC_EDITION}:${RC_VERSION:?specify-RC_VERSION-env-var}
195 env_file:
196 - .custom/.runtime.env
197 stdin_open: true
198 tty: true
199 restart: always
200 command: [
201 "/usr/local/bin/rhodecode_bin/bin/celery",
202 "beat",
203 "--no-color",
204 "--app=rhodecode.lib.celerylib.loader",
205 "--scheduler=rhodecode.lib.celerylib.scheduler.RcScheduler",
206 "--loglevel=DEBUG",
207 "--ini=/etc/rhodecode/conf/rhodecode.optimized.ini"
208 ]
209
210 environment:
211 RC_APP_TYPE: rhodecode_beat
212 RC_APP_PROC: 1
213 MAIN_INI_PATH: /etc/rhodecode/conf/rhodecode.optimized.ini
214 SSL_CERT_FILE: "/etc/rhodecode/conf/ca-bundle.crt"
215 REQUESTS_CA_BUNDLE: "/etc/rhodecode/conf/ca-bundle.crt"
216 GIT_SSL_CAINFO: "/etc/rhodecode/conf/ca-bundle.crt"
217
218 # depends_on:
219 # - database
220 # - redis
221
222 volumes:
223 - confvolume:/etc/rhodecode/conf
224 - logvolume:/var/log/rhodecode
225 - rhodecode_repos:/var/opt/rhodecode_repo_store
226 - rc_datavolume:/var/opt/rhodecode_data
227
228 logging:
229 *custom-logging
230
231 labels:
232 - "traefik.enable=false"
233
234 svn:
235 networks:
236 - rhodecode_network
237 image: rhodecode/rhodecode-${RC_EDITION}:${RC_VERSION:?specify-RC_VERSION-env-var}
238 env_file:
239 - .custom/.runtime.env
240
241 # build:
242 # context: .
243 # dockerfile: service/svn/rhodecode_svn.dockerfile
244 # args:
245 # APACHE_VER: 1.3
246
247 stdin_open: true
248 tty: true
249 restart: always
250 command: ["apachectl", "-D", "FOREGROUND"]
251
252 environment:
253 RC_APP_TYPE: rhodecode_svn
254
255 # SVN Specific
256 MOD_DAV_SVN_PORT: 8090
257 APACHE_LOG_DIR: /var/log/rhodecode/svn
258 MOD_DAV_SVN_CONF_FILE: /etc/rhodecode/conf/svn/mod_dav_svn.conf
259
260 # ports:
261 # - "127.0.0.1::8090"
262
263 healthcheck:
264 test: [ "CMD", "curl", "-A", "RhodeCode-Healthcheck", "-s", "-o", "/dev/null", "-w", "'%{http_code}'", "http://127.0.0.1:8090/_server_status" ]
265 timeout: 30s
266 interval: 60s
267 retries: 10
268
269 volumes:
270 - confvolume:/etc/rhodecode/conf
271 - logvolume:/var/log/rhodecode
272 - rhodecode_repos:/var/opt/rhodecode_repo_store
273
274 logging:
275 *custom-logging
276
277 sshd:
278 networks:
279 - rhodecode_network
280 image: rhodecode/rhodecode-${RC_EDITION}:${RC_VERSION:?specify-RC_VERSION-env-var}
281 env_file:
282 - .custom/.runtime.env
283
284 stdin_open: true
285 tty: true
286 restart: always
287 command: ["/usr/sbin/sshd", "-f", "/etc/rhodecode/sshd_config", "-D", "-e"]
288
289 environment:
290 RC_APP_TYPE: rhodecode_sshd
291 SSH_BOOTSTRAP: 1
292
293 # ports:
294 # # set from .env file
295 # - "${RC_SSH_PORT:?must-specify-ssh-port}:22"
296
297 volumes:
298 - confvolume:/etc/rhodecode/conf
299 - logvolume:/var/log/rhodecode
300 - rhodecode_repos:/var/opt/rhodecode_repo_store
301 - rc_datavolume:/var/opt/rhodecode_data
302
303 logging:
304 *custom-logging
305
306 labels:
307 - "traefik.enable=true"
308 - "traefik.http.routers.sshd.entrypoints=ssh"
309 - "traefik.http.routers.sshd.rule=Host(`*`)"
310 - "traefik.http.services.sshd.loadbalancer.server.port=${RC_SSH_PORT:?must-specify-ssh-port}"
311
312
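This file is always combined with docker-compose-base.yaml under the rc_cluster_apps project by the rccontrol wrapper added later in this commit; a typical run might look like this (sketch):

    # bring up the RhodeCode application stack (rhodecode, vcsserver, celery, celery-beat, svn, sshd)
    ./rccontrol stack rhodecode up -d
    # check container status of the apps stack
    ./rccontrol stack rhodecode ps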
@@ -0,0 +1,46 b''
1 version: '3.9'
2
3 ## Shared base stuff for all compose files in stack
4
5 x-logging: &custom-logging
6 # docker plugin install grafana/loki-docker-driver:latest --alias loki --grant-all-permissions
7 # NOTE: loki logging driver ONLY works for host type networks...
8 driver: loki
9 options:
10 loki-url: "http://127.0.0.1:3100/loki/api/v1/push"
11 #loki-url: "http://loki:3100/loki/api/v1/push"
12 loki-retries: "5"
13 loki-timeout: "1s"
14 loki-max-backoff: "800ms"
15
16 volumes:
17 # shared logvolume
18 logvolume:
19 driver: local
20 driver_opts:
21 type: none
22 o: bind
23 device: $PWD/logs
24
25 # bind-mount with configs
26 confvolume:
27 driver: local
28 driver_opts:
29 type: none
30 o: bind
31 device: $PWD/config/_shared
32
33 # SHARED volume for rhodecode caches, archive caches, nginx static,
34 # must be run via: docker volume create --name=rc_datavolume
35 rc_datavolume:
36 external: true
37
38 networks:
39
40 # SHARED network for all containers
41 # must be run via: docker network create rhodecode_network
42 rhodecode_network:
43 name: rhodecode_network
44 driver: bridge
45 external: true
46
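The external volume and network declared above must exist before any stack starts; the bootstrap command below creates them, and a manual equivalent (plus the directories the bind mounts expect, an assumption based on the device: paths) would be roughly:

    docker volume create --name=rc_datavolume
    docker network create rhodecode_network
    mkdir -p "$PWD/logs" "$PWD/config/_shared"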
@@ -0,0 +1,78 b''
1 version: '3.9'
2
3 x-logging: &custom-logging
4 # docker plugin install grafana/loki-docker-driver:latest --alias loki --grant-all-permissions
5 # NOTE: loki logging driver ONLY works for host type networks...
6 driver: loki
7 options:
8 loki-url: "http://127.0.0.1:3100/loki/api/v1/push"
9 #loki-url: "http://loki:3100/loki/api/v1/push"
10 loki-retries: "5"
11 loki-timeout: "1s"
12 loki-max-backoff: "800ms"
13
14 ## docker network create -d overlay lb-net
15 services:
16
17 traefik:
18
19 image: traefik:v2.9.5
20
21 ports:
22 # The HTTP port, exposed as http entrypoint
23 - "80:80"
24 # The HTTPS port, exposed as https entrypoint
25 - "443:443"
26 # The SSH port
27 - "${RC_SSH_PORT}:${RC_SSH_PORT}"
28 # The Web UI (enabled by --api.insecure=true)
29 # uncomment to expose dashboard at port :7000
30 #- "7000:7000"
31 volumes:
32 # So that Traefik can listen to the Docker events
33 - /var/run/docker.sock:/var/run/docker.sock
34 - ./config/traefik:/etc/traefik
35 deploy:
36 placement:
37 constraints:
38 # limit swarm deploy to MANAGER only
39 - node.role == manager
40 networks:
41 - rhodecode_network
42
43 labels:
44 - "traefik.enable=true"
45
46 logging:
47 *custom-logging
48
49 portainer:
50 # Run with COMPOSE_PROFILES=portainer
51 # to access portainer set HEADER `X-Docker-Host=portainer`
52 image: portainer/portainer-ce:latest
53 restart: always
54 volumes:
55 - portainer_data:/data
56 - /var/run/docker.sock:/var/run/docker.sock
57 deploy:
58 mode: replicated
59 replicas: 1
60 placement:
61 constraints:
62 # limit swarm deploy to MANAGER only
63 - node.role == manager
64
65 networks:
66 - rhodecode_network
67 labels:
68 - "traefik.enable=true"
69 - "traefik.http.services.portainer.loadbalancer.server.port=9000"
70 - "traefik.http.routers.portainer.entrypoints=https"
71 - "traefik.http.routers.portainer.rule=Headers(`X-Docker-Host`, `portainer`)"
72
73 profiles:
74 ["portainer"]
75
76 volumes:
77 portainer_data:
78 external: true No newline at end of file
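Portainer is opt-in via its compose profile and is routed purely on the X-Docker-Host header rather than a hostname; a sketch of using it (the host name is a placeholder):

    # start the router stack with portainer enabled
    COMPOSE_PROFILES=portainer ./rccontrol stack router up -d
    # reach the portainer UI through traefik via the header rule
    curl -k -H 'X-Docker-Host: portainer' https://your-host/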
@@ -0,0 +1,240 b''
1 version: '3.9'
2
3 x-logging: &custom-logging
4 # docker plugin install grafana/loki-docker-driver:latest --alias loki --grant-all-permissions
5 # NOTE: loki logging driver ONLY works for host type networks...
6 driver: loki
7 options:
8 loki-url: "http://127.0.0.1:3100/loki/api/v1/push"
9 #loki-url: "http://loki:3100/loki/api/v1/push"
10 loki-retries: "5"
11 loki-timeout: "1s"
12 loki-max-backoff: "800ms"
13
14 volumes:
15
16 # volume for redis data store
17 redis_data:
18 labels:
19 "keep": 1
20
21 # volume for Postgres db store
22
23 # volume for Postgres Data
24 pg_data:
25 labels:
26 "keep": 1
27
28 # volume for rhodecode elasticsearch
29 es_data:
30 labels:
31 "keep": 1
32
33
34 services:
35
36 channelstream:
37 networks:
38 - rhodecode_network
39 image: channelstream/channelstream:0.7.1
40
41 restart: always
42
43 # ports:
44 # - "127.0.0.1:9800:9800"
45
46 command: ["channelstream", "-i", "/etc/rhodecode/conf/channelstream.ini"]
47
48 environment:
49 CHANNELSTREAM_ALLOW_POSTING_FROM: 0.0.0.0
50
51 healthcheck:
52 test: [ "CMD", "curl", "-s", "-o", "/dev/null", "-w", "'%{http_code}'", "http://channelstream:8000/admin/sign_in" ]
53 timeout: 5s
54 interval: 60s
55 retries: 10
56
57 volumes:
58 - confvolume:/etc/rhodecode/conf
59 - logvolume:/var/log/rhodecode
60
61 logging:
62 *custom-logging
63
64 labels:
65 - "traefik.enable=false"
66 #- "traefik.http.routers.channelstream.entrypoints=http"
67 #- "traefik.http.services.channelstream.loadbalancer.server.port=9800"
68
69 profiles:
70 ["channelstream"]
71
72 nginx:
73 networks:
74 - rhodecode_network
75 image: library/nginx:1.23.2
76
77 restart: always
78
79 environment:
80 NGINX_ENTRYPOINT_QUIET_LOGS: 1
81
82 healthcheck:
83 # change port 80 to 443 when only using SSL
84 test: [ "CMD", "curl", "-A", "RhodeCode-Healthcheck", "-s", "-o", "/dev/null", "-w", "'%{http_code}'", "http://127.0.0.1:80/_admin/ops/ping" ]
85 timeout: 30s
86 interval: 60s
87 retries: 10
88
89 # depends_on:
90 # - channelstream
91
92 volumes:
93 - ./config/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
94 - ./config/nginx/http.conf:/etc/nginx/http.conf:ro
95 - ./config/nginx/proxy.conf:/etc/nginx/proxy.conf:ro
96 - logvolume:/var/log/rhodecode
97 - rc_datavolume:/var/opt/rhodecode_data
98
99 logging:
100 *custom-logging
101
102 labels:
103 - "traefik.enable=true"
104 - "traefik.http.routers.nginx.entrypoints=http"
105 - "traefik.http.services.nginx.loadbalancer.server.port=80"
106 - "traefik.http.routers.nginx.rule=Host(`${RHODECODE_HOSTNAME:?must-specify-rhodecode-hostname}`)"
107
108 elasticsearch:
109 networks:
110 - rhodecode_network
111 image: elasticsearch:6.8.23
112
113 environment:
114 - cluster.name=elasticsearch-cluster
115 - network.host=0.0.0.0
116 - bootstrap.memory_lock=true
117 - discovery.type=single-node
118 - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
119 healthcheck:
120 # change port 80 to 443 when only using SSL
121 test: [ "CMD", "curl", "-A", "RhodeCode-Healthcheck", "-s", "-o", "/dev/null", "-w", "'%{http_code}'", "elasticsearch:9200/_cat/health" ]
122 timeout: 30s
123 interval: 60s
124 retries: 10
125
126 ulimits:
127 memlock:
128 soft: -1
129 hard: -1
130
131 volumes:
132 - es_data:/usr/share/elasticsearch/data
133
134 logging:
135 *custom-logging
136
137 profiles:
138 ["elasticsearch"]
139
140 redis:
141 networks:
142 - rhodecode_network
143 image: library/redis:7.0.5
144
145 restart: always
146 command:
147 - "redis-server"
148 - "/etc/redis/redis.conf"
149 - "--maxmemory-policy allkeys-lru"
150 - "--maxmemory ${RC_REDIS_MAXMEMORY}"
151
152 # ports:
153 # - "127.0.0.1::6379"
154
155 healthcheck:
156 test: [ "CMD", "redis-cli", "--raw", "incr", "ping" ]
157 interval: 60s
158
159 volumes:
160 - ./config/redis/redis.conf:/etc/redis/redis.conf:ro
161 - logvolume:/var/log/rhodecode
162 - redis_data:/data
163
164 profiles:
165 ["redis"]
166
167 logging:
168 *custom-logging
169
170 database:
171 networks:
172 - rhodecode_network
173 image: library/postgres:14.6
174
175 environment:
176 POSTGRES_DB: ${DB_NAME:?must-specify-db-name}
177 POSTGRES_USER: ${DB_USER:?must-specify-db-user}
178 PGUSER: ${DB_USER:?must-specify-db-user}
179 POSTGRES_PASSWORD: ${DB_PASSWORD:?must-specify-db-password}
180 POSTGRES_HOST_AUTH_METHOD: md5
181 POSTGRES_INITDB_ARGS: "--auth-host=md5 --auth-local=md5"
182
183 restart: unless-stopped
184 command:
185 - "postgres"
186 - "-c"
187 - "log_statement=ddl"
188 - "-c"
189 - "config_file=/etc/conf.d/pg_customized.conf"
190
191 # ports:
192 # - "127.0.0.1::5432"
193
194 healthcheck:
195 test: ["CMD-SHELL", "pg_isready", '-U', "$DB_USER"]
196 interval: 10s
197 timeout: 5s
198 retries: 5
199
200 volumes:
201 - $PWD/config/database/pg_customized.conf:/etc/conf.d/pg_customized.conf:ro
202 # db dumps reverse mount
203 - $PWD/.custom/db_dump:/var/rc-data-dump
204 # save the pg_data volume
205 - pg_data:/var/lib/postgresql/data
206 - logvolume:/var/log/rhodecode
207
208 profiles:
209 ["postgres"]
210
211 logging:
212 *custom-logging
213
214 database-mysql:
215 networks:
216 - rhodecode_network
217 image: library/mysql:8.0.31
218
219 environment:
220 MYSQL_DATABASE: ${DB_NAME:?must-specify-db-name}
221 MYSQL_USER: ${DB_USER:?must-specify-db-user}
222 MYSQL_PASSWORD: ${DB_PASSWORD:?must-specify-db-password}
223 MYSQL_ROOT_PASSWORD: ${DB_PASSWORD:?must-specify-db-password}
224
225 restart: unless-stopped
226
227 # ports:
228 # - "127.0.0.1::3306"
229
230 volumes:
231 - ./config/database/mysql_customized.conf:/etc/mysql/conf.d/config-file.cnf:ro
232 # save the mysql_data volume
233 - $PWD/mysql_dir:/var/lib/mysql
234 - logvolume:/var/log/rhodecode
235
236 profiles:
237 ["mysql"]
238
239 logging:
240 *custom-logging
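Every service in this file sits behind a compose profile (postgres, mysql, redis, elasticsearch, channelstream); the rccontrol wrapper below enables postgres/redis/elasticsearch/channelstream by default, so ./rccontrol stack services up -d expands to roughly this (sketch, assuming ENV_FILE points at the stack .env):

    docker compose --env-file .env \
      --profile postgres --profile redis --profile elasticsearch --profile channelstream \
      -p rc_cluster_services \
      -f docker-compose-base.yaml -f docker-compose-services.yaml up -d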
@@ -0,0 +1,1 b''
1 scripts/rccontrol/rccontrol No newline at end of file
This diff has been collapsed as it changes many lines (2889 lines changed).
@@ -0,0 +1,2889 b''
1 #!/usr/bin/env bash
2 # This script was generated by bashly 0.8.10 (https://bashly.dannyb.co)
3 # Modifying it manually is not recommended
4
5 # :wrapper.bash3_bouncer
6 if [[ "${BASH_VERSINFO:-0}" -lt 4 ]]; then
7 printf "bash version 4 or higher is required\n" >&2
8 exit 1
9 fi
10
11 # :command.master_script
12
13 # :command.version_command
14 version_command() {
15 echo "$version"
16 }
17
18 # :command.usage
19 rccontrol_usage() {
20 if [[ -n $long_usage ]]; then
21 printf "rccontrol - RhodeCode Control - CLI for manaing RhodeCode Cluster Stack\n"
22 echo
23
24 else
25 printf "rccontrol - RhodeCode Control - CLI for manaing RhodeCode Cluster Stack\n"
26 echo
27
28 fi
29
30 printf "Usage:\n"
31 printf " rccontrol [OPTIONS] COMMAND\n"
32 printf " rccontrol [COMMAND] --help | -h\n"
33 printf " rccontrol --version | -v\n"
34 echo
35 # :command.usage_commands
36 printf "Commands:\n"
37 echo " self-update update rccontrol and it's docker definitions"
38 echo " bootstrap Bootstrap this machine, check docker version and install rhodecode-network"
39 echo " stack run one of available cluster stacks, use -h for more details"
40 echo " stack-status show stack status"
41 echo " stack-upgrade upgrade ALL stack status"
42 echo
43 printf "Build Commands:\n"
44 echo " get-build-artifacts Fetch Artifacts to run installer based build"
45 echo " build Build RhodeCode image from installer"
46 echo " get-build-source Fetch RhodeCode sources, store in .source dir to run a source-based builds"
47 echo " build-source Build RhodeCode image from source, requires upgrade-source initially"
48 echo
49 printf "CLI Commands:\n"
50 echo " cli-redis CLI"
51 echo " cli-db CLI"
52 echo " cli-db-upgrade CLI"
53 echo
54 printf "Backup Commands:\n"
55 echo " backup-db CLI"
56 echo " backup-data CLI"
57 echo
58
59 # :command.long_usage
60 if [[ -n $long_usage ]]; then
61 printf "Options:\n"
62
63 # :command.usage_fixed_flags
64 echo " --help, -h"
65 printf " Show this help\n"
66 echo
67 echo " --version, -v"
68 printf " Show version number\n"
69 echo
70
71 # :command.usage_flags
72 # :flag.usage
73 echo " --debug"
74 printf " Enable debug and detailed output\n"
75 echo
76
77 # :command.usage_environment_variables
78 printf "Environment Variables:\n"
79
80 # :environment_variable.usage
81 echo " AUTH_TOKEN"
82 printf " Set your RhodeCode AUTH Token\n"
83 echo
84
85 # :environment_variable.usage
86 echo " RC_CLI_VERSION_NAME"
87 printf " default version to build and install\n"
88 printf " Default: 4.27.0\n"
89 echo
90
91 # :environment_variable.usage
92 echo " RC_STACK_ROUTER_EXT"
93 printf "\n"
94 printf " Default: .custom/docker-compose-router.override.yaml\n"
95 echo
96
97 # :environment_variable.usage
98 echo " RC_STACK_METRICS_EXT"
99 printf "\n"
100 printf " Default: .custom/docker-compose-metrics.override.yaml\n"
101 echo
102
103 # :environment_variable.usage
104 echo " RC_STACK_SERVICES_EXT"
105 printf "\n"
106 printf " Default: .custom/docker-compose-services.override.yaml\n"
107 echo
108
109 # :environment_variable.usage
110 echo " RC_STACK_RHODECODE_EXT"
111 printf "\n"
112 printf " Default: .custom/docker-compose-apps.override.yaml\n"
113 echo
114
115 # :command.footer
116 printf "RhodeCode Inc 2022\n\n"
117 echo
118
119 fi
120 }
121
122 # :command.usage
123 rccontrol_self_update_usage() {
124 if [[ -n $long_usage ]]; then
125 printf "rccontrol self-update - update rccontrol and it's docker definitions\n"
126 echo
127
128 else
129 printf "rccontrol self-update - update rccontrol and it's docker definitions\n"
130 echo
131
132 fi
133
134 printf "Usage:\n"
135 printf " rccontrol self-update [OPTIONS]\n"
136 printf " rccontrol self-update --help | -h\n"
137 echo
138
139 # :command.long_usage
140 if [[ -n $long_usage ]]; then
141 printf "Options:\n"
142
143 # :command.usage_fixed_flags
144 echo " --help, -h"
145 printf " Show this help\n"
146 echo
147
148 # :command.usage_flags
149 # :flag.usage
150 echo " --auth-token AUTH_TOKEN"
151 printf " Optionally specify AUTH TOKEN to obtain sources\n"
152 echo
153
154 # :flag.usage
155 echo " --server-url SERVER_URL"
156 printf " Specify RhodeCode server location where projects should be downloaded\n"
157 printf " Default: https://code.rhodecode.com\n"
158 echo
159
160 fi
161 }
162
163 # :command.usage
164 rccontrol_bootstrap_usage() {
165 if [[ -n $long_usage ]]; then
166 printf "rccontrol bootstrap - Bootstrap this machine, check docker version and install rhodecode-network\n"
167 echo
168
169 else
170 printf "rccontrol bootstrap - Bootstrap this machine, check docker version and install rhodecode-network\n"
171 echo
172
173 fi
174
175 printf "Alias: install\n"
176 echo
177
178 printf "Usage:\n"
179 printf " rccontrol bootstrap [OPTIONS]\n"
180 printf " rccontrol bootstrap --help | -h\n"
181 echo
182
183 # :command.long_usage
184 if [[ -n $long_usage ]]; then
185 printf "Options:\n"
186
187 # :command.usage_fixed_flags
188 echo " --help, -h"
189 printf " Show this help\n"
190 echo
191
192 # :command.usage_flags
193 # :flag.usage
194 echo " --force, -f"
195 printf " Overwrite existing files\n"
196 echo
197
198 # :flag.usage
199 echo " --auth-token AUTH_TOKEN"
200 printf " Optionally specify AUTH TOKEN to obtain sources\n"
201 echo
202
203 # :flag.usage
204 echo " --server-url SERVER_URL"
205 printf " Specify RhodeCode server location where projects should be downloaded\n"
206 printf " Default: https://code.rhodecode.com\n"
207 echo
208
209 # :command.usage_examples
210 printf "Examples:\n"
211 printf " rccontrol3 bootstrap\n"
212 printf " rccontrol3 bootstrap --force\n"
213 echo
214
215 fi
216 }
217
218 # :command.usage
219 rccontrol_get_build_artifacts_usage() {
220 if [[ -n $long_usage ]]; then
221 printf "rccontrol get-build-artifacts - Fetch Artifacts to run installer based build\n"
222 echo
223
224 else
225 printf "rccontrol get-build-artifacts - Fetch Artifacts to run installer based build\n"
226 echo
227
228 fi
229
230 printf "Usage:\n"
231 printf " rccontrol get-build-artifacts [OPTIONS]\n"
232 printf " rccontrol get-build-artifacts --help | -h\n"
233 echo
234
235 # :command.long_usage
236 if [[ -n $long_usage ]]; then
237 printf "Options:\n"
238
239 # :command.usage_fixed_flags
240 echo " --help, -h"
241 printf " Show this help\n"
242 echo
243
244 # :command.usage_flags
245 # :flag.usage
246 echo " --auth AUTH"
247 printf " Specify custom auth for curl e.g -u admin:secret\n"
248 printf " Default: \n"
249 echo
250
251 # :flag.usage
252 echo " --installer-url INSTALLER_URL"
253 printf " Installer Download URL\n"
254 printf " Default: https://dls.rhodecode.com/dls/N2E2ZTY1NzA3NjYxNDA2NTc1NjI3MTcyNzA2MjcxNzIyZTcwNjI3YQ==/rhodecode-control/latest-linux-ee\n"
255 echo
256
257 # :flag.usage
258 echo " --manifest-url MANIFEST_URL"
259 printf " Manifest file url\n"
260 printf " Default: https://dls.rhodecode.com/linux/MANIFEST\n"
261 echo
262
263 # :flag.usage
264 echo " --version-name VERSION_NAME"
265 printf " Specify custom build ver e.g $RC_CLI_VERSION_NAME\n"
266 printf " Default: $RC_CLI_VERSION_NAME\n"
267 echo
268
269 fi
270 }
271
272 # :command.usage
273 rccontrol_build_usage() {
274 if [[ -n $long_usage ]]; then
275 printf "rccontrol build - Build RhodeCode image from installer\n"
276 echo
277
278 else
279 printf "rccontrol build - Build RhodeCode image from installer\n"
280 echo
281
282 fi
283
284 printf "Usage:\n"
285 printf " rccontrol build [OPTIONS]\n"
286 printf " rccontrol build --help | -h\n"
287 echo
288
289 # :command.long_usage
290 if [[ -n $long_usage ]]; then
291 printf "Options:\n"
292
293 # :command.usage_fixed_flags
294 echo " --help, -h"
295 printf " Show this help\n"
296 echo
297
298 # :command.usage_flags
299 # :flag.usage
300 echo " --version-name VERSION_NAME"
301 printf " Specify custom build ver e.g $RC_CLI_VERSION_NAME\n"
302 printf " Default: $RC_CLI_VERSION_NAME\n"
303 echo
304
305 fi
306 }
307
308 # :command.usage
309 rccontrol_get_build_source_usage() {
310 if [[ -n $long_usage ]]; then
311 printf "rccontrol get-build-source - Fetch RhodeCode sources, store in .source dir to run a source-based builds\n"
312 echo
313
314 else
315 printf "rccontrol get-build-source - Fetch RhodeCode sources, store in .source dir to run a source-based builds\n"
316 echo
317
318 fi
319
320 printf "Usage:\n"
321 printf " rccontrol get-build-source REVISION [OPTIONS]\n"
322 printf " rccontrol get-build-source --help | -h\n"
323 echo
324
325 # :command.long_usage
326 if [[ -n $long_usage ]]; then
327 printf "Options:\n"
328
329 # :command.usage_fixed_flags
330 echo " --help, -h"
331 printf " Show this help\n"
332 echo
333
334 # :command.usage_flags
335 # :flag.usage
336 echo " --auth-token AUTH_TOKEN"
337 printf " Specify AUTH TOKEN to obtain sources\n"
338 echo
339
340 # :flag.usage
341 echo " --server-url SERVER_URL"
342 printf " Specify RhodeCode server location where projects should be downloaded\n"
343 printf " Default: https://code.rhodecode.com\n"
344 echo
345
346 # :command.usage_args
347 printf "Arguments:\n"
348
349 # :argument.usage
350 echo " REVISION"
351 printf " revision to download\n"
352 echo
353
354 # :command.usage_examples
355 printf "Examples:\n"
356 printf " rccontrol3 get-sources $RC_CLI_VERSION_NAME\n"
357 printf " rccontrol3 get-sources default --auth-token xyxyxyx --server-url\n https://secret.repo/\n"
358 echo
359
360 fi
361 }
362
363 # :command.usage
364 rccontrol_build_source_usage() {
365 if [[ -n $long_usage ]]; then
366 printf "rccontrol build-source - Build RhodeCode image from source, requires upgrade-source initially\n"
367 echo
368
369 else
370 printf "rccontrol build-source - Build RhodeCode image from source, requires upgrade-source initially\n"
371 echo
372
373 fi
374
375 printf "Usage:\n"
376 printf " rccontrol build-source [OPTIONS]\n"
377 printf " rccontrol build-source --help | -h\n"
378 echo
379
380 # :command.long_usage
381 if [[ -n $long_usage ]]; then
382 printf "Options:\n"
383
384 # :command.usage_fixed_flags
385 echo " --help, -h"
386 printf " Show this help\n"
387 echo
388
389 # :command.usage_flags
390 # :flag.usage
391 echo " --version-name VERSION_NAME"
392 printf " Specify custom build ver e.g $RC_CLI_VERSION_NAME\n"
393 printf " Default: $RC_CLI_VERSION_NAME\n"
394 echo
395
396 # :command.usage_examples
397 printf "Examples:\n"
398 printf " build foo\n"
399 echo
400
401 fi
402 }
403
404 # :command.usage
405 rccontrol_stack_usage() {
406 if [[ -n $long_usage ]]; then
407 printf "rccontrol stack - run one of available cluster stacks, use -h for more details\n"
408 echo
409
410 else
411 printf "rccontrol stack - run one of available cluster stacks, use -h for more details\n"
412 echo
413
414 fi
415
416 printf "Usage:\n"
417 printf " rccontrol stack NAME [SERVICES PARAMS...]\n"
418 printf " rccontrol stack --help | -h\n"
419 echo
420
421 # :command.long_usage
422 if [[ -n $long_usage ]]; then
423 printf "Options:\n"
424
425 # :command.usage_fixed_flags
426 echo " --help, -h"
427 printf " Show this help\n"
428 echo
429
430 # :command.usage_args
431 printf "Arguments:\n"
432
433 # :argument.usage
434 echo " NAME"
435 printf " Stack name\n"
436 printf " Allowed: router, metrics, services, rhodecode\n"
437 echo
438
439 echo " SERVICES PARAMS..."
440 printf " Additional arguments or flags for services command\n"
441 echo
442
443 # :command.usage_examples
444 printf "Examples:\n"
445 printf " - ./rccontrol3 stack router up # run router stack with output to\n console\n - ./rccontrol3 stack router up --detach # run router stack detached\n - ./rccontrol3 stack router down # stop whole router stack\n - ./rccontrol3 stack router ps # check status of router stack\n - ./rccontrol3 stack router -f docker-overrides.yaml up -d # run router stack\n with your overrides\n"
446 echo
447
448 fi
449 }
450
451 # :command.usage
452 rccontrol_stack_status_usage() {
453 if [[ -n $long_usage ]]; then
454 printf "rccontrol stack-status - show stack status\n"
455 echo
456
457 else
458 printf "rccontrol stack-status - show stack status\n"
459 echo
460
461 fi
462
463 printf "Alias: status\n"
464 echo
465
466 printf "Usage:\n"
467 printf " rccontrol stack-status\n"
468 printf " rccontrol stack-status --help | -h\n"
469 echo
470
471 # :command.long_usage
472 if [[ -n $long_usage ]]; then
473 printf "Options:\n"
474
475 # :command.usage_fixed_flags
476 echo " --help, -h"
477 printf " Show this help\n"
478 echo
479
480 fi
481 }
482
483 # :command.usage
484 rccontrol_stack_upgrade_usage() {
485 if [[ -n $long_usage ]]; then
486 printf "rccontrol stack-upgrade - upgrade ALL stack status\n"
487 echo
488
489 else
490 printf "rccontrol stack-upgrade - upgrade ALL stack status\n"
491 echo
492
493 fi
494
495 printf "Usage:\n"
496 printf " rccontrol stack-upgrade\n"
497 printf " rccontrol stack-upgrade --help | -h\n"
498 echo
499
500 # :command.long_usage
501 if [[ -n $long_usage ]]; then
502 printf "Options:\n"
503
504 # :command.usage_fixed_flags
505 echo " --help, -h"
506 printf " Show this help\n"
507 echo
508
509 fi
510 }
511
512 # :command.usage
513 rccontrol_cli_redis_usage() {
514 if [[ -n $long_usage ]]; then
515 printf "rccontrol cli-redis - CLI\n"
516 echo
517
518 else
519 printf "rccontrol cli-redis - CLI\n"
520 echo
521
522 fi
523
524 printf "Usage:\n"
525 printf " rccontrol cli-redis\n"
526 printf " rccontrol cli-redis --help | -h\n"
527 echo
528
529 # :command.long_usage
530 if [[ -n $long_usage ]]; then
531 printf "Options:\n"
532
533 # :command.usage_fixed_flags
534 echo " --help, -h"
535 printf " Show this help\n"
536 echo
537
538 fi
539 }
540
541 # :command.usage
542 rccontrol_cli_db_usage() {
543 if [[ -n $long_usage ]]; then
544 printf "rccontrol cli-db - CLI\n"
545 echo
546
547 else
548 printf "rccontrol cli-db - CLI\n"
549 echo
550
551 fi
552
553 printf "Usage:\n"
554 printf " rccontrol cli-db\n"
555 printf " rccontrol cli-db --help | -h\n"
556 echo
557
558 # :command.long_usage
559 if [[ -n $long_usage ]]; then
560 printf "Options:\n"
561
562 # :command.usage_fixed_flags
563 echo " --help, -h"
564 printf " Show this help\n"
565 echo
566
567 fi
568 }
569
570 # :command.usage
571 rccontrol_cli_db_upgrade_usage() {
572 if [[ -n $long_usage ]]; then
573 printf "rccontrol cli-db-upgrade - CLI\n"
574 echo
575
576 else
577 printf "rccontrol cli-db-upgrade - CLI\n"
578 echo
579
580 fi
581
582 printf "Usage:\n"
583 printf " rccontrol cli-db-upgrade\n"
584 printf " rccontrol cli-db-upgrade --help | -h\n"
585 echo
586
587 # :command.long_usage
588 if [[ -n $long_usage ]]; then
589 printf "Options:\n"
590
591 # :command.usage_fixed_flags
592 echo " --help, -h"
593 printf " Show this help\n"
594 echo
595
596 fi
597 }
598
599 # :command.usage
600 rccontrol__completions_usage() {
601 if [[ -n $long_usage ]]; then
602 printf "rccontrol _completions - Generate completions, eval "$(./rccontrol _completions)"\n"
603 echo
604
605 else
606 printf "rccontrol _completions - Generate completions, eval "$(./rccontrol _completions)"\n"
607 echo
608
609 fi
610
611 printf "Usage:\n"
612 printf " rccontrol _completions\n"
613 printf " rccontrol _completions --help | -h\n"
614 echo
615
616 # :command.long_usage
617 if [[ -n $long_usage ]]; then
618 printf "Options:\n"
619
620 # :command.usage_fixed_flags
621 echo " --help, -h"
622 printf " Show this help\n"
623 echo
624
625 fi
626 }
627
628 # :command.usage
629 rccontrol_backup_db_usage() {
630 if [[ -n $long_usage ]]; then
631 printf "rccontrol backup-db - CLI\n"
632 echo
633
634 else
635 printf "rccontrol backup-db - CLI\n"
636 echo
637
638 fi
639
640 printf "Usage:\n"
641 printf " rccontrol backup-db\n"
642 printf " rccontrol backup-db --help | -h\n"
643 echo
644
645 # :command.long_usage
646 if [[ -n $long_usage ]]; then
647 printf "Options:\n"
648
649 # :command.usage_fixed_flags
650 echo " --help, -h"
651 printf " Show this help\n"
652 echo
653
654 # :command.usage_examples
655 printf "Examples:\n"
656 printf " ./rccontrol backup-db\n"
657 echo
658
659 fi
660 }
661
662 # :command.usage
663 rccontrol_backup_data_usage() {
664 if [[ -n $long_usage ]]; then
665 printf "rccontrol backup-data - CLI\n"
666 echo
667
668 else
669 printf "rccontrol backup-data - CLI\n"
670 echo
671
672 fi
673
674 printf "Usage:\n"
675 printf " rccontrol backup-data\n"
676 printf " rccontrol backup-data --help | -h\n"
677 echo
678
679 # :command.long_usage
680 if [[ -n $long_usage ]]; then
681 printf "Options:\n"
682
683 # :command.usage_fixed_flags
684 echo " --help, -h"
685 printf " Show this help\n"
686 echo
687
688 fi
689 }
690
691 # :command.normalize_input
692 normalize_input() {
693 local arg flags
694
695 while [[ $# -gt 0 ]]; do
696 arg="$1"
697 if [[ $arg =~ ^(--[a-zA-Z0-9_\-]+)=(.+)$ ]]; then
698 input+=("${BASH_REMATCH[1]}")
699 input+=("${BASH_REMATCH[2]}")
700 elif [[ $arg =~ ^(-[a-zA-Z0-9])=(.+)$ ]]; then
701 input+=("${BASH_REMATCH[1]}")
702 input+=("${BASH_REMATCH[2]}")
703 elif [[ $arg =~ ^-([a-zA-Z0-9][a-zA-Z0-9]+)$ ]]; then
704 flags="${BASH_REMATCH[1]}"
705 for (( i=0 ; i < ${#flags} ; i++ )); do
706 input+=("-${flags:i:1}")
707 done
708 else
709 input+=("$arg")
710 fi
711
712 shift
713 done
714 }
715 # :command.inspect_args
716 inspect_args() {
717 readarray -t sorted_keys < <(printf '%s\n' "${!args[@]}" | sort)
718 if (( ${#args[@]} )); then
719 echo args:
720 for k in "${sorted_keys[@]}"; do echo "- \${args[$k]} = ${args[$k]}"; done
721 else
722 echo args: none
723 fi
724
725 if (( ${#other_args[@]} )); then
726 echo
727 echo other_args:
728 echo "- \${other_args[*]} = ${other_args[*]}"
729 for i in "${!other_args[@]}"; do
730 echo "- \${other_args[$i]} = ${other_args[$i]}"
731 done
732 fi
733 }
734
735 # :command.user_lib
736 # src/lib/colors.sh
737 print_in_color() {
738 local color="$1"
739 shift
740 if [[ -z ${NO_COLOR+x} ]]; then
741 printf "$color%b\e[0m\n" "$*";
742 else
743 printf "%b\n" "$*";
744 fi
745 }
746
747 red() { print_in_color "\e[31m" "$*"; }
748 green() { print_in_color "\e[32m" "$*"; }
749 yellow() { print_in_color "\e[33m" "$*"; }
750 blue() { print_in_color "\e[34m" "$*"; }
751 magenta() { print_in_color "\e[35m" "$*"; }
752 cyan() { print_in_color "\e[36m" "$*"; }
753 bold() { print_in_color "\e[1m" "$*"; }
754 underlined() { print_in_color "\e[4m" "$*"; }
755 red_bold() { print_in_color "\e[1;31m" "$*"; }
756 green_bold() { print_in_color "\e[1;32m" "$*"; }
757 yellow_bold() { print_in_color "\e[1;33m" "$*"; }
758 blue_bold() { print_in_color "\e[1;34m" "$*"; }
759 magenta_bold() { print_in_color "\e[1;35m" "$*"; }
760 cyan_bold() { print_in_color "\e[1;36m" "$*"; }
761 red_underlined() { print_in_color "\e[4;31m" "$*"; }
762 green_underlined() { print_in_color "\e[4;32m" "$*"; }
763 yellow_underlined() { print_in_color "\e[4;33m" "$*"; }
764 blue_underlined() { print_in_color "\e[4;34m" "$*"; }
765 magenta_underlined() { print_in_color "\e[4;35m" "$*"; }
766 cyan_underlined() { print_in_color "\e[4;36m" "$*"; }
767
768 # src/lib/config.sh
769 config_init() {
770 CONFIG_FILE=${CONFIG_FILE:=config.ini}
771 [[ -f "$CONFIG_FILE" ]] || touch "$CONFIG_FILE"
772 }
773
774 config_get() {
775 local key=$1
776 local regex="^$key *= *(.+)$"
777 local value=""
778
779 config_init
780
781 while IFS= read -r line || [ -n "$line" ]; do
782 if [[ $line =~ $regex ]]; then
783 value="${BASH_REMATCH[1]}"
784 break
785 fi
786 done < "$CONFIG_FILE"
787
788 echo "$value"
789 }
790
791 config_set() {
792 local key=$1
793 shift
794 local value="$*"
795
796 config_init
797
798 local regex="^($key) *= *.+$"
799 local output=""
800 local found_key=""
801 local newline
802
803 while IFS= read -r line || [ -n "$line" ]; do
804 newline=$line
805 if [[ $line =~ $regex ]]; then
806 found_key="${BASH_REMATCH[1]}"
807 newline="$key = $value"
808 output="$output$newline\n"
809 elif [[ $line ]]; then
810 output="$output$line\n"
811 fi
812 done < "$CONFIG_FILE"
813
814 if [[ -z $found_key ]]; then
815 output="$output$key = $value\n"
816 fi
817
818 printf "%b\n" "$output" > "$CONFIG_FILE"
819 }
820
821 config_del() {
822 local key=$1
823
824 local regex="^($key) *="
825 local output=""
826
827 config_init
828
829 while IFS= read -r line || [ -n "$line" ]; do
830 if [[ $line ]] && [[ ! $line =~ $regex ]]; then
831 output="$output$line\n"
832 fi
833 done < "$CONFIG_FILE"
834
835 printf "%b\n" "$output" > "$CONFIG_FILE"
836 }
837
838 config_show() {
839 config_init
840 cat "$CONFIG_FILE"
841 }
842
843 config_keys() {
844 local regex="^([a-zA-Z0-9_\-\/\.]+) *="
845
846 config_init
847
848 local keys=()
849 local key
850
851 while IFS= read -r line || [ -n "$line" ]; do
852 if [[ $line =~ $regex ]]; then
853 key="${BASH_REMATCH[1]}"
854 keys+=("$key")
855 fi
856 done < "$CONFIG_FILE"
857 echo "${keys[@]}"
858 }
859
860 config_has_key() {
861 [[ $(config_get "$1") ]]
862 }
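# Example (illustration only, not part of the generated script): these helpers
# back the config.ini used by the bootstrap command, e.g.
#   config_set "rc_base_url" http://docker-dev
#   config_get "rc_base_url"                 # -> http://docker-dev
#   config_has_key "rc_use_celery" && echo "celery enabled"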
863
864 # src/lib/sample_function.sh
865
866 docker_ping_host() {
867 PING_HOST="$1"
868 docker run --network rhodecode_network --rm alpine ping "$PING_HOST"
869 }
870
871 check_bootstrap() {
872 # Avoid destroying bootstrapping by simple start/stop
873 if [[ ! -e $BOOTSTRAP_FILE ]]; then
874 echo "$(yellow WARNING:) initial bootstrap file $BOOTSTRAP_FILE not found !"
875 echo "$(yellow NOTICE:) Please run ./rccontrol bootstrap first"
876 exit
877 fi
878 }
879
880 # src/lib/send_completions.sh
881 send_completions() {
882 echo $'# rccontrol3 completion -*- shell-script -*-'
883 echo $''
884 echo $'# This bash completions script was generated by'
885 echo $'# completely (https://github.com/dannyben/completely)'
886 echo $'# Modifying it manually is not recommended'
887 echo $''
888 echo $'_rccontrol3_completions_filter() {'
889 echo $' local words="$1"'
890 echo $' local cur=${COMP_WORDS[COMP_CWORD]}'
891 echo $' local result=()'
892 echo $''
893 echo $' if [[ "${cur:0:1}" == "-" ]]; then'
894 echo $' echo "$words"'
895 echo $' '
896 echo $' else'
897 echo $' for word in $words; do'
898 echo $' [[ "${word:0:1}" != "-" ]] && result+=("$word")'
899 echo $' done'
900 echo $''
901 echo $' echo "${result[*]}"'
902 echo $''
903 echo $' fi'
904 echo $'}'
905 echo $''
906 echo $'_rccontrol3_completions() {'
907 echo $' local cur=${COMP_WORDS[COMP_CWORD]}'
908 echo $' local compwords=("${COMP_WORDS[@]:1:$COMP_CWORD-1}")'
909 echo $' local compline="${compwords[*]}"'
910 echo $''
911 echo $' case "$compline" in'
912 echo $' \'upgrade-source\'*)'
913 echo $' while read -r; do COMPREPLY+=( "$REPLY" ); done < <( compgen -W "$(_rccontrol3_completions_filter "--auth-token --help --server-url -h")" -- "$cur" )'
914 echo $' ;;'
915 echo $''
916 echo $' \'build-source\'*)'
917 echo $' while read -r; do COMPREPLY+=( "$REPLY" ); done < <( compgen -W "$(_rccontrol3_completions_filter "--help --version-name -h")" -- "$cur" )'
918 echo $' ;;'
919 echo $''
920 echo $' \'self-update\'*)'
921 echo $' while read -r; do COMPREPLY+=( "$REPLY" ); done < <( compgen -W "$(_rccontrol3_completions_filter "--auth-token --help --server-url -h")" -- "$cur" )'
922 echo $' ;;'
923 echo $''
924 echo $' \'bootstrap\'*)'
925 echo $' while read -r; do COMPREPLY+=( "$REPLY" ); done < <( compgen -W "$(_rccontrol3_completions_filter "--auth-token --force --help --server-url -f -h")" -- "$cur" )'
926 echo $' ;;'
927 echo $''
928 echo $' \'build\'*)'
929 echo $' while read -r; do COMPREPLY+=( "$REPLY" ); done < <( compgen -W "$(_rccontrol3_completions_filter "--help --version-name -h")" -- "$cur" )'
930 echo $' ;;'
931 echo $''
932 echo $' \'stack\'*)'
933 echo $' while read -r; do COMPREPLY+=( "$REPLY" ); done < <( compgen -W "$(_rccontrol3_completions_filter "--help -h metrics rhodecode router services")" -- "$cur" )'
934 echo $' ;;'
935 echo $''
936 echo $' *)'
937 echo $' while read -r; do COMPREPLY+=( "$REPLY" ); done < <( compgen -W "$(_rccontrol3_completions_filter "--help --version -h -v bootstrap build build-source self-update stack upgrade-source")" -- "$cur" )'
938 echo $' ;;'
939 echo $''
940 echo $' esac'
941 echo $'} &&'
942 echo $'complete -F _rccontrol3_completions rccontrol3'
943 echo $''
944 echo $'# ex: filetype=sh'
945 }
946
947 # src/lib/validate_stack_exists.sh
948
949 validate_stack_exists() {
950 err=""
951 invalid="1"
952
953 for item in $VALID_SERVICES
954 do
955 if [ "$1" == "$item" ]; then
956 invalid=""
957 break
958 fi
959 done
960
961 if [[ -n $invalid ]]; then
962 err="command '$1' not in list of $VALID_SERVICES"
963 fi
964
965 echo $err
966 }
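# Example usage (illustration; VALID_SERVICES is expected to hold
# "router metrics services rhodecode" as listed in the stack help):
#   err=$(validate_stack_exists "$service_name")
#   [[ -n $err ]] && { echo "$(red $err)"; exit 1; }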
967
968 # :command.command_functions
969 # :command.function
970 rccontrol_self_update_command() {
971 # src/self_update_command.sh
972 check_bootstrap
973
974 echo "# this file is located in 'src/self_update_command.sh'"
975 echo "# code for 'rccontrol3 self-update' goes here"
976 echo "# you can edit it freely and regenerate (it will not be overwritten)"
977 inspect_args
978
979 }
980
981 # :command.function
982 rccontrol_bootstrap_command() {
983 # src/bootstrap_command.sh
984 DEBUG=${args[--debug]}
985 force=${args[--force]}
986
987 check_bash_version() {
988
989 if [ ! "${BASH_VERSINFO:-0}" -ge 4 ]; then
990 echo "$(red Bash version 4 or greater is required, please update your bash version!)"
991 exit
992 fi
993 }
994
995 check_docker() {
996 (which docker || which docker.io) &>/dev/null
997 }
998
999 check_and_install_docker() {
1000 failMsg="Failed to find docker on your PATH"
1001
1002 if ! check_docker; then
1003 echo "$failMsg"
1004 read -p "Enter to install Docker directly from https://get.docker.com/ or Ctrl+C to exit"
1005 curl https://get.docker.com/ | sh
1006
1007 if ! check_docker; then
1008 echo "$failMsg"
1009 echo "Docker install failed. Quitting."
1010 exit
1011 fi
1012 fi
1013 }
1014
1015 docker_bootstrap() {
1016 check_and_install_docker
1017 echo 'Docker: Running bootstrap.'
1018
1019 echo "Docker: creating network 'rc_datavolume'"
1020 docker volume create --name=rc_datavolume
1021 echo "Docker: done"
1022 echo ""
1023
1024 echo "Docker: creating network 'rhodecode_network'"
1025 docker network inspect rhodecode_network >/dev/null 2>&1 || docker network create rhodecode_network
1026 echo "Docker: done"
1027 echo ""
1028
1029 echo "Docker: creating loki logging"
1030 loki_driver=$(docker plugin ls --format {{.Name}} --filter enabled=true | grep loki || echo "")
1031 if [[ $loki_driver == "" ]]; then
1032 docker plugin install grafana/loki-docker-driver:latest --alias loki --grant-all-permissions
1033 else
1034 echo "Docker: loki driver already exists"
1035 fi
1036 }
1037
1038 definitions_bootstrap() {
1039
1040 SOURCE_DIR=$PWD
1041 RHODECODE_DOCKER_HASH='master'
1042
1043 AUTH_TOKEN=${args[--auth-token]}
1044 SERVER_URL=${args[--server-url]}
1045
1046 DEFINITIONS_EXIST=""
1047 CHECK_FILES="rccontrol .env docker-compose-services.yaml docker-compose-apps.yaml"
1048 for check_file in $CHECK_FILES; do
1049 if [[ -f "$check_file" ]]; then
1050 DEFINITIONS_EXIST="1"
1051 fi
1052 done
1053
1054 if [[ -n $DEFINITIONS_EXIST && ! $force ]]; then
1055 echo "$(yellow skipping docker defs creation, existing files found. Use --force to create them anyway)"
1056 return
1057 fi
1058
1059 if [[ -n $DEFINITIONS_EXIST ]]; then
1060 echo "$(yellow docker definitions exists, are you sure to force re-create them?)"
1061 while true; do
1062 read -p "Would you like to continue with overriding file? [yn] " yn
1063 case $yn in
1064 [Yy]*) return 2 ;;
1065 [Nn]*) exit ;;
1066 *) echo "Please answer y or n." ;;
1067 esac
1068 done
1069 fi
1070
1071 exit
1072
1073 # download sources
1074 echo "Files: download rhodecode docker definitions from $SERVER_URL"
1075 echo ""
1076
1077 if [ $DEBUG ]; then
1078 echo "downloading: $SERVER_URL/rhodecode-enterprise-docker/archive/$RHODECODE_DOCKER_HASH.tgz?with_hash=0"
1079 curl --header "X-Rc-Auth-Token: $AUTH_TOKEN" -L $SERVER_URL/rhodecode-enterprise-docker/archive/$RHODECODE_DOCKER_HASH.tgz?with_hash=0 | tar -xz -C $SOURCE_DIR
1080
1081 echo "running CP $SOURCE_DIR/*rhodecode-enterprise-docker-plain/* $SOURCE_DIR"
1082 cp -v -r -f --update --backup=numbered $SOURCE_DIR/*rhodecode-enterprise-docker-plain/* $SOURCE_DIR
1083
1084 echo "removing $SOURCE_DIR/*rhodecode-enterprise-docker-plain"
1085 rm -r $SOURCE_DIR/*rhodecode-enterprise-docker-plain
1086 else
1087 curl -s --header "X-Rc-Auth-Token: $AUTH_TOKEN" -L $SERVER_URL/rhodecode-enterprise-docker/archive/$RHODECODE_DOCKER_HASH.tgz?with_hash=0 | tar -xz -C $SOURCE_DIR
1088
1089 cp -r -f --update --backup=numbered $SOURCE_DIR/*rhodecode-enterprise-docker-plain/* $SOURCE_DIR
1090
1091 rm -r $SOURCE_DIR/*rhodecode-enterprise-docker-plain
1092 fi
1093
1094 echo "$(green_bold DONE: docker definitions extracted to $SOURCE_DIR)"
1095 }
1096
1097 config_bootstrap() {
1098
1099 if [[ ! -f "$CONFIG_FILE" ]]; then
1100 echo "init config at: $CONFIG_FILE"
1101 config_init
1102 else
1103 echo "re-using config at: $CONFIG_FILE"
1104 fi
1105
1106 if ! config_has_key "rc_encrypted_secret" ; then
1107 key=$(echo $RANDOM | md5sum | head -c 32)
1108 config_set "rc_encrypted_secret" $key
1109 fi
1110
1111 if ! config_has_key "rc_db_url" ; then
1112 key=$(echo $RANDOM | md5sum | head -c 32)
1113 config_set "rc_db_url" "postgresql://$DB_USER:$key@database/$DB_NAME"
1114 fi
1115
1116 if ! config_has_key "rc_license_token" ; then
1117 config_set "rc_license_token" abra-cada-bra1-rce4
1118 fi
1119
1120 if ! config_has_key "rc_base_url" ; then
1121 config_set "rc_base_url" http://docker-dev
1122 fi
1123
1124 if ! config_has_key "rc_log_formatter" ; then
1125 # json is another option
1126 config_set "rc_log_formatter" generic
1127 fi
1128
1129 if ! config_has_key "rc_use_celery" ; then
1130 config_set "rc_use_celery" true
1131 fi
1132
1133 BOOTSTRAP_RUNTIME_ENV=$PWD/.custom/.runtime.env
1134 if [[ ! -f "$BOOTSTRAP_RUNTIME_ENV" ]]; then
1135 echo "init runtime env config at: $BOOTSTRAP_RUNTIME_ENV"
1136 touch "$BOOTSTRAP_RUNTIME_ENV"
1137
1138 #ENV_EXPAND=""
1139 for k in $(config_keys); do
1140 k_upper=${k^^}
1141 echo "$k_upper='$(config_get "$k")'" >> $BOOTSTRAP_RUNTIME_ENV
1142 done
1143
1144 fi
1145 }
1146
1147 cur_date=$(date '+%Y-%m-%d %H:%M:%S')
1148
1149 check_bash_version
1150
1151 if [[ ! -e $BOOTSTRAP_FILE ]]; then
1152 echo "initial bootstrap file $BOOTSTRAP_FILE not found !"
1153
1154 docker_bootstrap
1155 definitions_bootstrap
1156 config_bootstrap
1157
1158 echo "$cur_date" > "$BOOTSTRAP_FILE"
1159
1160 else
1161 if [ $force ]; then
1162
1163 docker_bootstrap $force
1164 definitions_bootstrap $force ## TODO: remove old
1165 config_bootstrap $force
1166
1167 echo "$cur_date" > "$BOOTSTRAP_FILE"
1168
1169 exit
1170 fi
1171
1172 echo "bootstrap file $BOOTSTRAP_FILE was found add --force to force bootstrap"
1173 fi
1174
1175 get_started
1176 }
1177
1178 # :command.function
1179 rccontrol_get_build_artifacts_command() {
1180 # src/get_build_artifacts_command.sh
1181 check_bootstrap
1182
1183 DEBUG=${args[--debug]}
1184 AUTH=${args[--auth]}
1185 INSTALLER_URL=${args[--installer-url]}
1186 MANIFEST_URL=${args[--manifest-url]}
1187 RC_VERSION=${args[--version-name]}
1188 VER=$RC_VERSION
1189
1190 CACHE_DIR=$PWD/.cache
1191 VER_REGEX="$VER+x86_64"
1192
1193 echo "Downloading Artifacts for version: $VER"
1194
1195 echo "1/4 Checking available downloads from MANIFEST file"
1196
1197 ARTS=$(curl -s $AUTH $MANIFEST_URL | grep --ignore-case "$VER_REGEX" | cut -d ' ' -f 2)
1198
1199 if [[ $DEBUG ]]; then
1200 echo "DEBUG START"
1201 curl -s $AUTH $MANIFEST_URL | grep --ignore-case "$VER_REGEX" || echo "no regex match"
1202 curl -s $AUTH $MANIFEST_URL | grep --ignore-case "$VER_REGEX" | cut -d ' ' -f 2
1203 echo "Found following artifacts:"
1204 echo $ARTS
1205 echo "DEBUG END"
1206 fi
1207
1208 if [[ $ARTS == "" ]]; then
1209 MSG="Failed to found any MANIFEST entries for version $VER make sure there exists release with that version or use --version to specify different version"
1210 echo "$(red $MSG)"
1211 exit
1212 fi
1213
1214 echo "2/4 Downloading locale-archive"
1215 curl -L https://dls.rhodecode.com/assets/locale-archive -J -O
1216 mv -v locale-archive "$CACHE_DIR"
1217
1218 # vcsserver/ce/ee
1219 echo "3/4 Downloading installer artifacts"
1220 for url in $ARTS; do
1221 echo "Downloading $url with $AUTH"
1222 curl $AUTH --fail-early -L ${url} -J -O
1223 done
1224
1225 #for url in $(curl -s $MANIFEST_URL | grep --ignore-case -E 'control.+\+x86_64' | cut -d ' ' -f 2); do
1226 # echo "Downloading $url"
1227 # curl -L ${url} -J -O
1228 #done
1229
1230 echo "4/4 Downloading installer from $INSTALLER_URL"
1231 curl $AUTH -L $INSTALLER_URL -J -O
1232
1233 INSTALLER=$(ls -Art RhodeCode-installer-* | tail -n 1)
1234 if [[ -n $INSTALLER ]]; then
1235 chmod +x "${INSTALLER}"
1236 fi
1237
1238 echo "Copying artifacts into $CACHE_DIR"
1239
1240 mv -v "${INSTALLER}" $CACHE_DIR
1241 mv -v *.bz2 $CACHE_DIR
1242 ls -lh $CACHE_DIR
1243
1244 }
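# Example (illustration): fetch artifacts for the default version, or pin one explicitly
#   ./rccontrol get-build-artifacts
#   ./rccontrol get-build-artifacts --version-name 4.27.0 --auth '-u admin:secret'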
1245
1246 # :command.function
1247 rccontrol_build_command() {
1248 # src/build_command.sh
1249 check_bootstrap
1250
1251 rc_version=${args[--version-name]}
1252 export RC_VERSION=$rc_version
1253 eval "echo INSTALLER BASED BUILDING${RC_VERSION}"
1254
1255 RC_VERSION=$rc_version ./rccontrol stack rhodecode build --progress plain rhodecode
1256
1257 }
1258
1259 # :command.function
1260 rccontrol_get_build_source_command() {
1261 # src/get_build_source_command.sh
1262 check_bootstrap
1263
1264 AUTH_TOKEN=${args[--auth-token]}
1265 SERVER_URL=${args[--server-url]}
1266 revision=${args[revision]}
1267
1268 SOURCE_DIR=$PWD/.source
1269
1270 RHODECODE_VCS_HASH=$revision
1271 RHODECODE_CE_HASH=$revision
1272 RHODECODE_EE_HASH=$revision
1273
1274 # download sources
1275 echo "** download rhodecode source for build from $SERVER_URL **"
1276
1277 curl --header "X-Rc-Auth-Token: $AUTH_TOKEN" -L $SERVER_URL/rhodecode-vcsserver/archive/$RHODECODE_VCS_HASH.tgz?with_hash=0 | tar -xz -C $SOURCE_DIR
1278 curl --header "X-Rc-Auth-Token: $AUTH_TOKEN" -L $SERVER_URL/rhodecode-enterprise-ce/archive/$RHODECODE_CE_HASH.tgz?with_hash=0 | tar -xz -C $SOURCE_DIR
1279 #TODO: fix just CE build...
1280 curl --header "X-Rc-Auth-Token: $AUTH_TOKEN" -L $SERVER_URL/rhodecode-enterprise-ee/archive/$RHODECODE_EE_HASH.tgz?with_hash=0 | tar -xz -C $SOURCE_DIR
1281
1282 rm -rf $SOURCE_DIR/rhodecode-vcsserver && mv $SOURCE_DIR/*rhodecode-vcsserver-plain $SOURCE_DIR/rhodecode-vcsserver
1283 rm -rf $SOURCE_DIR/rhodecode-enterprise-ce && mv $SOURCE_DIR/*rhodecode-enterprise-ce-plain $SOURCE_DIR/rhodecode-enterprise-ce
1284 rm -rf $SOURCE_DIR/rhodecode-enterprise-ee && cp -r $SOURCE_DIR/*rhodecode-enterprise-ee-plain $SOURCE_DIR/rhodecode-enterprise-ee
1285
1286 echo "downloading sources done to $SOURCE_DIR"
1287
1288 }
1289
1290 # :command.function
1291 rccontrol_build_source_command() {
1292 # src/build_source_command.sh
1293 check_bootstrap
1294
1295 rc_version=${args[--version-name]}
1296 export RC_VERSION=$rc_version
1297 eval "echo SOURCE BASED BUILDING${RC_VERSION}"
1298
1299 RC_VERSION=$rc_version ./rccontrol stack rhodecode -f docker-compose-apps.source.yaml build --progress plain rhodecode
1300
1301 }
1302
1303 # :command.function
1304 rccontrol_stack_command() {
1305 # src/stack_command.sh
1306 check_bootstrap
1307
1308 DEBUG=${args[--debug]}
1309 service_name=${args[name]}
1310
1311 if [[ ! -f $RC_STACK_ROUTER_EXT ]]; then
1312 RC_STACK_ROUTER_EXT=""
1313 else
1314 RC_STACK_ROUTER_EXT="-f $RC_STACK_ROUTER_EXT"
1315 fi
1316
1317 CMD_ROUTER="\
1318 docker compose \
1319 --env-file $ENV_FILE \
1320 $ENV_EXPAND \
1321 -p rc_cluster_router \
1322 -f docker-compose-base.yaml \
1323 -f docker-compose-router.yaml $RC_STACK_ROUTER_EXT"
1324
1325 if [[ ! -f $RC_STACK_SERVICES_EXT ]]; then
1326 RC_STACK_SERVICES_EXT_LCL=""
1327 else
1328 RC_STACK_SERVICES_EXT_LCL="-f $RC_STACK_SERVICES_EXT"
1329 fi
1330
1331 RC_STACK_PROFILES="--profile postgres --profile redis --profile elasticsearch --profile channelstream"
1332
1333 CMD_SERVICES="\
1334 docker compose \
1335 --env-file $ENV_FILE \
1336 $ENV_EXPAND \
1337 $RC_STACK_PROFILES \
1338 -p rc_cluster_services \
1339 -f docker-compose-base.yaml \
1340 -f docker-compose-services.yaml $RC_STACK_SERVICES_EXT_LCL"
1341
1342 if [[ ! -f $RC_STACK_METRICS_EXT ]]; then
1343 RC_STACK_METRICS_EXT_LCL=""
1344 else
1345 RC_STACK_METRICS_EXT_LCL="-f $RC_STACK_METRICS_EXT"
1346 fi
1347
1348 CMD_METRICS="\
1349 docker compose \
1350 --env-file $ENV_FILE \
1351 $ENV_EXPAND \
1352 -p rc_cluster_metrics \
1353 -f docker-compose-base.yaml \
1354 -f docker-compose-metrics.yaml $RC_STACK_METRICS_EXT_LCL"
1355
1356 if [[ ! -f $RC_STACK_RHODECODE_EXT ]]; then
1357 RC_STACK_RHODECODE_EXT_LCL=""
1358 else
1359 RC_STACK_RHODECODE_EXT_LCL="-f $RC_STACK_RHODECODE_EXT"
1360 fi
1361
1362 CMD_RHODECODE="\
1363 docker compose \
1364 --env-file $ENV_FILE \
1365 $ENV_EXPAND \
1366 -p rc_cluster_apps \
1367 -f docker-compose-base.yaml \
1368 -f docker-compose-apps.yaml $RC_STACK_RHODECODE_EXT_LCL"
1369
1370 CMD_RHODECODE_SOURCE="\
1371 docker compose \
1372 --env-file $ENV_FILE \
1373 $ENV_EXPAND \
1374 -p rc_cluster_apps \
1375 -f docker-compose-base.yaml \
1376 -f docker-compose-apps.yaml \
1377 -f docker-compose-apps.source.yaml $RC_STACK_RHODECODE_EXT_LCL"
1378
1379 case $service_name in
1380
1381 services )
1382 if [[ $DEBUG ]]; then
1383 echo "---"
1384 echo "stacks docker: $RC_STACK_SERVICES_EXT_LCL"
1385 echo "running command: ${CMD_SERVICES}"
1386 echo "ARGS: ${other_args[*]}"
1387 echo "---"
1388 fi
1389
1390 eval "${CMD_SERVICES} ${other_args[*]}"
1391 exit
1392 ;;
1393 router )
1394 if [[ $DEBUG ]]; then
1395 echo "---"
1396 echo "stacks docker: $RC_STACK_ROUTER_EXT"
1397 echo "running command: ${CMD_ROUTER}"
1398 echo "ARGS: ${other_args[*]}"
1399 echo "---"
1400 fi
1401 eval "${CMD_ROUTER} ${other_args[*]}"
1402 exit
1403 ;;
1404 metrics )
1405 if [[ $DEBUG ]]; then
1406 echo "---"
1407 echo "stacks docker: $RC_STACK_METRICS_EXT_LCL"
1408 echo "running command: ${CMD_METRICS}"
1409 echo "ARGS: ${other_args[*]}"
1410 echo "---"
1411 fi
1412 eval "${CMD_METRICS} ${other_args[*]}"
1413 exit
1414 ;;
1415 rhodecode )
1416 if [[ $DEBUG ]]; then
1417 echo "---"
1418 echo "stacks docker: $RC_STACK_RHODECODE_EXT_LCL"
1419 echo "running command: ${CMD_RHODECODE}"
1420 echo "ARGS: ${other_args[*]}"
1421 echo "---"
1422 fi
1423 eval "${CMD_RHODECODE} ${other_args[*]}"
1424 exit
1425 ;;
1426 esac
1427
1428 }
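# Illustrative note (not part of the generated CLI code): with the default override files
# absent, a call like `./rccontrol stack services up --detach` resolves roughly to:
#
#   docker compose --env-file $PWD/.env \
#     --profile postgres --profile redis --profile elasticsearch --profile channelstream \
#     -p rc_cluster_services \
#     -f docker-compose-base.yaml \
#     -f docker-compose-services.yaml \
#     up --detach
#
# i.e. NAME picks the compose project/files, and any remaining arguments are passed through
# to `docker compose` unchanged.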
1429
1430 # :command.function
1431 rccontrol_stack_status_command() {
1432 # src/stack_status_command.sh
1433 check_bootstrap
1434 DEBUG=${args[--debug]}
1435 if [[ $DEBUG ]]; then
1436 echo "---"
1437
1438 ps_cmd=$(docker ps --filter=name=rc_cluster --format="{{.ID}}")
1439
1440 for service in $ps_cmd; do
1441
1442 servicename=`docker inspect --format '{{ .Name }}' $service`
1443 servicename=${servicename:1}
1444 echo $servicename
1445 docker inspect $service --format='{{.State.Status}}: {{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}'
1446 echo ""
1447
1448 done
1449 fi
1450 echo "---"
1451 docker ps --filter=name=rc_cluster --format="table {{.ID}}\\t{{.Image}}\\t{{.Status}}\\t{{.Names}}\\t{{.Ports}}"
1452 }
1453
1454 # :command.function
1455 rccontrol_stack_upgrade_command() {
1456 # src/stack_upgrade_command.sh
1457 check_bootstrap
1458
1459 echo "# this file is located in 'src/stack_upgrade_command.sh'"
1460 echo "# code for 'rccontrol stack-upgrade' goes here"
1461 echo "# you can edit it freely and regenerate (it will not be overwritten)"
1462 inspect_args
1463
1464 }
1465
1466 # :command.function
1467 rccontrol_cli_redis_command() {
1468 # src/cli_redis_command.sh
1469 check_bootstrap
1470
1471 DEBUG=${args[--debug]}
1472
1473 target_container=rc_cluster_services-redis-1
1474 docker_id=$(docker ps --filter name=$target_container -q)
1475
1476 if [[ $DEBUG ]]; then
1477 echo "container id: $docker_id, based on $target_container filter"
1478 docker ps
1479 fi
1480
1481 if [ "$docker_id" == "" ]; then
1482 MSG="Cannot find container ID with name $target_container"
1483 echo "$(red $MSG)"
1484 exit
1485 fi
1486
1487 if [[ $DEBUG ]]; then
1488 echo "Running the following command:"
1489 echo "docker exec --interactive --tty $docker_id /bin/bash -c 'redis-cli'"
1490 fi
1491
1492 eval "docker exec --interactive --tty $docker_id /bin/bash -c 'redis-cli'"
1493
1494 }
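# Illustrative session (the commands after the prompt are standard redis-cli commands, shown
# only as an example of what can be run once attached; not generated code):
#   ./rccontrol cli-redis
#   127.0.0.1:6379> DBSIZE
#   127.0.0.1:6379> INFO memory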
1495
1496 # :command.function
1497 rccontrol_cli_db_command() {
1498 # src/cli_db_command.sh
1499 check_bootstrap
1500
1501 DEBUG=${args[--debug]}
1502
1503 target_container=rc_cluster_services-database-1
1504 docker_id=$(docker ps --filter name=$target_container -q)
1505
1506 if [[ $DEBUG ]]; then
1507 echo "container id: $docker_id, based on $target_container filter"
1508 docker ps
1509 fi
1510
1511 if [ "$docker_id" == "" ]; then
1512 MSG="Cannot find container ID with name $target_container"
1513 echo "$(red $MSG)"
1514 exit
1515 fi
1516
1517 if [[ $DEBUG ]]; then
1518 echo "Running the following command:"
1519 echo "docker exec --interactive --tty $docker_id /bin/bash -c 'PGPASSWORD=$DB_PASSWORD psql --username=$DB_USER --dbname=$DB_NAME'"
1520 fi
1521
1522 eval "docker exec --interactive --tty $docker_id /bin/bash -c 'PGPASSWORD=$DB_PASSWORD psql --username=$DB_USER --dbname=$DB_NAME'"
1523
1524 }
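# Illustrative session (standard psql meta-commands, shown only as an example of what can be
# run once attached; not generated code):
#   ./rccontrol cli-db
#   rhodecode=# \dt
#   rhodecode=# SELECT version();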
1525
1526 # :command.function
1527 rccontrol_cli_db_upgrade_command() {
1528 # src/cli_db_upgrade_command.sh
1529 check_bootstrap
1530
1531 DEBUG=${args[--debug]}
1532
1533 target_container=rc_cluster_apps-rhodecode-1
1534 docker_id=$(docker ps --filter name=$target_container -q)
1535
1536 if [[ $DEBUG ]]; then
1537 echo "container id: $docker_id, based on $target_container filter"
1538 docker ps
1539 fi
1540
1541 if [ "$docker_id" == "" ]; then
1542 MSG="Cannot find container ID with name $target_container"
1543 echo "$(red $MSG)"
1544 exit
1545 fi
1546
1547 if [[ $DEBUG ]]; then
1548 echo "./rccontrol stack rhodecode exec rhodecode /usr/local/bin/rhodecode_bin/bin/rc-upgrade-db /etc/rhodecode/conf/rhodecode.optimized.ini --force-yes"
1549 fi
1550
1551 ./rccontrol stack rhodecode exec rhodecode /usr/local/bin/rhodecode_bin/bin/rc-upgrade-db /etc/rhodecode/conf/rhodecode.optimized.ini --force-yes
1552 }
1553
1554 # :command.function
1555 rccontrol__completions_command() {
1556 # src/_completions_command.sh
1557 send_completions
1558 }
1559
1560 # :command.function
1561 rccontrol_backup_db_command() {
1562 # src/backup_db_command.sh
1563 check_bootstrap
1564
1565 DEBUG=${args[--debug]}
1566 DESTINATION=${args[destination]}
1567
1568 target_container=rc_cluster_services-database-1
1569
1570 docker_id=$(docker ps --filter name=$target_container -q)
1571 backup_name=rc_db_dump-$(date +%Y-%m-%d).tar.gz
1572
1573 echo "creating backup $backup_name"
1574
1575 if [[ $DEBUG ]]; then
1576 echo "container id: $docker_id, based on $target_container filter"
1577 docker ps
1578 echo "---"
1579 fi
1580
1581 #image_id=$(docker inspect "$docker_id" --format {{.Image}} | cut -c 8-)
1582 #
1583 #if [[ $DEBUG ]]; then
1584 # echo "image id: $image_id, based on $target_container filter"
1585 # docker image ls
1586 # echo "---"
1587 #fi
1588
1589 if [ "$docker_id" == "" ]; then
1590 MSG="Cannot find container ID with name $target_container"
1591 echo "$(red $MSG)"
1592 exit
1593 fi
1594
1595 #if [ "image_id" == "" ]; then
1596 # MSG="Cannot find image ID with name $target_container"
1597 # echo "$(red $MSG)"
1598 # exit
1599 #fi
1600
1601 if [[ $DEBUG ]]; then
1602 echo "docker exec -e PGPASSWORD=$DB_PASSWORD $docker_id /bin/bash -c 'pg_dump --inserts -U $DB_USER -h 127.0.0.1 --dbname=$DB_NAME | gzip > /var/rc-data-dump/$backup_name'"
1603 fi
1604
1605 eval "docker exec -e PGPASSWORD=$DB_PASSWORD $docker_id /bin/bash -c 'pg_dump --inserts -U $DB_USER -h 127.0.0.1 --dbname=$DB_NAME | gzip > /var/rc-data-dump/$backup_name'"
1606 echo "$(green Backup created in /var/rc-data-dump/ mount !)"
1607
1608 }
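# Restore sketch (illustrative only, not part of the generated CLI): assuming a dump made by
# the command above is still present in the /var/rc-data-dump mount of the database
# container, it could be loaded back with something along the lines of:
#   docker exec -i <database-container-id> /bin/bash -c \
#     "gunzip -c /var/rc-data-dump/rc_db_dump-YYYY-MM-DD.tar.gz | PGPASSWORD=<password> psql -U <db-user> --dbname=<db-name>"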
1609
1610 # :command.function
1611 rccontrol_backup_data_command() {
1612 # src/backup_data_command.sh
1613 echo "# this file is located in 'src/backup_data_command.sh'"
1614 echo "# code for 'rccontrol backup-data' goes here"
1615 echo "# you can edit it freely and regenerate (it will not be overwritten)"
1616 inspect_args
1617
1618 }
1619
1620 # :command.parse_requirements
1621 parse_requirements() {
1622 # :command.fixed_flags_filter
1623 case "${1:-}" in
1624 --version | -v )
1625 version_command
1626 exit
1627 ;;
1628
1629 --help | -h )
1630 long_usage=yes
1631 rccontrol_usage
1632 exit
1633 ;;
1634
1635 # :flag.case
1636 --debug )
1637
1638 # :flag.case_no_arg
1639 args[--debug]=1
1640 shift
1641 ;;
1642
1643 esac
1644
1645 # :command.environment_variables_filter
1646 # :command.environment_variables_default
1647 export RC_CLI_VERSION_NAME="${RC_CLI_VERSION_NAME:-4.27.0}"
1648 export RC_STACK_ROUTER_EXT="${RC_STACK_ROUTER_EXT:-.custom/docker-compose-router.override.yaml}"
1649 export RC_STACK_METRICS_EXT="${RC_STACK_METRICS_EXT:-.custom/docker-compose-metrics.override.yaml}"
1650 export RC_STACK_SERVICES_EXT="${RC_STACK_SERVICES_EXT:-.custom/docker-compose-services.override.yaml}"
1651 export RC_STACK_RHODECODE_EXT="${RC_STACK_RHODECODE_EXT:-.custom/docker-compose-apps.override.yaml}"
1652
1653 # :command.command_filter
1654 action=${1:-}
1655
1656 case $action in
1657 -* )
1658 ;;
1659
1660 self-update )
1661 action="self-update"
1662 shift
1663 rccontrol_self_update_parse_requirements "$@"
1664 shift $#
1665 ;;
1666
1667 bootstrap | install )
1668 action="bootstrap"
1669 shift
1670 rccontrol_bootstrap_parse_requirements "$@"
1671 shift $#
1672 ;;
1673
1674 get-build-artifacts )
1675 action="get-build-artifacts"
1676 shift
1677 rccontrol_get_build_artifacts_parse_requirements "$@"
1678 shift $#
1679 ;;
1680
1681 build )
1682 action="build"
1683 shift
1684 rccontrol_build_parse_requirements "$@"
1685 shift $#
1686 ;;
1687
1688 get-build-source )
1689 action="get-build-source"
1690 shift
1691 rccontrol_get_build_source_parse_requirements "$@"
1692 shift $#
1693 ;;
1694
1695 build-source )
1696 action="build-source"
1697 shift
1698 rccontrol_build_source_parse_requirements "$@"
1699 shift $#
1700 ;;
1701
1702 stack )
1703 action="stack"
1704 shift
1705 rccontrol_stack_parse_requirements "$@"
1706 shift $#
1707 ;;
1708
1709 stack-status | status )
1710 action="stack-status"
1711 shift
1712 rccontrol_stack_status_parse_requirements "$@"
1713 shift $#
1714 ;;
1715
1716 stack-upgrade )
1717 action="stack-upgrade"
1718 shift
1719 rccontrol_stack_upgrade_parse_requirements "$@"
1720 shift $#
1721 ;;
1722
1723 cli-redis )
1724 action="cli-redis"
1725 shift
1726 rccontrol_cli_redis_parse_requirements "$@"
1727 shift $#
1728 ;;
1729
1730 cli-db )
1731 action="cli-db"
1732 shift
1733 rccontrol_cli_db_parse_requirements "$@"
1734 shift $#
1735 ;;
1736
1737 cli-db-upgrade )
1738 action="cli-db-upgrade"
1739 shift
1740 rccontrol_cli_db_upgrade_parse_requirements "$@"
1741 shift $#
1742 ;;
1743
1744 _completions )
1745 action="_completions"
1746 shift
1747 rccontrol__completions_parse_requirements "$@"
1748 shift $#
1749 ;;
1750
1751 backup-db )
1752 action="backup-db"
1753 shift
1754 rccontrol_backup_db_parse_requirements "$@"
1755 shift $#
1756 ;;
1757
1758 backup-data )
1759 action="backup-data"
1760 shift
1761 rccontrol_backup_data_parse_requirements "$@"
1762 shift $#
1763 ;;
1764
1765 # :command.command_fallback
1766 "" )
1767 rccontrol_usage >&2
1768 exit 1
1769 ;;
1770
1771 * )
1772 printf "invalid command: %s\n" "$action" >&2
1773 exit 1
1774 ;;
1775
1776 esac
1777
1778 # :command.parse_requirements_while
1779 while [[ $# -gt 0 ]]; do
1780 key="$1"
1781 case "$key" in
1782
1783 -?* )
1784 printf "invalid option: %s\n" "$key" >&2
1785 exit 1
1786 ;;
1787
1788 * )
1789 # :command.parse_requirements_case
1790 # :command.parse_requirements_case_simple
1791 printf "invalid argument: %s\n" "$key" >&2
1792 exit 1
1793
1794 ;;
1795
1796 esac
1797 done
1798
1799 }
1800
1801 # :command.parse_requirements
1802 rccontrol_self_update_parse_requirements() {
1803 # :command.fixed_flags_filter
1804 case "${1:-}" in
1805 --help | -h )
1806 long_usage=yes
1807 rccontrol_self_update_usage
1808 exit
1809 ;;
1810
1811 esac
1812
1813 # :command.dependencies_filter
1814 if ! [[ -x "$(command -v curl)" ]]; then
1815 printf "missing dependency: curl\n" >&2
1816 exit 1
1817 fi
1818 if ! [[ -x "$(command -v tar)" ]]; then
1819 printf "missing dependency: tar\n" >&2
1820 exit 1
1821 fi
1822
1823 # :command.command_filter
1824 action="self-update"
1825
1826 # :command.parse_requirements_while
1827 while [[ $# -gt 0 ]]; do
1828 key="$1"
1829 case "$key" in
1830 # :flag.case
1831 --auth-token )
1832
1833 # :flag.case_arg
1834 if [[ -n ${2+x} ]]; then
1835
1836 args[--auth-token]="$2"
1837 shift
1838 shift
1839 else
1840 printf "%s\n" "--auth-token requires an argument: --auth-token AUTH_TOKEN" >&2
1841 exit 1
1842 fi
1843 ;;
1844
1845 # :flag.case
1846 --server-url )
1847
1848 # :flag.case_arg
1849 if [[ -n ${2+x} ]]; then
1850
1851 args[--server-url]="$2"
1852 shift
1853 shift
1854 else
1855 printf "%s\n" "--server-url requires an argument: --server-url SERVER_URL" >&2
1856 exit 1
1857 fi
1858 ;;
1859
1860 -?* )
1861 printf "invalid option: %s\n" "$key" >&2
1862 exit 1
1863 ;;
1864
1865 * )
1866 # :command.parse_requirements_case
1867 # :command.parse_requirements_case_simple
1868 printf "invalid argument: %s\n" "$key" >&2
1869 exit 1
1870
1871 ;;
1872
1873 esac
1874 done
1875
1876 # :command.default_assignments
1877 [[ -n ${args[--server-url]:-} ]] || args[--server-url]="https://code.rhodecode.com"
1878
1879 }
1880
1881 # :command.parse_requirements
1882 rccontrol_bootstrap_parse_requirements() {
1883 # :command.fixed_flags_filter
1884 case "${1:-}" in
1885 --help | -h )
1886 long_usage=yes
1887 rccontrol_bootstrap_usage
1888 exit
1889 ;;
1890
1891 esac
1892
1893 # :command.dependencies_filter
1894 if ! [[ -x "$(command -v curl)" ]]; then
1895 printf "missing dependency: curl\n" >&2
1896 exit 1
1897 fi
1898 if ! [[ -x "$(command -v tar)" ]]; then
1899 printf "missing dependency: tar\n" >&2
1900 exit 1
1901 fi
1902 if ! [[ -x "$(command -v md5sum)" ]]; then
1903 printf "missing dependency: md5sum\n" >&2
1904 exit 1
1905 fi
1906
1907 # :command.command_filter
1908 action="bootstrap"
1909
1910 # :command.parse_requirements_while
1911 while [[ $# -gt 0 ]]; do
1912 key="$1"
1913 case "$key" in
1914 # :flag.case
1915 --force | -f )
1916
1917 # :flag.case_no_arg
1918 args[--force]=1
1919 shift
1920 ;;
1921
1922 # :flag.case
1923 --auth-token )
1924
1925 # :flag.case_arg
1926 if [[ -n ${2+x} ]]; then
1927
1928 args[--auth-token]="$2"
1929 shift
1930 shift
1931 else
1932 printf "%s\n" "--auth-token requires an argument: --auth-token AUTH_TOKEN" >&2
1933 exit 1
1934 fi
1935 ;;
1936
1937 # :flag.case
1938 --server-url )
1939
1940 # :flag.case_arg
1941 if [[ -n ${2+x} ]]; then
1942
1943 args[--server-url]="$2"
1944 shift
1945 shift
1946 else
1947 printf "%s\n" "--server-url requires an argument: --server-url SERVER_URL" >&2
1948 exit 1
1949 fi
1950 ;;
1951
1952 -?* )
1953 printf "invalid option: %s\n" "$key" >&2
1954 exit 1
1955 ;;
1956
1957 * )
1958 # :command.parse_requirements_case
1959 # :command.parse_requirements_case_simple
1960 printf "invalid argument: %s\n" "$key" >&2
1961 exit 1
1962
1963 ;;
1964
1965 esac
1966 done
1967
1968 # :command.default_assignments
1969 [[ -n ${args[--server-url]:-} ]] || args[--server-url]="https://code.rhodecode.com"
1970
1971 }
1972
1973 # :command.parse_requirements
1974 rccontrol_get_build_artifacts_parse_requirements() {
1975 # :command.fixed_flags_filter
1976 case "${1:-}" in
1977 --help | -h )
1978 long_usage=yes
1979 rccontrol_get_build_artifacts_usage
1980 exit
1981 ;;
1982
1983 esac
1984
1985 # :command.command_filter
1986 action="get-build-artifacts"
1987
1988 # :command.parse_requirements_while
1989 while [[ $# -gt 0 ]]; do
1990 key="$1"
1991 case "$key" in
1992 # :flag.case
1993 --auth )
1994
1995 # :flag.case_arg
1996 if [[ -n ${2+x} ]]; then
1997
1998 args[--auth]="$2"
1999 shift
2000 shift
2001 else
2002 printf "%s\n" "--auth requires an argument: --auth AUTH" >&2
2003 exit 1
2004 fi
2005 ;;
2006
2007 # :flag.case
2008 --installer-url )
2009
2010 # :flag.case_arg
2011 if [[ -n ${2+x} ]]; then
2012
2013 args[--installer-url]="$2"
2014 shift
2015 shift
2016 else
2017 printf "%s\n" "--installer-url requires an argument: --installer-url INSTALLER_URL" >&2
2018 exit 1
2019 fi
2020 ;;
2021
2022 # :flag.case
2023 --manifest-url )
2024
2025 # :flag.case_arg
2026 if [[ -n ${2+x} ]]; then
2027
2028 args[--manifest-url]="$2"
2029 shift
2030 shift
2031 else
2032 printf "%s\n" "--manifest-url requires an argument: --manifest-url MANIFEST_URL" >&2
2033 exit 1
2034 fi
2035 ;;
2036
2037 # :flag.case
2038 --version-name )
2039
2040 # :flag.case_arg
2041 if [[ -n ${2+x} ]]; then
2042
2043 args[--version-name]="$2"
2044 shift
2045 shift
2046 else
2047 printf "%s\n" "--version-name requires an argument: --version-name VERSION_NAME" >&2
2048 exit 1
2049 fi
2050 ;;
2051
2052 -?* )
2053 printf "invalid option: %s\n" "$key" >&2
2054 exit 1
2055 ;;
2056
2057 * )
2058 # :command.parse_requirements_case
2059 # :command.parse_requirements_case_simple
2060 printf "invalid argument: %s\n" "$key" >&2
2061 exit 1
2062
2063 ;;
2064
2065 esac
2066 done
2067
2068 # :command.default_assignments
2069 [[ -n ${args[--auth]:-} ]] || args[--auth]=""
2070 [[ -n ${args[--installer-url]:-} ]] || args[--installer-url]="https://dls.rhodecode.com/dls/N2E2ZTY1NzA3NjYxNDA2NTc1NjI3MTcyNzA2MjcxNzIyZTcwNjI3YQ==/rhodecode-control/latest-linux-ee"
2071 [[ -n ${args[--manifest-url]:-} ]] || args[--manifest-url]="https://dls.rhodecode.com/linux/MANIFEST"
2072 [[ -n ${args[--version-name]:-} ]] || args[--version-name]="$RC_CLI_VERSION_NAME"
2073
2074 }
2075
2076 # :command.parse_requirements
2077 rccontrol_build_parse_requirements() {
2078 # :command.fixed_flags_filter
2079 case "${1:-}" in
2080 --help | -h )
2081 long_usage=yes
2082 rccontrol_build_usage
2083 exit
2084 ;;
2085
2086 esac
2087
2088 # :command.command_filter
2089 action="build"
2090
2091 # :command.parse_requirements_while
2092 while [[ $# -gt 0 ]]; do
2093 key="$1"
2094 case "$key" in
2095 # :flag.case
2096 --version-name )
2097
2098 # :flag.case_arg
2099 if [[ -n ${2+x} ]]; then
2100
2101 args[--version-name]="$2"
2102 shift
2103 shift
2104 else
2105 printf "%s\n" "--version-name requires an argument: --version-name VERSION_NAME" >&2
2106 exit 1
2107 fi
2108 ;;
2109
2110 -?* )
2111 printf "invalid option: %s\n" "$key" >&2
2112 exit 1
2113 ;;
2114
2115 * )
2116 # :command.parse_requirements_case
2117 # :command.parse_requirements_case_simple
2118 printf "invalid argument: %s\n" "$key" >&2
2119 exit 1
2120
2121 ;;
2122
2123 esac
2124 done
2125
2126 # :command.default_assignments
2127 [[ -n ${args[--version-name]:-} ]] || args[--version-name]="$RC_CLI_VERSION_NAME"
2128
2129 }
2130
2131 # :command.parse_requirements
2132 rccontrol_get_build_source_parse_requirements() {
2133 # :command.fixed_flags_filter
2134 case "${1:-}" in
2135 --help | -h )
2136 long_usage=yes
2137 rccontrol_get_build_source_usage
2138 exit
2139 ;;
2140
2141 esac
2142
2143 # :command.dependencies_filter
2144 if ! [[ -x "$(command -v curl)" ]]; then
2145 printf "missing dependency: curl\n" >&2
2146 exit 1
2147 fi
2148 if ! [[ -x "$(command -v tar)" ]]; then
2149 printf "missing dependency: tar\n" >&2
2150 exit 1
2151 fi
2152
2153 # :command.command_filter
2154 action="get-build-source"
2155
2156 # :command.parse_requirements_while
2157 while [[ $# -gt 0 ]]; do
2158 key="$1"
2159 case "$key" in
2160 # :flag.case
2161 --auth-token )
2162
2163 # :flag.case_arg
2164 if [[ -n ${2+x} ]]; then
2165
2166 args[--auth-token]="$2"
2167 shift
2168 shift
2169 else
2170 printf "%s\n" "--auth-token requires an argument: --auth-token AUTH_TOKEN" >&2
2171 exit 1
2172 fi
2173 ;;
2174
2175 # :flag.case
2176 --server-url )
2177
2178 # :flag.case_arg
2179 if [[ -n ${2+x} ]]; then
2180
2181 args[--server-url]="$2"
2182 shift
2183 shift
2184 else
2185 printf "%s\n" "--server-url requires an argument: --server-url SERVER_URL" >&2
2186 exit 1
2187 fi
2188 ;;
2189
2190 -?* )
2191 printf "invalid option: %s\n" "$key" >&2
2192 exit 1
2193 ;;
2194
2195 * )
2196 # :command.parse_requirements_case
2197 # :command.parse_requirements_case_simple
2198 if [[ -z ${args[revision]+x} ]]; then
2199
2200 args[revision]=$1
2201 shift
2202 else
2203 printf "invalid argument: %s\n" "$key" >&2
2204 exit 1
2205 fi
2206
2207 ;;
2208
2209 esac
2210 done
2211
2212 # :command.required_args_filter
2213 if [[ -z ${args[revision]+x} ]]; then
2214 printf "missing required argument: REVISION\nusage: rccontrol get-build-source REVISION [OPTIONS]\n" >&2
2215 exit 1
2216 fi
2217
2218 # :command.default_assignments
2219 [[ -n ${args[--server-url]:-} ]] || args[--server-url]="https://code.rhodecode.com"
2220
2221 }
2222
2223 # :command.parse_requirements
2224 rccontrol_build_source_parse_requirements() {
2225 # :command.fixed_flags_filter
2226 case "${1:-}" in
2227 --help | -h )
2228 long_usage=yes
2229 rccontrol_build_source_usage
2230 exit
2231 ;;
2232
2233 esac
2234
2235 # :command.command_filter
2236 action="build-source"
2237
2238 # :command.parse_requirements_while
2239 while [[ $# -gt 0 ]]; do
2240 key="$1"
2241 case "$key" in
2242 # :flag.case
2243 --version-name )
2244
2245 # :flag.case_arg
2246 if [[ -n ${2+x} ]]; then
2247
2248 args[--version-name]="$2"
2249 shift
2250 shift
2251 else
2252 printf "%s\n" "--version-name requires an argument: --version-name VERSION_NAME" >&2
2253 exit 1
2254 fi
2255 ;;
2256
2257 -?* )
2258 printf "invalid option: %s\n" "$key" >&2
2259 exit 1
2260 ;;
2261
2262 * )
2263 # :command.parse_requirements_case
2264 # :command.parse_requirements_case_simple
2265 printf "invalid argument: %s\n" "$key" >&2
2266 exit 1
2267
2268 ;;
2269
2270 esac
2271 done
2272
2273 # :command.default_assignments
2274 [[ -n ${args[--version-name]:-} ]] || args[--version-name]="$RC_CLI_VERSION_NAME"
2275
2276 }
2277
2278 # :command.parse_requirements
2279 rccontrol_stack_parse_requirements() {
2280 # :command.fixed_flags_filter
2281 case "${1:-}" in
2282 --help | -h )
2283 long_usage=yes
2284 rccontrol_stack_usage
2285 exit
2286 ;;
2287
2288 esac
2289
2290 # :command.command_filter
2291 action="stack"
2292
2293 # :command.parse_requirements_while
2294 while [[ $# -gt 0 ]]; do
2295 key="$1"
2296 case "$key" in
2297
2298 -?* )
2299 other_args+=("$1")
2300 shift
2301 ;;
2302
2303 * )
2304 # :command.parse_requirements_case
2305 # :command.parse_requirements_case_catch_all
2306 if [[ -z ${args[name]+x} ]]; then
2307
2308 args[name]=$1
2309 shift
2310 else
2311 other_args+=("$1")
2312 shift
2313 fi
2314
2315 ;;
2316
2317 esac
2318 done
2319
2320 # :command.required_args_filter
2321 if [[ -z ${args[name]+x} ]]; then
2322 printf "missing required argument: NAME\nusage: rccontrol stack NAME [SERVICES PARAMS...]\n" >&2
2323 exit 1
2324 fi
2325
2326 # :command.whitelist_filter
2327 if [[ ! ${args[name]} =~ ^(router|metrics|services|rhodecode)$ ]]; then
2328 printf "%s\n" "name must be one of: router, metrics, services, rhodecode" >&2
2329 exit 1
2330 fi
2331
2332 }
2333
2334 # :command.parse_requirements
2335 rccontrol_stack_status_parse_requirements() {
2336 # :command.fixed_flags_filter
2337 case "${1:-}" in
2338 --help | -h )
2339 long_usage=yes
2340 rccontrol_stack_status_usage
2341 exit
2342 ;;
2343
2344 esac
2345
2346 # :command.command_filter
2347 action="stack-status"
2348
2349 # :command.parse_requirements_while
2350 while [[ $# -gt 0 ]]; do
2351 key="$1"
2352 case "$key" in
2353
2354 -?* )
2355 printf "invalid option: %s\n" "$key" >&2
2356 exit 1
2357 ;;
2358
2359 * )
2360 # :command.parse_requirements_case
2361 # :command.parse_requirements_case_simple
2362 printf "invalid argument: %s\n" "$key" >&2
2363 exit 1
2364
2365 ;;
2366
2367 esac
2368 done
2369
2370 }
2371
2372 # :command.parse_requirements
2373 rccontrol_stack_upgrade_parse_requirements() {
2374 # :command.fixed_flags_filter
2375 case "${1:-}" in
2376 --help | -h )
2377 long_usage=yes
2378 rccontrol_stack_upgrade_usage
2379 exit
2380 ;;
2381
2382 esac
2383
2384 # :command.command_filter
2385 action="stack-upgrade"
2386
2387 # :command.parse_requirements_while
2388 while [[ $# -gt 0 ]]; do
2389 key="$1"
2390 case "$key" in
2391
2392 -?* )
2393 printf "invalid option: %s\n" "$key" >&2
2394 exit 1
2395 ;;
2396
2397 * )
2398 # :command.parse_requirements_case
2399 # :command.parse_requirements_case_simple
2400 printf "invalid argument: %s\n" "$key" >&2
2401 exit 1
2402
2403 ;;
2404
2405 esac
2406 done
2407
2408 }
2409
2410 # :command.parse_requirements
2411 rccontrol_cli_redis_parse_requirements() {
2412 # :command.fixed_flags_filter
2413 case "${1:-}" in
2414 --help | -h )
2415 long_usage=yes
2416 rccontrol_cli_redis_usage
2417 exit
2418 ;;
2419
2420 esac
2421
2422 # :command.command_filter
2423 action="cli-redis"
2424
2425 # :command.parse_requirements_while
2426 while [[ $# -gt 0 ]]; do
2427 key="$1"
2428 case "$key" in
2429
2430 -?* )
2431 printf "invalid option: %s\n" "$key" >&2
2432 exit 1
2433 ;;
2434
2435 * )
2436 # :command.parse_requirements_case
2437 # :command.parse_requirements_case_simple
2438 printf "invalid argument: %s\n" "$key" >&2
2439 exit 1
2440
2441 ;;
2442
2443 esac
2444 done
2445
2446 }
2447
2448 # :command.parse_requirements
2449 rccontrol_cli_db_parse_requirements() {
2450 # :command.fixed_flags_filter
2451 case "${1:-}" in
2452 --help | -h )
2453 long_usage=yes
2454 rccontrol_cli_db_usage
2455 exit
2456 ;;
2457
2458 esac
2459
2460 # :command.command_filter
2461 action="cli-db"
2462
2463 # :command.parse_requirements_while
2464 while [[ $# -gt 0 ]]; do
2465 key="$1"
2466 case "$key" in
2467
2468 -?* )
2469 printf "invalid option: %s\n" "$key" >&2
2470 exit 1
2471 ;;
2472
2473 * )
2474 # :command.parse_requirements_case
2475 # :command.parse_requirements_case_simple
2476 printf "invalid argument: %s\n" "$key" >&2
2477 exit 1
2478
2479 ;;
2480
2481 esac
2482 done
2483
2484 }
2485
2486 # :command.parse_requirements
2487 rccontrol_cli_db_upgrade_parse_requirements() {
2488 # :command.fixed_flags_filter
2489 case "${1:-}" in
2490 --help | -h )
2491 long_usage=yes
2492 rccontrol_cli_db_upgrade_usage
2493 exit
2494 ;;
2495
2496 esac
2497
2498 # :command.command_filter
2499 action="cli-db-upgrade"
2500
2501 # :command.parse_requirements_while
2502 while [[ $# -gt 0 ]]; do
2503 key="$1"
2504 case "$key" in
2505
2506 -?* )
2507 printf "invalid option: %s\n" "$key" >&2
2508 exit 1
2509 ;;
2510
2511 * )
2512 # :command.parse_requirements_case
2513 # :command.parse_requirements_case_simple
2514 printf "invalid argument: %s\n" "$key" >&2
2515 exit 1
2516
2517 ;;
2518
2519 esac
2520 done
2521
2522 }
2523
2524 # :command.parse_requirements
2525 rccontrol__completions_parse_requirements() {
2526 # :command.fixed_flags_filter
2527 case "${1:-}" in
2528 --help | -h )
2529 long_usage=yes
2530 rccontrol__completions_usage
2531 exit
2532 ;;
2533
2534 esac
2535
2536 # :command.command_filter
2537 action="_completions"
2538
2539 # :command.parse_requirements_while
2540 while [[ $# -gt 0 ]]; do
2541 key="$1"
2542 case "$key" in
2543
2544 -?* )
2545 printf "invalid option: %s\n" "$key" >&2
2546 exit 1
2547 ;;
2548
2549 * )
2550 # :command.parse_requirements_case
2551 # :command.parse_requirements_case_simple
2552 printf "invalid argument: %s\n" "$key" >&2
2553 exit 1
2554
2555 ;;
2556
2557 esac
2558 done
2559
2560 }
2561
2562 # :command.parse_requirements
2563 rccontrol_backup_db_parse_requirements() {
2564 # :command.fixed_flags_filter
2565 case "${1:-}" in
2566 --help | -h )
2567 long_usage=yes
2568 rccontrol_backup_db_usage
2569 exit
2570 ;;
2571
2572 esac
2573
2574 # :command.dependencies_filter
2575 if ! [[ -x "$(command -v tar)" ]]; then
2576 printf "missing dependency: tar\n" >&2
2577 exit 1
2578 fi
2579 if ! [[ -x "$(command -v gzip)" ]]; then
2580 printf "missing dependency: gzip\n" >&2
2581 exit 1
2582 fi
2583
2584 # :command.command_filter
2585 action="backup-db"
2586
2587 # :command.parse_requirements_while
2588 while [[ $# -gt 0 ]]; do
2589 key="$1"
2590 case "$key" in
2591
2592 -?* )
2593 printf "invalid option: %s\n" "$key" >&2
2594 exit 1
2595 ;;
2596
2597 * )
2598 # :command.parse_requirements_case
2599 # :command.parse_requirements_case_simple
2600 printf "invalid argument: %s\n" "$key" >&2
2601 exit 1
2602
2603 ;;
2604
2605 esac
2606 done
2607
2608 }
2609
2610 # :command.parse_requirements
2611 rccontrol_backup_data_parse_requirements() {
2612 # :command.fixed_flags_filter
2613 case "${1:-}" in
2614 --help | -h )
2615 long_usage=yes
2616 rccontrol_backup_data_usage
2617 exit
2618 ;;
2619
2620 esac
2621
2622 # :command.dependencies_filter
2623 if ! [[ -x "$(command -v tar)" ]]; then
2624 printf "missing dependency: tar\n" >&2
2625 exit 1
2626 fi
2627
2628 # :command.command_filter
2629 action="backup-data"
2630
2631 # :command.parse_requirements_while
2632 while [[ $# -gt 0 ]]; do
2633 key="$1"
2634 case "$key" in
2635
2636 -?* )
2637 printf "invalid option: %s\n" "$key" >&2
2638 exit 1
2639 ;;
2640
2641 * )
2642 # :command.parse_requirements_case
2643 # :command.parse_requirements_case_simple
2644 printf "invalid argument: %s\n" "$key" >&2
2645 exit 1
2646
2647 ;;
2648
2649 esac
2650 done
2651
2652 }
2653
2654 # :command.initialize
2655 initialize() {
2656 version="4.28.0.REL20221101"
2657 long_usage=''
2658 set -e
2659
2660 # :command.environment_variables_default
2661 export RC_CLI_VERSION_NAME="${RC_CLI_VERSION_NAME:-4.27.0}"
2662 export RC_STACK_ROUTER_EXT="${RC_STACK_ROUTER_EXT:-.custom/docker-compose-router.override.yaml}"
2663 export RC_STACK_METRICS_EXT="${RC_STACK_METRICS_EXT:-.custom/docker-compose-metrics.override.yaml}"
2664 export RC_STACK_SERVICES_EXT="${RC_STACK_SERVICES_EXT:-.custom/docker-compose-services.override.yaml}"
2665 export RC_STACK_RHODECODE_EXT="${RC_STACK_RHODECODE_EXT:-.custom/docker-compose-apps.override.yaml}"
2666
2667 # src/initialize.sh
2668
2669 ENV_FILE=$PWD/.env
2670
2671 # bootstrap file is a config file at the same time
2672 BOOTSTRAP_FILE='.rccontrol-bootstrap'
2673 CONFIG_FILE='.rccontrol.ini'
2674
2675 VALID_SERVICES="router metrics services rhodecode"
2676 DOCKER_DEFS_WORK_DIR="docker_defs"
2677
2678 #echo "1 ----"
2679 #echo $RC_STACK_SERVICES_EXT
2680 #echo $RC_STACK_METRICS_EXT
2681 #echo "1 -----"
2682
2683 #echo '2 ----'
2684 #grep -v -e '^#' "$ENV_FILE" | xargs -I {} echo \'{}\'
2685 #echo '2 ----'
2686
2687 #expose env vars from rccontrol config
2688 #if [[ -f $ENV_FILE ]]; then
2689 # eval $(grep -v -e '^#' "$ENV_FILE" | xargs -I {} echo export \'{}\')
2690 #fi
2691
2692 #ENV_EXPAND=""
2693 #for k in $(config_keys); do
2694 # k_upper=${k^^}
2695 # env_entry="-e $k_upper='$(config_get "$k")' "
2696 # ENV_EXPAND+=$env_entry;
2697 # #echo $env_expand
2698 #done
2699
2700 #
2701 #echo "3 ----"
2702 #echo $RC_STACK_SERVICES_EXT
2703 #echo $RC_STACK_METRICS_EXT
2704 #echo $RC_STACK_RHODECODE_EXT
2705 #echo "3 -----"
2706
2707 #env | grep RC_
2708
2709 # backup files from a docker volume into /tmp/backup.tar.gz
2710 docker-volume-backup-compressed() {
2711 docker run --rm -v /tmp:/backup --volumes-from "$1" debian:jessie tar -czvf /backup/backup.tar.gz "${@:2}"
2712 }
2713
2714 # restore files from /tmp/backup.tar.gz into a docker volume
2715 docker-volume-restore-compressed() {
2716 docker run --rm -v /tmp:/backup --volumes-from "$1" debian:jessie tar -xzvf /backup/backup.tar.gz "${@:2}"
2717 echo "Double checking files..."
2718 docker run --rm -v /tmp:/backup --volumes-from "$1" debian:jessie ls -lh "${@:2}"
2719 }
2720
2721 # backup files from a docker volume into /tmp/backup.tar
2722 docker-volume-backup() {
2723 docker run --rm -v /tmp:/backup --volumes-from "$1" busybox tar -cvf /backup/backup.tar "${@:2}"
2724 }
2725
2726 # restore files from /tmp/backup.tar into a docker volume
2727 docker-volume-restore() {
2728 docker run --rm -v /tmp:/backup --volumes-from "$1" busybox tar -xvf /backup/backup.tar "${@:2}"
2729 echo "Double checking files..."
2730 docker run --rm -v /tmp:/backup --volumes-from "$1" busybox ls -lh "${@:2}"
2731 }
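# Illustrative usage of the helpers above (the container name and data path are assumptions,
# adjust them to your stack):
#   docker-volume-backup-compressed rc_cluster_services-database-1 /var/lib/postgresql/data
#   docker-volume-restore-compressed rc_cluster_services-database-1 /var/lib/postgresql/data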
2732
2733 get_started() {
2734 echo ""
2735 echo "~~~~ $(green Bootstrap completed) ~~~~"
2736 echo "To start using RhodeCode run the following stacks using ./rccontrol stack [STACK_NAME]"
2737 echo ""
2738 echo "$(yellow_bold [Optional]): run the edge router to control domain and SSL"
2739 echo "./rccontrol stack router up --detach"
2740 echo ""
2741 echo "$(green_bold [Mandatory]): run the services, like database, redis, channelstream etc..."
2742 echo "./rccontrol stack services up --detach"
2743 echo ""
2744 echo "$(green_bold [Mandatory]): run the rhodecode app stack"
2745 echo "./rccontrol stack rhodecode up --detach"
2746 echo ""
2747 echo "$(yellow_bold [Optional]): run the monitoring stack, this includes the grafana/prometheus logging/metrics system"
2748 echo "./rccontrol stack metrics up --detach"
2749 echo ""
2750 echo "check services status/ports"
2751 echo "./rccontrol status"
2752 }
2753 }
2754
2755 # :command.run
2756 run() {
2757 declare -A args=()
2758 declare -a other_args=()
2759 declare -a input=()
2760 normalize_input "$@"
2761 parse_requirements "${input[@]}"
2762
2763 if [[ $action == "self-update" ]]; then
2764 if [[ ${args[--help]:-} ]]; then
2765 long_usage=yes
2766 rccontrol_self_update_usage
2767 else
2768 rccontrol_self_update_command
2769 fi
2770
2771 elif [[ $action == "bootstrap" ]]; then
2772 if [[ ${args[--help]:-} ]]; then
2773 long_usage=yes
2774 rccontrol_bootstrap_usage
2775 else
2776 rccontrol_bootstrap_command
2777 fi
2778
2779 elif [[ $action == "get-build-artifacts" ]]; then
2780 if [[ ${args[--help]:-} ]]; then
2781 long_usage=yes
2782 rccontrol_get_build_artifacts_usage
2783 else
2784 rccontrol_get_build_artifacts_command
2785 fi
2786
2787 elif [[ $action == "build" ]]; then
2788 if [[ ${args[--help]:-} ]]; then
2789 long_usage=yes
2790 rccontrol_build_usage
2791 else
2792 rccontrol_build_command
2793 fi
2794
2795 elif [[ $action == "get-build-source" ]]; then
2796 if [[ ${args[--help]:-} ]]; then
2797 long_usage=yes
2798 rccontrol_get_build_source_usage
2799 else
2800 rccontrol_get_build_source_command
2801 fi
2802
2803 elif [[ $action == "build-source" ]]; then
2804 if [[ ${args[--help]:-} ]]; then
2805 long_usage=yes
2806 rccontrol_build_source_usage
2807 else
2808 rccontrol_build_source_command
2809 fi
2810
2811 elif [[ $action == "stack" ]]; then
2812 if [[ ${args[--help]:-} ]]; then
2813 long_usage=yes
2814 rccontrol_stack_usage
2815 else
2816 rccontrol_stack_command
2817 fi
2818
2819 elif [[ $action == "stack-status" ]]; then
2820 if [[ ${args[--help]:-} ]]; then
2821 long_usage=yes
2822 rccontrol_stack_status_usage
2823 else
2824 rccontrol_stack_status_command
2825 fi
2826
2827 elif [[ $action == "stack-upgrade" ]]; then
2828 if [[ ${args[--help]:-} ]]; then
2829 long_usage=yes
2830 rccontrol_stack_upgrade_usage
2831 else
2832 rccontrol_stack_upgrade_command
2833 fi
2834
2835 elif [[ $action == "cli-redis" ]]; then
2836 if [[ ${args[--help]:-} ]]; then
2837 long_usage=yes
2838 rccontrol_cli_redis_usage
2839 else
2840 rccontrol_cli_redis_command
2841 fi
2842
2843 elif [[ $action == "cli-db" ]]; then
2844 if [[ ${args[--help]:-} ]]; then
2845 long_usage=yes
2846 rccontrol_cli_db_usage
2847 else
2848 rccontrol_cli_db_command
2849 fi
2850
2851 elif [[ $action == "cli-db-upgrade" ]]; then
2852 if [[ ${args[--help]:-} ]]; then
2853 long_usage=yes
2854 rccontrol_cli_db_upgrade_usage
2855 else
2856 rccontrol_cli_db_upgrade_command
2857 fi
2858
2859 elif [[ $action == "_completions" ]]; then
2860 if [[ ${args[--help]:-} ]]; then
2861 long_usage=yes
2862 rccontrol__completions_usage
2863 else
2864 rccontrol__completions_command
2865 fi
2866
2867 elif [[ $action == "backup-db" ]]; then
2868 if [[ ${args[--help]:-} ]]; then
2869 long_usage=yes
2870 rccontrol_backup_db_usage
2871 else
2872 rccontrol_backup_db_command
2873 fi
2874
2875 elif [[ $action == "backup-data" ]]; then
2876 if [[ ${args[--help]:-} ]]; then
2877 long_usage=yes
2878 rccontrol_backup_data_usage
2879 else
2880 rccontrol_backup_data_command
2881 fi
2882
2883 elif [[ $action == "root" ]]; then
2884 root_command
2885 fi
2886 }
2887
2888 initialize
2889 run "$@"
@@ -1,31 +1,56 b''
1 COMPOSE_PROJECT_NAME=rc_cluster
1 #COMPOSE_PROJECT_NAME=rc_cluster
2 TZ="UTC"
2 TZ="UTC"
3
3
4
4 # Version to deploy and run
5 # Version to deploy and run
5 RC_VERSION="4.28.0"
6 ## VERSION_INFO
7 RC_VERSION="4.28.0.REL20221120_SOURCE"
8 #RC_CLI_VERSION_NAME="4.27.0"
9
10 # ce, or ee
11 RC_EDITION=ee
12 RC_IMAGE_CE="rhodecode/rhodecode-ce:$RC_VERSION"
13 RC_IMAGE_EE="rhodecode/rhodecode-ee:$RC_VERSION"
6
14
7 # Database access credentials
15 # Database bootstrap/access credentials
8 DB_NAME=rhodecode
16 DB_NAME=rhodecode
9 DB_USER=rhodecode
17 DB_USER=rhodecode
10 DB_PASSWORD=hUc1adS7oDd6Oj3in3
11
18
12 # base url for running app
19 # Bootstrap container with specific password
13 RHODECODE_BASE_URL=http://localhost:8888
20 # This password will be used to create a new database account and password
21 DB_PASSWORD=[GENERATED_AT_BOOTSTRAP]
14
22
15 # HTTP and HTTPS ports for running app
23 # set limit of redis in-memory cache before keys get evicted using LRU cache
16 RC_HTTP_PORT=8888
24 RC_REDIS_MAXMEMORY=1024mb
17 RC_HTTPS_PORT=8443
25
26 # set the size of ramdisk used for locking files
27 RC_DATA_RAMDISK_SIZE=1G
28
29 # base url for running app, this is used for traefik hostname matching, so accessing this host will redirect
30 # traffic to the running instance
31 # replace this with your host name, e.g. http://yourdomain.com or https://rc.company.local
32 RHODECODE_HOSTNAME=docker-dev
18
33
19 # SSH Port exposed, increased security is to not used default 22
20 RC_SSH_PORT=9022
21
34
22 # user/password for first admin user created for access
35 # user/password for first admin user created for access
23 RHODECODE_USER_EMAIL=admin@rhodecode.com
36 # this is a bootstrap super-admin account to access the web interface
37 RHODECODE_USER_EMAIL=admin@server-com
24 RHODECODE_USER_NAME=admin
38 RHODECODE_USER_NAME=admin
25 RHODECODE_USER_PASS=secret4
39 RHODECODE_USER_PASS=secret4
26
40
41
42
43
44 # HTTP and HTTPS ports for running app
45 RC_HTTP_PORT=8888
46
47 # SSH Port exposed; for increased security do not use the default port 22
48 RC_SSH_PORT=3022
49
50
51 ## RHODECODE RUNTIME VARS
52
27 ## New since 4.27.0, default GIT branch name can be changed
53 ## New since 4.27.0, default GIT branch name can be changed
28 GIT_DEFAULT_BRANCH_NAME=master
54 GIT_DEFAULT_BRANCH_NAME=master
29
55
30 ## Source build version for image
56
31 SOURCE_VER=4.28.0.REL1
@@ -1,21 +1,23 b''
1 # CACHE FILES
1 # CACHE FILES
2 .cache/RhodeCode-installer*
2 .cache/RhodeCode-installer*
3 .cache/*.tar.bz2
3 .cache/*.tar.bz2
4 .cache/locale-archive
4 .cache/locale-archive
5
5
6 # Source code downloaded
7 .source/*
8
9 # Custom / override won't be tracked
10 .custom/*
11
6 # LOGS
12 # LOGS
7 logs/*.log
13 logs/*.log
8 logs/nginx/*.log
14 logs/nginx/*.log
9 logs/svn/*.log
15 logs/svn/*.log
10 logs/apps/*.log
16 logs/apps/*.log
11 logs/postgres/*.log
17 logs/postgres/*.log
12
18
13 .idea
19 .idea
14 config/_shared/.rcmetadata.json
20 config/_shared/.rcmetadata.json
15
21
16 .source/*
22 .rccontrol-bootstrap
17 scripts/download-artifacts-secret.sh
23 .rccontrol.ini No newline at end of file
18
19 scripts/download-source-secret.sh
20
21 .rccontrol-bootstrap No newline at end of file
@@ -1,258 +1,298 b''
1 # RhodeCode Cluster
1 # RhodeCode Cluster
2
2
3 RhodeCode Cluster is a multi-node highly-scalable setup to run RhodeCode and
3 RhodeCode Cluster is a multi-node highly-scalable setup to run
4 RhodeCode, the Edge Router (Traefik + SSL), the Metrics Stack (Prometheus, Loki, Grafana) and
4 all its additional components in single environment using Docker.
5 all its additional components in a single environment using Docker.
5
6
6 Using a docker-compose this setup creates following services for RhodeCode:
7 Using docker-compose, this setup creates the following services for RhodeCode:
7
8
8 - Nginx HTTP Server for load balancing and reverse proxy
9 Edge-Router:
9 - RhodeCode HTTP
10 - Traefik, Edge Router, SSL termination etc
10 - VCSServer for GIT/SVN/HG support
11
11 - SSH Server for cloning over SSH
12 Core Services:
12 - SVN webserver for HTTP support over SVN
13 - Database (defaults to PostgreSQL)
13 - Celery workers for asynchronous tasks
14 - Redis, acts as cache and queue exchange
14 - Celery beat for automation tasks
15 - ChannelStream - live websocket communications
15 - Redis Database for caching
16 - Nginx (static/channelstream) proxy serving RhodeCode APPS and channelstream communication
16 - Postgres database for persistent storage
17 - Elasticsearch (full text search backend)
17 - Channelstream websocket server for live components
18
19 RhodeCode
20 - RhodeCode CE/EE
21 - VCSServer for GIT/SVN/HG support
22 - SSH Server for cloning over SSH
23 - SVN webserver for HTTP support over SVN
24 - Celery workers for asynchronous tasks
25 - Celery beat for automation tasks
26
27 Metrics
28 - Loki, logs aggregation
29 - Grafana, Metrics Dashboard
30 - Prometheus, Metrics time-series
31 - statsd-exporter - statsd to prometheus bridge
32 - node-exporter - machine stats
33 - promtail - log scraping
18
34
19
35
20 ## Pre requisites
36 ## Prerequisites
21
37
38 To run this stack, Docker Engine and Docker Compose need to be installed on the host machine.
22 Visit docker site and install docker (min version 20.10) and docker compose:
39 Visit the docker site and install docker (min version 20.10) and docker compose:
23
40
24 - https://docs.docker.com/engine/install/
41 - https://docs.docker.com/engine/install/
25 - https://docs.docker.com/compose/install/
42 - https://docs.docker.com/compose/install/
26
43
27 The instructions below outline installation of the current stable release (v1.29.2) of Compose.
44 The instructions below outline installation of the current stable release (v1.29.2) of Compose.
28
45
29 ## Data structure
46 ## Data structure
30
47
31 There are 4 volumes defined:
48 There are 4 volumes defined:
32
49
33
50
34 - `/etc/rhodecode/conf`
51 - `/etc/rhodecode/conf`
35
52
36 Used for configuration files for rhodecode, vcsserver and supervisord, and some cache data
53 Shared volume used for configuration files for rhodecode, vcsserver and supervisord, and some cache data
37
54
38 - `/var/opt/rhodecode_repo_store`
55 - `/var/opt/rhodecode_repo_store`
39
56
40 Used for main repository storage where repositories would be stored
57 Used for main repository storage where repositories would be stored
41
58
42 - `/var/opt/rhodecode_data`
59 - `/var/opt/rhodecode_data`
43
60
44 data dir for rhodecode cache/lock files, or user sessions (for file backend)
61 Data dir for rhodecode cache/lock files, or user sessions (for file backend)
45
62
46 - `/var/log/rhodecode`
63 - `/var/log/rhodecode`
47
64
48 Used to store all logs from RhodeCode
65 Used to store all logs from RhodeCode
49
66
50
67
51 ## Setup/Build options
68 ## Setup/Build options
52
69
53 There are 3 options to run the docker stack.
70 There are 3 options to run the docker stack.
54
71
55 - [Standalone cluster build from installer](#standalone-cluster-build-from-installer)
72 - [Standalone cluster build from installer](#standalone-cluster-build-from-installer)
56 - [Standalone cluster build from source](#standalone-cluster-build-from-source)
73 - [Standalone cluster build from source](#standalone-cluster-build-from-source)
57 - [dev enviroment setup](#like-this-one)
74 - [dev environment setup](#dev-env-build)
75
58
76
59
77
60 # Standalone cluster build from installer
78 # Standalone cluster build from installer
61
79
62 Follow these steps to build and run the RhodeCode Cluster via Docker-compose.
80 Follow these steps to build and run the RhodeCode Cluster via Docker-compose.
63 Get the repository:
81 Get the repository:
64
82
65 git clone https://code.rhodecode.com/rhodecode-enterprise-docker && cd rhodecode-enterprise-docker
83 git clone https://code.rhodecode.com/rhodecode-enterprise-docker && cd rhodecode-enterprise-docker
66
84
67 ## Download installer binaries
85 ## Download installer binaries
68
86
69 First start by fetching required installer binaries. This is required to create both
87 First, start by fetching the required installer binaries. This is required to create both
70 simple build and full compose setup.
88 the simple build and the full compose setup.
71 Please check the `.env` file to adjust RhodeCode version if needed.
89 Please check the `.env` file to adjust RhodeCode version if needed.
72 (e.g. RC_VERSION="4.24.1")
90 (e.g. RC_VERSION="4.24.1")
73
91
74 ```shell
92 ```shell
75 cd .bootstrap/ && ./download-artifacts.sh && cd ../
93 cd .bootstrap/ && ./download-artifacts.sh && cd ../
76 ```
94 ```
77
95
78 This will download required installer files and put them into the `.cache` directory.
96 This will download required installer files and put them into the `.cache` directory.
79 This directory should look similar to that after downloads have finish:
97 This directory should look similar to this after the downloads have finished:
80
98
81 ```shell
99 ```shell
82 drwxr-xr-x 8 rcdev rcdev 256B Feb 8 13:35 .
100 drwxr-xr-x 8 rcdev rcdev 256B Feb 8 13:35 .
83 drwxr-xr-x 14 rcdev rcdev 448B Feb 8 10:40 ..
101 drwxr-xr-x 14 rcdev rcdev 448B Feb 8 10:40 ..
84 -rw-r--r-- 1 rcdev rcdev 0B Feb 8 20:44 .dirkeep
102 -rw-r--r-- 1 rcdev rcdev 0B Feb 8 20:44 .dirkeep
85 -rwxr-xr-x 1 rcdev rcdev 241M Feb 8 13:35 RhodeCode-installer-linux-build20210208_0800
103 -rwxr-xr-x 1 rcdev rcdev 241M Feb 8 13:35 RhodeCode-installer-linux-build20210208_0800
86 -rw-r--r-- 1 rcdev rcdev 156M Feb 8 13:35 RhodeCodeCommunity-4.24.1+x86_64-linux_build20210208_0800.tar.bz2
104 -rw-r--r-- 1 rcdev rcdev 156M Feb 8 13:35 RhodeCodeCommunity-4.24.1+x86_64-linux_build20210208_0800.tar.bz2
87 -rw-r--r-- 1 rcdev rcdev 171M Feb 8 13:35 RhodeCodeEnterprise-4.24.1+x86_64-linux_build20210208_0800.tar.bz2
105 -rw-r--r-- 1 rcdev rcdev 171M Feb 8 13:35 RhodeCodeEnterprise-4.24.1+x86_64-linux_build20210208_0800.tar.bz2
88 -rw-r--r-- 1 rcdev rcdev 145M Feb 8 13:35 RhodeCodeVCSServer-4.24.1+x86_64-linux_build20210208_0800.tar.bz2
106 -rw-r--r-- 1 rcdev rcdev 145M Feb 8 13:35 RhodeCodeVCSServer-4.24.1+x86_64-linux_build20210208_0800.tar.bz2
89 -rw-r--r-- 1 rcdev rcdev 109M Feb 8 13:35 locale-archive
107 -rw-r--r-- 1 rcdev rcdev 109M Feb 8 13:35 locale-archive
90 ```
108 ```
91
109
92 ## Set License for EE version
110 ## Set License for EE version
93
111
94 In order to install EE edition a valid license is required.
112 In order to install the EE edition a license file is required to be present.
113 It can contain your current license, or it can be left empty and the license applied later via the web interface.
114
95 To apply it during build phase save your raw license data into a file
115 To apply it during the build phase, save your raw license data into the file
96
116
97 `config/compose/rhodecode_enterprise.license`
117 `config/rhodecode_enterprise.license`
98
118
99 If this file is present build phase will read it and license will be applied at creation.
119 If this file is present, the build phase will read it and the license will be applied at creation.
100 This file can also be empty and license can be applied via a WEB interface after first login.
120 This file can also be empty, and the license can be applied via the web interface after the first login.
101
121
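A minimal sketch of preparing that file (the path is the one referenced above; creating it
empty is enough if you prefer to apply the license later via the web interface):

```shell
mkdir -p config
# leave empty, or paste your raw license data into this file
touch config/rhodecode_enterprise.license
```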
102 ## Run Docker compose build:
122 ## Run Docker compose build:
103
123
104 *This will build RhodeCode based on downloaded installer packages.*
124 *This will build RhodeCode based on downloaded installer packages.*
105
125
106 To create a full stack we need to run the database container, so it's ready to
126 To create a full stack we need to run the database container, so it's ready to
107 build the docker image.
127 build the docker image.
108
128
109 _Disk space problems?_
129 _Disk space problems?_
110
130
111 ```shell
131 ```shell
112 docker system df
132 docker system df
113 docker builder prune
133 docker builder prune
134 # optionally
135 docker image prune -a
114 ```
136 ```
137 ### Creating & building images
115
138
116 1) start with running the required database for the build stage in the background.
139 1) start with running the required database for the build stage in the background.
117
140
118 ```shell
141 ```shell
119 docker-compose up --detach database
142 docker-compose up --detach database
120 ```
143 ```
121
144
122 This will start our postgres database, and expose it to the network.
145 This will start our postgres database, and expose it to the network.
146 Postgres DB is configured to output logs into a file `/var/log/rhodecode/postgres`
123
147
124 2) We can now run the full installation. Database needs to be running for the next build command.
148 2) We can now run the full installation. Database needs to be running for the next build command.
125
149 This will build the rhodecode base image used for rhodecode, vcsserver, celery, ssh and svn.
150 Then it will build all other required components.
151
126 ```shell
152 ```shell
127 docker-compose build rhodecode
153 docker-compose build rhodecode
128 docker-compose build
154 docker-compose build
129 ```
155 ```
130
156
131 3) Once we build the rhodecode app, we can run the whole stack using `docker-compose up`
157 3) Once we build all required containers, we can run the whole stack using `docker-compose up`
132
158
133 ```shell
159 ```shell
134 docker-compose up
160 docker-compose up
135 ```
161 ```
136
162
137 If running locally you can access Running RhodeCode via Nginx under:
163 If running locally you can access the running RhodeCode via Nginx under:
138 http://localhost:8888
164 http://localhost:8888
139
165
140 localhost can be changed to the server IP where docker is running.
166 localhost can be changed to the server IP where docker is running.
141
167
142
168
143 In case for bigger setups docker-compose can scale more rhodecode/vcsserver workers:
169 For bigger setups, docker-compose can scale up more rhodecode/vcsserver workers:
144
170
145 ```shell
171 ```shell
146 docker-compose up --scale vcsserver=3 rhodecode=3
172 docker-compose up --scale vcsserver=3 --scale rhodecode=3
147 ```
173 ```
148
174
175 Logging is pushed to stdout from all services.
176
149
177
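Since all services log to stdout, here is a hedged example of tailing the logs of selected
services (service names as used in this compose setup):

```shell
docker-compose logs --follow --tail=100 rhodecode vcsserver
```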
150 Upgrade:
178 ### Upgrade procedure:
151
179
152 - pull the latest rhodecode-docker repo
180 - pull the latest rhodecode-docker repo
153 - check .env file for correct update version
181 - check .env file for correct update version
154 - re-build rhodecode
182 - re-build rhodecode
155 - docker-compose build rhodecode
183 - docker-compose build rhodecode
184 - docker-compose build
156 - docker-compose stop
185 - docker-compose stop
157 - docker-compose up
186 - docker-compose up
158
187
159 # Standalone cluster build from source
188 # Standalone cluster build from source
160
189
161 There's an option to build the latest branches as a docker installation.
190 There's an option to build the latest release from the source code as a docker installation.
191
192 If you already have the source code of the 3 required projects, the next step can be omitted, and the
193 sources can be copied to the `.source/` directory. Note: symlinks don't work.
162
194
163 Download the source:
195 - https://code.rhodecode.com/rhodecode-vcsserver
196 - https://code.rhodecode.com/rhodecode-enterprise-ce
197 - https://code.rhodecode.com/rhodecode-enterprise-ee (assuming access is granted to this)
198
199 Download the source using bootstrap script:
164
200
165 ```shell
201 ```shell
166 cd .boostrap/ && ./download-source.sh && cd ../
202 cd .bootstrap/ && ./download-source.sh && cd ../
167 ```
203 ```
168
204
169 This step will create source code copies into the `.source/` path. e.g
205 This step will create source code copies into the `.source/` path. e.g
170 ```
206 ```
171 -rw-r--r-- 1 docker docker 0 Nov 25 12:27 .dirkeep
207 -rw-r--r-- 1 docker docker 0 Nov 25 12:27 .dirkeep
172 drwxr-xr-x 1 docker docker 1184 Nov 25 12:27 rhodecode-enterprise-ce
208 drwxr-xr-x 1 docker docker 1184 Nov 25 12:27 rhodecode-enterprise-ce
173 drwxr-xr-x 1 docker docker 1120 Nov 25 12:27 rhodecode-enterprise-ee
209 drwxr-xr-x 1 docker docker 1120 Nov 25 12:27 rhodecode-enterprise-ee
174 drwxr-xr-x 1 docker docker 800 Nov 25 12:27 rhodecode-vcsserver
210 drwxr-xr-x 1 docker docker 800 Nov 25 12:27 rhodecode-vcsserver
175 ```
211 ```
176
212
177 If you have the source already, this step can be omitted, and the
213 With this done, you can now proceed with every step of the normal installation (Creating & building images), but instead of using
178 sources can be linked or copied to this directory
214 just the `docker-compose` command, it needs to be replaced with `docker-compose -f docker-compose.yaml -f docker-compose.source.yaml`
179
180
215
181 Build the source based image
216 For example, to override the installer build with the source-based `rhodecode` image, and also set the proper version, run:
182
217
183 ```shell
218 ```shell
184 docker-compose -f docker-compose.yaml -f docker-compose.source.yaml build --no-cache --progress=plain rhodecode
219 RC_VERSION="4.27.0.REL202221020" docker-compose -f docker-compose-apps.yaml -f docker-compose.source.yaml build --no-cache --progress=plain rhodecode
185 ```
220 ```
186
221
187 to create a source install override and build based on the downloaded sources.
222 NOTE: it's recommended to keep RC_VERSION and SOURCE_VER the same
188
189
190 Logging is pushed to stdout from all services.
191
223
192 ## Simple build
224 ## Simple build
193
225
194 Build docker RhodeCode `Community` without any dependencies (redis, external db) using
226 Build docker RhodeCode `Community` without any dependencies (redis, external db) using
195 simple sqlite database and file based caches.
227 simple sqlite database and file based caches.
196 This is a fully running instance good for small use with 3-5 users.
228 This is a fully running instance good for small use with 3-5 users.
197
229
198 ```shell
230 ```shell
199 docker build -t rhodecode:4.23.2 -f rhodecode.dockerfile \
231 docker build -t rhodecode/rhodecode-ce:4.23.2 -f rhodecode.dockerfile \
200 -e RHODECODE_TYPE=Community \
232 --build-arg RHODECODE_TYPE=Community \
201 -e RHODECODE_VERSION=4.23.2 \
233 --build-arg RHODECODE_VERSION=4.23.2 \
202 -e RHODECODE_DB=sqlite \
234 --build-arg RHODECODE_DB=sqlite \
203 -e RHODECODE_USER_NAME=admin \
235 --build-arg RHODECODE_USER_NAME=admin \
204 -e RHODECODE_USER_PASS=secret4 \
236 --build-arg RHODECODE_USER_PASS=secret4 \
205 -e RHODECODE_USER_EMAIL=support@rhodecode.com \
237 --build-arg RHODECODE_USER_EMAIL=support@rhodecode.com \
206 .
238 .
207 ```
239 ```
208
240
209 note: for debugging better to add `--progress plain` into the build command to obtain all the output from the build.
241 note: for debugging, it's better to add `--progress plain` to the build command to obtain all the output from the build.
210 To Build against existing running Postgres or MySQL you can specify:
242 To build against an existing running Postgres or MySQL you can specify:
211
243
212 --build-arg RHODECODE_DB=postgresql://postgres:secret@database/rhodecode
244 --build-arg RHODECODE_DB=postgresql://postgres:secret@database/rhodecode
213 --build-arg RHODECODE_DB=mysql://root:secret@localhost/rhodecode?charset=utf8
245 --build-arg RHODECODE_DB=mysql://root:secret@localhost/rhodecode?charset=utf8
214
246
215
247
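For example, a sketch of the simple build wired to an already running Postgres (the
credentials and the `database` host name come from the connection string above and are only
illustrative):

```shell
docker build -t rhodecode/rhodecode-ce:4.23.2 -f rhodecode.dockerfile \
 --build-arg RHODECODE_TYPE=Community \
 --build-arg RHODECODE_VERSION=4.23.2 \
 --build-arg RHODECODE_DB=postgresql://postgres:secret@database/rhodecode \
 --build-arg RHODECODE_USER_NAME=admin \
 --build-arg RHODECODE_USER_PASS=secret4 \
 --build-arg RHODECODE_USER_EMAIL=support@rhodecode.com \
 .
```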
216 To copy over the data into volumes use such command:
248 To copy data over into the volumes, use a command such as:
217 ```shell
249 ```shell
218 docker run -v logvolume:/data --name data_vol busybox true
250 docker run -v logvolume:/data --name data_vol busybox true
219 docker cp . data_vol:/data
251 docker cp . data_vol:/data
220 docker rm data_vol
252 docker rm data_vol
221 ```
253 ```
222
254
223 Run the container, mounting the required volumes. By default the application would be
255 Run the container, mounting the required volumes. By default the application would be
224 available at http://localhost:10020, and default login is (unless specified differently in the build command)
256 available at http://localhost:10020, and default login is (unless specified differently in the build command)
225
257
226 ```
258 ```
227 user: admin
259 user: admin
228 password: secret4
260 password: secret4
229 ```
261 ```
230
262
231 We've not built our image using specific version. It's time to run it:
263 We've now built our image using a specific version. It's time to run it:
264 We specify the run.ini by mounting the config file we have locally
232
265
233 ```shell
266 ```shell
234 docker run \
267 docker run \
235 --name rhodecode-container \
268 --name rhodecode-container \
236 --publish 10020:10020 \
269 --publish 10020:10020 \
237 --restart unless-stopped \
270 --restart unless-stopped \
238 --volume $PWD/config:/etc/rhodecode/conf \
271 --volume $PWD/config:/etc/rhodecode/conf \
272 --volume $PWD/config/rhodecode.optimized.ini:/etc/rhodecode/conf_build/run.ini \
239 --volume $PWD/logs:/var/log/rhodecode \
273 --volume $PWD/logs:/var/log/rhodecode \
240 'rhodecode:4.23.2'
274 'rhodecode/rhodecode-ee:4.23.2'
241 ```
275 ```
242
276
243 Enter container
277 Enter container
244
278
245 ```shell
279 ```shell
246 docker exec -it rhodecode-container /bin/bash
280 docker exec -it rhodecode-container /bin/bash
247 ```
281 ```
248
282
249 Enter interactive shell
283 Enter interactive shell
250
284
251 ```shell
285 ```shell
252 docker exec -it rhodecode-container /var/opt/rhodecode_bin/bin/rc-ishell /etc/rhodecode/conf/rhodecode.ini
286 docker exec -it rhodecode-container /usr/local/bin/rhodecode_bin/bin/rc-ishell /etc/rhodecode/conf/rhodecode.ini
253 ```
287 ```
254
288
255 Run Database migrations
289 Run Database migrations
256 ```shell
290 ```shell
257 docker exec -it rhodecode-container /var/opt/rhodecode_bin/bin/rc-upgrade-db /etc/rhodecode/conf/rhodecode.ini --force-yes
291 docker exec -it rhodecode-container /usr/local/bin/rhodecode_bin/bin/rc-upgrade-db /etc/rhodecode/conf/rhodecode.ini --force-yes
258 ``` No newline at end of file
292 ```
293
294
295
296 ### Registry for docker swarm
297
298 docker run -d -p 5000:5000 --restart always --name registry registry:2 No newline at end of file
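A hedged example of pushing a locally built image into that registry (the image tag comes
from the simple build above; the registry address assumes it runs on the same host):

```shell
docker tag rhodecode/rhodecode-ce:4.23.2 localhost:5000/rhodecode/rhodecode-ce:4.23.2
docker push localhost:5000/rhodecode/rhodecode-ce:4.23.2
```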
1 NO CONTENT: file renamed from config/compose/rhodecode_enterprise.license to config/_shared/rhodecode_enterprise.license
NO CONTENT: file renamed from config/compose/rhodecode_enterprise.license to config/_shared/rhodecode_enterprise.license
1 NO CONTENT: file renamed from service/database/mysql_customized.conf to config/database/mysql_customized.conf
NO CONTENT: file renamed from service/database/mysql_customized.conf to config/database/mysql_customized.conf
@@ -1,808 +1,810 b''
1 ## docker run -i --rm postgres cat /usr/share/postgresql/postgresql.conf.sample > my-postgres.conf
1 # -----------------------------
2 # -----------------------------
2 # PostgreSQL configuration file
3 # PostgreSQL configuration file
3 # -----------------------------
4 # -----------------------------
4 #
5 #
5 # This file consists of lines of the form:
6 # This file consists of lines of the form:
6 #
7 #
7 # name = value
8 # name = value
8 #
9 #
9 # (The "=" is optional.) Whitespace may be used. Comments are introduced with
10 # (The "=" is optional.) Whitespace may be used. Comments are introduced with
10 # "#" anywhere on a line. The complete list of parameter names and allowed
11 # "#" anywhere on a line. The complete list of parameter names and allowed
11 # values can be found in the PostgreSQL documentation.
12 # values can be found in the PostgreSQL documentation.
12 #
13 #
13 # The commented-out settings shown in this file represent the default values.
14 # The commented-out settings shown in this file represent the default values.
14 # Re-commenting a setting is NOT sufficient to revert it to the default value;
15 # Re-commenting a setting is NOT sufficient to revert it to the default value;
15 # you need to reload the server.
16 # you need to reload the server.
16 #
17 #
17 # This file is read on server startup and when the server receives a SIGHUP
18 # This file is read on server startup and when the server receives a SIGHUP
18 # signal. If you edit the file on a running system, you have to SIGHUP the
19 # signal. If you edit the file on a running system, you have to SIGHUP the
19 # server for the changes to take effect, run "pg_ctl reload", or execute
20 # server for the changes to take effect, run "pg_ctl reload", or execute
20 # "SELECT pg_reload_conf()". Some parameters, which are marked below,
21 # "SELECT pg_reload_conf()". Some parameters, which are marked below,
21 # require a server shutdown and restart to take effect.
22 # require a server shutdown and restart to take effect.
22 #
23 #
23 # Any parameter can also be given as a command-line option to the server, e.g.,
24 # Any parameter can also be given as a command-line option to the server, e.g.,
24 # "postgres -c log_connections=on". Some parameters can be changed at run time
25 # "postgres -c log_connections=on". Some parameters can be changed at run time
25 # with the "SET" SQL command.
26 # with the "SET" SQL command.
26 #
27 #
27 # Memory units: B = bytes Time units: us = microseconds
28 # Memory units: B = bytes Time units: us = microseconds
28 # kB = kilobytes ms = milliseconds
29 # kB = kilobytes ms = milliseconds
29 # MB = megabytes s = seconds
30 # MB = megabytes s = seconds
30 # GB = gigabytes min = minutes
31 # GB = gigabytes min = minutes
31 # TB = terabytes h = hours
32 # TB = terabytes h = hours
32 # d = days
33 # d = days
33
34
34
35
35 #------------------------------------------------------------------------------
36 #------------------------------------------------------------------------------
36 # FILE LOCATIONS
37 # FILE LOCATIONS
37 #------------------------------------------------------------------------------
38 #------------------------------------------------------------------------------
38
39
39 # The default values of these variables are driven from the -D command-line
40 # The default values of these variables are driven from the -D command-line
40 # option or PGDATA environment variable, represented here as ConfigDir.
41 # option or PGDATA environment variable, represented here as ConfigDir.
41
42
42 #data_directory = 'ConfigDir' # use data in another directory
43 #data_directory = 'ConfigDir' # use data in another directory
43 # (change requires restart)
44 # (change requires restart)
44 #hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file
45 #hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file
45 # (change requires restart)
46 # (change requires restart)
46 #ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file
47 #ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file
47 # (change requires restart)
48 # (change requires restart)
48
49
49 # If external_pid_file is not explicitly set, no extra PID file is written.
50 # If external_pid_file is not explicitly set, no extra PID file is written.
50 #external_pid_file = '' # write an extra PID file
51 #external_pid_file = '' # write an extra PID file
51 # (change requires restart)
52 # (change requires restart)
52
53
53
54
54 #------------------------------------------------------------------------------
55 #------------------------------------------------------------------------------
55 # CONNECTIONS AND AUTHENTICATION
56 # CONNECTIONS AND AUTHENTICATION
56 #------------------------------------------------------------------------------
57 #------------------------------------------------------------------------------
57
58
58 # - Connection Settings -
59 # - Connection Settings -
59
60
60 listen_addresses = '*'
61 listen_addresses = '*'
61 # comma-separated list of addresses;
62 # comma-separated list of addresses;
62 # defaults to 'localhost'; use '*' for all
63 # defaults to 'localhost'; use '*' for all
63 # (change requires restart)
64 # (change requires restart)
64 #port = 5432 # (change requires restart)
65 #port = 5432 # (change requires restart)
65 #max_connections = 100 # (change requires restart)
66 #max_connections = 100 # (change requires restart)
66 #superuser_reserved_connections = 3 # (change requires restart)
67 #superuser_reserved_connections = 3 # (change requires restart)
67 #unix_socket_directories = '/tmp' # comma-separated list of directories
68 #unix_socket_directories = '/tmp' # comma-separated list of directories
68 # (change requires restart)
69 # (change requires restart)
69 #unix_socket_group = '' # (change requires restart)
70 #unix_socket_group = '' # (change requires restart)
70 #unix_socket_permissions = 0777 # begin with 0 to use octal notation
71 #unix_socket_permissions = 0777 # begin with 0 to use octal notation
71 # (change requires restart)
72 # (change requires restart)
72 #bonjour = off # advertise server via Bonjour
73 #bonjour = off # advertise server via Bonjour
73 # (change requires restart)
74 # (change requires restart)
74 #bonjour_name = '' # defaults to the computer name
75 #bonjour_name = '' # defaults to the computer name
75 # (change requires restart)
76 # (change requires restart)
76
77
77 # - TCP settings -
78 # - TCP settings -
78 # see "man tcp" for details
79 # see "man tcp" for details
79
80
80 #tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
81 #tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
81 # 0 selects the system default
82 # 0 selects the system default
82 #tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
83 #tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
83 # 0 selects the system default
84 # 0 selects the system default
84 #tcp_keepalives_count = 0 # TCP_KEEPCNT;
85 #tcp_keepalives_count = 0 # TCP_KEEPCNT;
85 # 0 selects the system default
86 # 0 selects the system default
86 #tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
87 #tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
87 # 0 selects the system default
88 # 0 selects the system default
88
89
89 #client_connection_check_interval = 0 # time between checks for client
90 #client_connection_check_interval = 0 # time between checks for client
90 # disconnection while running queries;
91 # disconnection while running queries;
91 # 0 for never
92 # 0 for never
92
93
93 # - Authentication -
94 # - Authentication -
94
95
95 #authentication_timeout = 1min # 1s-600s
96 #authentication_timeout = 1min # 1s-600s
96 #password_encryption = scram-sha-256 # scram-sha-256 or md5
97 # note: RhodeCode needs to use the old md5 for backward compat
98 password_encryption = md5 # scram-sha-256 or md5
97 #db_user_namespace = off
99 #db_user_namespace = off
98
100
99 # GSSAPI using Kerberos
101 # GSSAPI using Kerberos
100 #krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
102 #krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
101 #krb_caseins_users = off
103 #krb_caseins_users = off
102
104
103 # - SSL -
105 # - SSL -
104
106
105 #ssl = off
107 #ssl = off
106 #ssl_ca_file = ''
108 #ssl_ca_file = ''
107 #ssl_cert_file = 'server.crt'
109 #ssl_cert_file = 'server.crt'
108 #ssl_crl_file = ''
110 #ssl_crl_file = ''
109 #ssl_crl_dir = ''
111 #ssl_crl_dir = ''
110 #ssl_key_file = 'server.key'
112 #ssl_key_file = 'server.key'
111 #ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
113 #ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
112 #ssl_prefer_server_ciphers = on
114 #ssl_prefer_server_ciphers = on
113 #ssl_ecdh_curve = 'prime256v1'
115 #ssl_ecdh_curve = 'prime256v1'
114 #ssl_min_protocol_version = 'TLSv1.2'
116 #ssl_min_protocol_version = 'TLSv1.2'
115 #ssl_max_protocol_version = ''
117 #ssl_max_protocol_version = ''
116 #ssl_dh_params_file = ''
118 #ssl_dh_params_file = ''
117 #ssl_passphrase_command = ''
119 #ssl_passphrase_command = ''
118 #ssl_passphrase_command_supports_reload = off
120 #ssl_passphrase_command_supports_reload = off
119
121
120
122
121 #------------------------------------------------------------------------------
123 #------------------------------------------------------------------------------
122 # RESOURCE USAGE (except WAL)
124 # RESOURCE USAGE (except WAL)
123 #------------------------------------------------------------------------------
125 #------------------------------------------------------------------------------
124
126
125 # - Memory -
127 # - Memory -
126
128
127 #shared_buffers = 32MB # min 128kB
129 #shared_buffers = 32MB # min 128kB
128 # (change requires restart)
130 # (change requires restart)
129 #huge_pages = try # on, off, or try
131 #huge_pages = try # on, off, or try
130 # (change requires restart)
132 # (change requires restart)
131 #huge_page_size = 0 # zero for system default
133 #huge_page_size = 0 # zero for system default
132 # (change requires restart)
134 # (change requires restart)
133 #temp_buffers = 8MB # min 800kB
135 #temp_buffers = 8MB # min 800kB
134 #max_prepared_transactions = 0 # zero disables the feature
136 #max_prepared_transactions = 0 # zero disables the feature
135 # (change requires restart)
137 # (change requires restart)
136 # Caution: it is not advisable to set max_prepared_transactions nonzero unless
138 # Caution: it is not advisable to set max_prepared_transactions nonzero unless
137 # you actively intend to use prepared transactions.
139 # you actively intend to use prepared transactions.
138 #work_mem = 4MB # min 64kB
140 #work_mem = 4MB # min 64kB
139 #hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem
141 #hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem
140 #maintenance_work_mem = 64MB # min 1MB
142 #maintenance_work_mem = 64MB # min 1MB
141 #autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
143 #autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
142 #logical_decoding_work_mem = 64MB # min 64kB
144 #logical_decoding_work_mem = 64MB # min 64kB
143 #max_stack_depth = 2MB # min 100kB
145 #max_stack_depth = 2MB # min 100kB
144 #shared_memory_type = mmap # the default is the first option
146 #shared_memory_type = mmap # the default is the first option
145 # supported by the operating system:
147 # supported by the operating system:
146 # mmap
148 # mmap
147 # sysv
149 # sysv
148 # windows
150 # windows
149 # (change requires restart)
151 # (change requires restart)
150 #dynamic_shared_memory_type = posix # the default is the first option
152 #dynamic_shared_memory_type = posix # the default is the first option
151 # supported by the operating system:
153 # supported by the operating system:
152 # posix
154 # posix
153 # sysv
155 # sysv
154 # windows
156 # windows
155 # mmap
157 # mmap
156 # (change requires restart)
158 # (change requires restart)
157 #min_dynamic_shared_memory = 0MB # (change requires restart)
159 #min_dynamic_shared_memory = 0MB # (change requires restart)
158
160
159 # - Disk -
161 # - Disk -
160
162
161 #temp_file_limit = -1 # limits per-process temp file space
163 #temp_file_limit = -1 # limits per-process temp file space
162 # in kilobytes, or -1 for no limit
164 # in kilobytes, or -1 for no limit
163
165
164 # - Kernel Resources -
166 # - Kernel Resources -
165
167
166 #max_files_per_process = 1000 # min 64
168 #max_files_per_process = 1000 # min 64
167 # (change requires restart)
169 # (change requires restart)
168
170
169 # - Cost-Based Vacuum Delay -
171 # - Cost-Based Vacuum Delay -
170
172
171 #vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
173 #vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
172 #vacuum_cost_page_hit = 1 # 0-10000 credits
174 #vacuum_cost_page_hit = 1 # 0-10000 credits
173 #vacuum_cost_page_miss = 2 # 0-10000 credits
175 #vacuum_cost_page_miss = 2 # 0-10000 credits
174 #vacuum_cost_page_dirty = 20 # 0-10000 credits
176 #vacuum_cost_page_dirty = 20 # 0-10000 credits
175 #vacuum_cost_limit = 200 # 1-10000 credits
177 #vacuum_cost_limit = 200 # 1-10000 credits
176
178
177 # - Background Writer -
179 # - Background Writer -
178
180
179 #bgwriter_delay = 200ms # 10-10000ms between rounds
181 #bgwriter_delay = 200ms # 10-10000ms between rounds
180 #bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
182 #bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
181 #bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
183 #bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
182 #bgwriter_flush_after = 0 # measured in pages, 0 disables
184 #bgwriter_flush_after = 0 # measured in pages, 0 disables
183
185
184 # - Asynchronous Behavior -
186 # - Asynchronous Behavior -
185
187
186 #backend_flush_after = 0 # measured in pages, 0 disables
188 #backend_flush_after = 0 # measured in pages, 0 disables
187 #effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
189 #effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
188 #maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching
190 #maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching
189 #max_worker_processes = 8 # (change requires restart)
191 #max_worker_processes = 8 # (change requires restart)
190 #max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
192 #max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
191 #max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
193 #max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
192 #max_parallel_workers = 8 # maximum number of max_worker_processes that
194 #max_parallel_workers = 8 # maximum number of max_worker_processes that
193 # can be used in parallel operations
195 # can be used in parallel operations
194 #parallel_leader_participation = on
196 #parallel_leader_participation = on
195 #old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
197 #old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
196 # (change requires restart)
198 # (change requires restart)
197
199
198
200
199 #------------------------------------------------------------------------------
201 #------------------------------------------------------------------------------
200 # WRITE-AHEAD LOG
202 # WRITE-AHEAD LOG
201 #------------------------------------------------------------------------------
203 #------------------------------------------------------------------------------
202
204
203 # - Settings -
205 # - Settings -
204
206
205 #wal_level = replica # minimal, replica, or logical
207 #wal_level = replica # minimal, replica, or logical
206 # (change requires restart)
208 # (change requires restart)
207 #fsync = on # flush data to disk for crash safety
209 #fsync = on # flush data to disk for crash safety
208 # (turning this off can cause
210 # (turning this off can cause
209 # unrecoverable data corruption)
211 # unrecoverable data corruption)
210 #synchronous_commit = on # synchronization level;
212 #synchronous_commit = on # synchronization level;
211 # off, local, remote_write, remote_apply, or on
213 # off, local, remote_write, remote_apply, or on
212 #wal_sync_method = fsync # the default is the first option
214 #wal_sync_method = fsync # the default is the first option
213 # supported by the operating system:
215 # supported by the operating system:
214 # open_datasync
216 # open_datasync
215 # fdatasync (default on Linux and FreeBSD)
217 # fdatasync (default on Linux and FreeBSD)
216 # fsync
218 # fsync
217 # fsync_writethrough
219 # fsync_writethrough
218 # open_sync
220 # open_sync
219 #full_page_writes = on # recover from partial page writes
221 #full_page_writes = on # recover from partial page writes
220 #wal_log_hints = off # also do full page writes of non-critical updates
222 #wal_log_hints = off # also do full page writes of non-critical updates
221 # (change requires restart)
223 # (change requires restart)
222 #wal_compression = off # enable compression of full-page writes
224 #wal_compression = off # enable compression of full-page writes
223 #wal_init_zero = on # zero-fill new WAL files
225 #wal_init_zero = on # zero-fill new WAL files
224 #wal_recycle = on # recycle WAL files
226 #wal_recycle = on # recycle WAL files
225 #wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
227 #wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
226 # (change requires restart)
228 # (change requires restart)
227 #wal_writer_delay = 200ms # 1-10000 milliseconds
229 #wal_writer_delay = 200ms # 1-10000 milliseconds
228 #wal_writer_flush_after = 1MB # measured in pages, 0 disables
230 #wal_writer_flush_after = 1MB # measured in pages, 0 disables
229 #wal_skip_threshold = 2MB
231 #wal_skip_threshold = 2MB
230
232
231 #commit_delay = 0 # range 0-100000, in microseconds
233 #commit_delay = 0 # range 0-100000, in microseconds
232 #commit_siblings = 5 # range 1-1000
234 #commit_siblings = 5 # range 1-1000
233
235
234 # - Checkpoints -
236 # - Checkpoints -
235
237
236 #checkpoint_timeout = 5min # range 30s-1d
238 #checkpoint_timeout = 5min # range 30s-1d
237 #checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0
239 #checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0
238 #checkpoint_flush_after = 0 # measured in pages, 0 disables
240 #checkpoint_flush_after = 0 # measured in pages, 0 disables
239 #checkpoint_warning = 30s # 0 disables
241 #checkpoint_warning = 30s # 0 disables
240 #max_wal_size = 1GB
242 #max_wal_size = 1GB
241 #min_wal_size = 80MB
243 #min_wal_size = 80MB
242
244
243 # - Archiving -
245 # - Archiving -
244
246
245 #archive_mode = off # enables archiving; off, on, or always
247 #archive_mode = off # enables archiving; off, on, or always
246 # (change requires restart)
248 # (change requires restart)
247 #archive_command = '' # command to use to archive a logfile segment
249 #archive_command = '' # command to use to archive a logfile segment
248 # placeholders: %p = path of file to archive
250 # placeholders: %p = path of file to archive
249 # %f = file name only
251 # %f = file name only
250 # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
252 # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
251 #archive_timeout = 0 # force a logfile segment switch after this
253 #archive_timeout = 0 # force a logfile segment switch after this
252 # number of seconds; 0 disables
254 # number of seconds; 0 disables
253
255
254 # - Archive Recovery -
256 # - Archive Recovery -
255
257
256 # These are only used in recovery mode.
258 # These are only used in recovery mode.
257
259
258 #restore_command = '' # command to use to restore an archived logfile segment
260 #restore_command = '' # command to use to restore an archived logfile segment
259 # placeholders: %p = path of file to restore
261 # placeholders: %p = path of file to restore
260 # %f = file name only
262 # %f = file name only
261 # e.g. 'cp /mnt/server/archivedir/%f %p'
263 # e.g. 'cp /mnt/server/archivedir/%f %p'
262 #archive_cleanup_command = '' # command to execute at every restartpoint
264 #archive_cleanup_command = '' # command to execute at every restartpoint
263 #recovery_end_command = '' # command to execute at completion of recovery
265 #recovery_end_command = '' # command to execute at completion of recovery
264
266
265 # - Recovery Target -
267 # - Recovery Target -
266
268
267 # Set these only when performing a targeted recovery.
269 # Set these only when performing a targeted recovery.
268
270
269 #recovery_target = '' # 'immediate' to end recovery as soon as a
271 #recovery_target = '' # 'immediate' to end recovery as soon as a
270 # consistent state is reached
272 # consistent state is reached
271 # (change requires restart)
273 # (change requires restart)
272 #recovery_target_name = '' # the named restore point to which recovery will proceed
274 #recovery_target_name = '' # the named restore point to which recovery will proceed
273 # (change requires restart)
275 # (change requires restart)
274 #recovery_target_time = '' # the time stamp up to which recovery will proceed
276 #recovery_target_time = '' # the time stamp up to which recovery will proceed
275 # (change requires restart)
277 # (change requires restart)
276 #recovery_target_xid = '' # the transaction ID up to which recovery will proceed
278 #recovery_target_xid = '' # the transaction ID up to which recovery will proceed
277 # (change requires restart)
279 # (change requires restart)
278 #recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
280 #recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
279 # (change requires restart)
281 # (change requires restart)
280 #recovery_target_inclusive = on # Specifies whether to stop:
282 #recovery_target_inclusive = on # Specifies whether to stop:
281 # just after the specified recovery target (on)
283 # just after the specified recovery target (on)
282 # just before the recovery target (off)
284 # just before the recovery target (off)
283 # (change requires restart)
285 # (change requires restart)
284 #recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
286 #recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
285 # (change requires restart)
287 # (change requires restart)
286 #recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
288 #recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
287 # (change requires restart)
289 # (change requires restart)
288
290
289
291
290 #------------------------------------------------------------------------------
292 #------------------------------------------------------------------------------
291 # REPLICATION
293 # REPLICATION
292 #------------------------------------------------------------------------------
294 #------------------------------------------------------------------------------
293
295
294 # - Sending Servers -
296 # - Sending Servers -
295
297
296 # Set these on the primary and on any standby that will send replication data.
298 # Set these on the primary and on any standby that will send replication data.
297
299
298 #max_wal_senders = 10 # max number of walsender processes
300 #max_wal_senders = 10 # max number of walsender processes
299 # (change requires restart)
301 # (change requires restart)
300 #max_replication_slots = 10 # max number of replication slots
302 #max_replication_slots = 10 # max number of replication slots
301 # (change requires restart)
303 # (change requires restart)
302 #wal_keep_size = 0 # in megabytes; 0 disables
304 #wal_keep_size = 0 # in megabytes; 0 disables
303 #max_slot_wal_keep_size = -1 # in megabytes; -1 disables
305 #max_slot_wal_keep_size = -1 # in megabytes; -1 disables
304 #wal_sender_timeout = 60s # in milliseconds; 0 disables
306 #wal_sender_timeout = 60s # in milliseconds; 0 disables
305 #track_commit_timestamp = off # collect timestamp of transaction commit
307 #track_commit_timestamp = off # collect timestamp of transaction commit
306 # (change requires restart)
308 # (change requires restart)
307
309
308 # - Primary Server -
310 # - Primary Server -
309
311
310 # These settings are ignored on a standby server.
312 # These settings are ignored on a standby server.
311
313
312 #synchronous_standby_names = '' # standby servers that provide sync rep
314 #synchronous_standby_names = '' # standby servers that provide sync rep
313 # method to choose sync standbys, number of sync standbys,
315 # method to choose sync standbys, number of sync standbys,
314 # and comma-separated list of application_name
316 # and comma-separated list of application_name
315 # from standby(s); '*' = all
317 # from standby(s); '*' = all
316 #vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
318 #vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
317
319
318 # - Standby Servers -
320 # - Standby Servers -
319
321
320 # These settings are ignored on a primary server.
322 # These settings are ignored on a primary server.
321
323
322 #primary_conninfo = '' # connection string to sending server
324 #primary_conninfo = '' # connection string to sending server
323 #primary_slot_name = '' # replication slot on sending server
325 #primary_slot_name = '' # replication slot on sending server
324 #promote_trigger_file = '' # file name whose presence ends recovery
326 #promote_trigger_file = '' # file name whose presence ends recovery
325 #hot_standby = on # "off" disallows queries during recovery
327 #hot_standby = on # "off" disallows queries during recovery
326 # (change requires restart)
328 # (change requires restart)
327 #max_standby_archive_delay = 30s # max delay before canceling queries
329 #max_standby_archive_delay = 30s # max delay before canceling queries
328 # when reading WAL from archive;
330 # when reading WAL from archive;
329 # -1 allows indefinite delay
331 # -1 allows indefinite delay
330 #max_standby_streaming_delay = 30s # max delay before canceling queries
332 #max_standby_streaming_delay = 30s # max delay before canceling queries
331 # when reading streaming WAL;
333 # when reading streaming WAL;
332 # -1 allows indefinite delay
334 # -1 allows indefinite delay
333 #wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name
335 #wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name
334 # is not set
336 # is not set
335 #wal_receiver_status_interval = 10s # send replies at least this often
337 #wal_receiver_status_interval = 10s # send replies at least this often
336 # 0 disables
338 # 0 disables
337 #hot_standby_feedback = off # send info from standby to prevent
339 #hot_standby_feedback = off # send info from standby to prevent
338 # query conflicts
340 # query conflicts
339 #wal_receiver_timeout = 60s # time that receiver waits for
341 #wal_receiver_timeout = 60s # time that receiver waits for
340 # communication from primary
342 # communication from primary
341 # in milliseconds; 0 disables
343 # in milliseconds; 0 disables
342 #wal_retrieve_retry_interval = 5s # time to wait before retrying to
344 #wal_retrieve_retry_interval = 5s # time to wait before retrying to
343 # retrieve WAL after a failed attempt
345 # retrieve WAL after a failed attempt
344 #recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
346 #recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
345
347
346 # - Subscribers -
348 # - Subscribers -
347
349
348 # These settings are ignored on a publisher.
350 # These settings are ignored on a publisher.
349
351
350 #max_logical_replication_workers = 4 # taken from max_worker_processes
352 #max_logical_replication_workers = 4 # taken from max_worker_processes
351 # (change requires restart)
353 # (change requires restart)
352 #max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
354 #max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
353
355
354
356
355 #------------------------------------------------------------------------------
357 #------------------------------------------------------------------------------
356 # QUERY TUNING
358 # QUERY TUNING
357 #------------------------------------------------------------------------------
359 #------------------------------------------------------------------------------
358
360
359 # - Planner Method Configuration -
361 # - Planner Method Configuration -
360
362
361 #enable_async_append = on
363 #enable_async_append = on
362 #enable_bitmapscan = on
364 #enable_bitmapscan = on
363 #enable_gathermerge = on
365 #enable_gathermerge = on
364 #enable_hashagg = on
366 #enable_hashagg = on
365 #enable_hashjoin = on
367 #enable_hashjoin = on
366 #enable_incremental_sort = on
368 #enable_incremental_sort = on
367 #enable_indexscan = on
369 #enable_indexscan = on
368 #enable_indexonlyscan = on
370 #enable_indexonlyscan = on
369 #enable_material = on
371 #enable_material = on
370 #enable_memoize = on
372 #enable_memoize = on
371 #enable_mergejoin = on
373 #enable_mergejoin = on
372 #enable_nestloop = on
374 #enable_nestloop = on
373 #enable_parallel_append = on
375 #enable_parallel_append = on
374 #enable_parallel_hash = on
376 #enable_parallel_hash = on
375 #enable_partition_pruning = on
377 #enable_partition_pruning = on
376 #enable_partitionwise_join = off
378 #enable_partitionwise_join = off
377 #enable_partitionwise_aggregate = off
379 #enable_partitionwise_aggregate = off
378 #enable_seqscan = on
380 #enable_seqscan = on
379 #enable_sort = on
381 #enable_sort = on
380 #enable_tidscan = on
382 #enable_tidscan = on
381
383
382 # - Planner Cost Constants -
384 # - Planner Cost Constants -
383
385
384 #seq_page_cost = 1.0 # measured on an arbitrary scale
386 #seq_page_cost = 1.0 # measured on an arbitrary scale
385 #random_page_cost = 4.0 # same scale as above
387 #random_page_cost = 4.0 # same scale as above
386 #cpu_tuple_cost = 0.01 # same scale as above
388 #cpu_tuple_cost = 0.01 # same scale as above
387 #cpu_index_tuple_cost = 0.005 # same scale as above
389 #cpu_index_tuple_cost = 0.005 # same scale as above
388 #cpu_operator_cost = 0.0025 # same scale as above
390 #cpu_operator_cost = 0.0025 # same scale as above
389 #parallel_setup_cost = 1000.0 # same scale as above
391 #parallel_setup_cost = 1000.0 # same scale as above
390 #parallel_tuple_cost = 0.1 # same scale as above
392 #parallel_tuple_cost = 0.1 # same scale as above
391 #min_parallel_table_scan_size = 8MB
393 #min_parallel_table_scan_size = 8MB
392 #min_parallel_index_scan_size = 512kB
394 #min_parallel_index_scan_size = 512kB
393 #effective_cache_size = 4GB
395 #effective_cache_size = 4GB
394
396
395 #jit_above_cost = 100000 # perform JIT compilation if available
397 #jit_above_cost = 100000 # perform JIT compilation if available
396 # and query more expensive than this;
398 # and query more expensive than this;
397 # -1 disables
399 # -1 disables
398 #jit_inline_above_cost = 500000 # inline small functions if query is
400 #jit_inline_above_cost = 500000 # inline small functions if query is
399 # more expensive than this; -1 disables
401 # more expensive than this; -1 disables
400 #jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
402 #jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
401 # query is more expensive than this;
403 # query is more expensive than this;
402 # -1 disables
404 # -1 disables
403
405
404 # - Genetic Query Optimizer -
406 # - Genetic Query Optimizer -
405
407
406 #geqo = on
408 #geqo = on
407 #geqo_threshold = 12
409 #geqo_threshold = 12
408 #geqo_effort = 5 # range 1-10
410 #geqo_effort = 5 # range 1-10
409 #geqo_pool_size = 0 # selects default based on effort
411 #geqo_pool_size = 0 # selects default based on effort
410 #geqo_generations = 0 # selects default based on effort
412 #geqo_generations = 0 # selects default based on effort
411 #geqo_selection_bias = 2.0 # range 1.5-2.0
413 #geqo_selection_bias = 2.0 # range 1.5-2.0
412 #geqo_seed = 0.0 # range 0.0-1.0
414 #geqo_seed = 0.0 # range 0.0-1.0
413
415
414 # - Other Planner Options -
416 # - Other Planner Options -
415
417
416 #default_statistics_target = 100 # range 1-10000
418 #default_statistics_target = 100 # range 1-10000
417 #constraint_exclusion = partition # on, off, or partition
419 #constraint_exclusion = partition # on, off, or partition
418 #cursor_tuple_fraction = 0.1 # range 0.0-1.0
420 #cursor_tuple_fraction = 0.1 # range 0.0-1.0
419 #from_collapse_limit = 8
421 #from_collapse_limit = 8
420 #jit = on # allow JIT compilation
422 #jit = on # allow JIT compilation
421 #join_collapse_limit = 8 # 1 disables collapsing of explicit
423 #join_collapse_limit = 8 # 1 disables collapsing of explicit
422 # JOIN clauses
424 # JOIN clauses
423 #plan_cache_mode = auto # auto, force_generic_plan or
425 #plan_cache_mode = auto # auto, force_generic_plan or
424 # force_custom_plan
426 # force_custom_plan
425
427
426
428
427 #------------------------------------------------------------------------------
429 #------------------------------------------------------------------------------
428 # REPORTING AND LOGGING
430 # REPORTING AND LOGGING
429 #------------------------------------------------------------------------------
431 #------------------------------------------------------------------------------
430
432
431 # - Where to Log -
433 # - Where to Log -
432
434
433 log_destination = 'stderr' # Valid values are combinations of
435 log_destination = 'stderr' # Valid values are combinations of
434 # stderr, csvlog, syslog, and eventlog,
436 # stderr, csvlog, syslog, and eventlog,
435 # depending on platform. csvlog
437 # depending on platform. csvlog
436 # requires logging_collector to be on.
438 # requires logging_collector to be on.
437
439
438 # This is used when logging to stderr:
440 # This is used when logging to stderr:
439 logging_collector = on # Enable capturing of stderr and csvlog
441 #logging_collector = on # Enable capturing of stderr and csvlog
440 # into log files. Required to be on for
442 # into log files. Required to be on for
441 # csvlogs.
443 # csvlogs.
442 # (change requires restart)
444 # (change requires restart)
443
445
444 # These are only used if logging_collector is on:
446 # These are only used if logging_collector is on:
445 log_directory = '/var/log/rhodecode/postgres' # directory where log files are written,
447 #log_directory = '/var/log/rhodecode/postgres' # directory where log files are written,
446 # can be absolute or relative to PGDATA
448 # can be absolute or relative to PGDATA
447 log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
449 #log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
448 # can include strftime() escapes
450 # can include strftime() escapes
449 #log_file_mode = 0600 # creation mode for log files,
451 #log_file_mode = 0600 # creation mode for log files,
450 # begin with 0 to use octal notation
452 # begin with 0 to use octal notation
451 #log_rotation_age = 1d # Automatic rotation of logfiles will
453 #log_rotation_age = 1d # Automatic rotation of logfiles will
452 # happen after that time. 0 disables.
454 # happen after that time. 0 disables.
453 log_rotation_size = 512MB # Automatic rotation of logfiles will
455 #log_rotation_size = 512MB # Automatic rotation of logfiles will
454 # happen after that much log output.
456 # happen after that much log output.
455 # 0 disables.
457 # 0 disables.
456 #log_truncate_on_rotation = off # If on, an existing log file with the
458 #log_truncate_on_rotation = off # If on, an existing log file with the
457 # same name as the new log file will be
459 # same name as the new log file will be
458 # truncated rather than appended to.
460 # truncated rather than appended to.
459 # But such truncation only occurs on
461 # But such truncation only occurs on
460 # time-driven rotation, not on restarts
462 # time-driven rotation, not on restarts
461 # or size-driven rotation. Default is
463 # or size-driven rotation. Default is
462 # off, meaning append to existing files
464 # off, meaning append to existing files
463 # in all cases.
465 # in all cases.
464
466
465 # These are relevant when logging to syslog:
467 # These are relevant when logging to syslog:
466 #syslog_facility = 'LOCAL0'
468 #syslog_facility = 'LOCAL0'
467 #syslog_ident = 'postgres'
469 #syslog_ident = 'postgres'
468 #syslog_sequence_numbers = on
470 #syslog_sequence_numbers = on
469 #syslog_split_messages = on
471 #syslog_split_messages = on
470
472
471 # This is only relevant when logging to eventlog (Windows):
473 # This is only relevant when logging to eventlog (Windows):
472 # (change requires restart)
474 # (change requires restart)
473 #event_source = 'PostgreSQL'
475 #event_source = 'PostgreSQL'
474
476
475 # - When to Log -
477 # - When to Log -
476
478
477 #log_min_messages = warning # values in order of decreasing detail:
479 #log_min_messages = warning # values in order of decreasing detail:
478 # debug5
480 # debug5
479 # debug4
481 # debug4
480 # debug3
482 # debug3
481 # debug2
483 # debug2
482 # debug1
484 # debug1
483 # info
485 # info
484 # notice
486 # notice
485 # warning
487 # warning
486 # error
488 # error
487 # log
489 # log
488 # fatal
490 # fatal
489 # panic
491 # panic
490
492
491 #log_min_error_statement = error # values in order of decreasing detail:
493 #log_min_error_statement = error # values in order of decreasing detail:
492 # debug5
494 # debug5
493 # debug4
495 # debug4
494 # debug3
496 # debug3
495 # debug2
497 # debug2
496 # debug1
498 # debug1
497 # info
499 # info
498 # notice
500 # notice
499 # warning
501 # warning
500 # error
502 # error
501 # log
503 # log
502 # fatal
504 # fatal
503 # panic (effectively off)
505 # panic (effectively off)
504
506
505 log_min_duration_statement = 1000 # -1 is disabled, 0 logs all statements
507 log_min_duration_statement = 1000 # -1 is disabled, 0 logs all statements
506 # and their durations, > 0 logs only
508 # and their durations, > 0 logs only
507 # statements running at least this number
509 # statements running at least this number
508 # of milliseconds
510 # of milliseconds
509
511
510 #log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements
512 #log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements
511 # and their durations, > 0 logs only a sample of
513 # and their durations, > 0 logs only a sample of
512 # statements running at least this number
514 # statements running at least this number
513 # of milliseconds;
515 # of milliseconds;
514 # sample fraction is determined by log_statement_sample_rate
516 # sample fraction is determined by log_statement_sample_rate
515
517
516 #log_statement_sample_rate = 1.0 # fraction of logged statements exceeding
518 #log_statement_sample_rate = 1.0 # fraction of logged statements exceeding
517 # log_min_duration_sample to be logged;
519 # log_min_duration_sample to be logged;
518 # 1.0 logs all such statements, 0.0 never logs
520 # 1.0 logs all such statements, 0.0 never logs
519
521
520
522
521 #log_transaction_sample_rate = 0.0 # fraction of transactions whose statements
523 #log_transaction_sample_rate = 0.0 # fraction of transactions whose statements
522 # are logged regardless of their duration; 1.0 logs all
524 # are logged regardless of their duration; 1.0 logs all
523 # statements from all transactions, 0.0 never logs
525 # statements from all transactions, 0.0 never logs
524
526
525 # - What to Log -
527 # - What to Log -
526
528
527 #debug_print_parse = off
529 #debug_print_parse = off
528 #debug_print_rewritten = off
530 #debug_print_rewritten = off
529 #debug_print_plan = off
531 #debug_print_plan = off
530 #debug_pretty_print = on
532 #debug_pretty_print = on
531 #log_autovacuum_min_duration = -1 # log autovacuum activity;
533 #log_autovacuum_min_duration = -1 # log autovacuum activity;
532 # -1 disables, 0 logs all actions and
534 # -1 disables, 0 logs all actions and
533 # their durations, > 0 logs only
535 # their durations, > 0 logs only
534 # actions running at least this number
536 # actions running at least this number
535 # of milliseconds.
537 # of milliseconds.
536 #log_checkpoints = off
538 #log_checkpoints = off
537 #log_connections = off
539 log_connections = off
538 #log_disconnections = off
540 #log_disconnections = off
539 #log_duration = off
541 #log_duration = off
540 #log_error_verbosity = default # terse, default, or verbose messages
542 #log_error_verbosity = default # terse, default, or verbose messages
541 #log_hostname = off
543 #log_hostname = off
542 #log_line_prefix = '%m [%p] ' # special values:
544 #log_line_prefix = '%m [%p] ' # special values:
543 # %a = application name
545 # %a = application name
544 # %u = user name
546 # %u = user name
545 # %d = database name
547 # %d = database name
546 # %r = remote host and port
548 # %r = remote host and port
547 # %h = remote host
549 # %h = remote host
548 # %b = backend type
550 # %b = backend type
549 # %p = process ID
551 # %p = process ID
550 # %P = process ID of parallel group leader
552 # %P = process ID of parallel group leader
551 # %t = timestamp without milliseconds
553 # %t = timestamp without milliseconds
552 # %m = timestamp with milliseconds
554 # %m = timestamp with milliseconds
553 # %n = timestamp with milliseconds (as a Unix epoch)
555 # %n = timestamp with milliseconds (as a Unix epoch)
554 # %Q = query ID (0 if none or not computed)
556 # %Q = query ID (0 if none or not computed)
555 # %i = command tag
557 # %i = command tag
556 # %e = SQL state
558 # %e = SQL state
557 # %c = session ID
559 # %c = session ID
558 # %l = session line number
560 # %l = session line number
559 # %s = session start timestamp
561 # %s = session start timestamp
560 # %v = virtual transaction ID
562 # %v = virtual transaction ID
561 # %x = transaction ID (0 if none)
563 # %x = transaction ID (0 if none)
562 # %q = stop here in non-session
564 # %q = stop here in non-session
563 # processes
565 # processes
564 # %% = '%'
566 # %% = '%'
565 # e.g. '<%u%%%d> '
567 # e.g. '<%u%%%d> '
566 #log_lock_waits = off # log lock waits >= deadlock_timeout
568 #log_lock_waits = off # log lock waits >= deadlock_timeout
567 #log_recovery_conflict_waits = off # log standby recovery conflict waits
569 #log_recovery_conflict_waits = off # log standby recovery conflict waits
568 # >= deadlock_timeout
570 # >= deadlock_timeout
569 #log_parameter_max_length = -1 # when logging statements, limit logged
571 #log_parameter_max_length = -1 # when logging statements, limit logged
570 # bind-parameter values to N bytes;
572 # bind-parameter values to N bytes;
571 # -1 means print in full, 0 disables
573 # -1 means print in full, 0 disables
572 #log_parameter_max_length_on_error = 0 # when logging an error, limit logged
574 #log_parameter_max_length_on_error = 0 # when logging an error, limit logged
573 # bind-parameter values to N bytes;
575 # bind-parameter values to N bytes;
574 # -1 means print in full, 0 disables
576 # -1 means print in full, 0 disables
575 #log_statement = 'none' # none, ddl, mod, all
577 #log_statement = 'none' # none, ddl, mod, all
576 #log_replication_commands = off
578 #log_replication_commands = off
577 #log_temp_files = -1 # log temporary files equal or larger
579 #log_temp_files = -1 # log temporary files equal or larger
578 # than the specified size in kilobytes;
580 # than the specified size in kilobytes;
579 # -1 disables, 0 logs all temp files
581 # -1 disables, 0 logs all temp files
580 #log_timezone = 'GMT'
582 #log_timezone = 'GMT'
581
583
582
584
583 #------------------------------------------------------------------------------
585 #------------------------------------------------------------------------------
584 # PROCESS TITLE
586 # PROCESS TITLE
585 #------------------------------------------------------------------------------
587 #------------------------------------------------------------------------------
586
588
587 #cluster_name = '' # added to process titles if nonempty
589 #cluster_name = '' # added to process titles if nonempty
588 # (change requires restart)
590 # (change requires restart)
589 #update_process_title = on
591 #update_process_title = on
590
592
591
593
592 #------------------------------------------------------------------------------
594 #------------------------------------------------------------------------------
593 # STATISTICS
595 # STATISTICS
594 #------------------------------------------------------------------------------
596 #------------------------------------------------------------------------------
595
597
596 # - Query and Index Statistics Collector -
598 # - Query and Index Statistics Collector -
597
599
598 #track_activities = on
600 #track_activities = on
599 #track_activity_query_size = 1024 # (change requires restart)
601 #track_activity_query_size = 1024 # (change requires restart)
600 #track_counts = on
602 #track_counts = on
601 #track_io_timing = off
603 #track_io_timing = off
602 #track_wal_io_timing = off
604 #track_wal_io_timing = off
603 #track_functions = none # none, pl, all
605 #track_functions = none # none, pl, all
604 #stats_temp_directory = 'pg_stat_tmp'
606 #stats_temp_directory = 'pg_stat_tmp'
605
607
606
608
607 # - Monitoring -
609 # - Monitoring -
608
610
609 #compute_query_id = auto
611 #compute_query_id = auto
610 #log_statement_stats = off
612 #log_statement_stats = off
611 #log_parser_stats = off
613 #log_parser_stats = off
612 #log_planner_stats = off
614 #log_planner_stats = off
613 #log_executor_stats = off
615 #log_executor_stats = off
614
616
615
617
616 #------------------------------------------------------------------------------
618 #------------------------------------------------------------------------------
617 # AUTOVACUUM
619 # AUTOVACUUM
618 #------------------------------------------------------------------------------
620 #------------------------------------------------------------------------------
619
621
620 #autovacuum = on # Enable autovacuum subprocess? 'on'
622 #autovacuum = on # Enable autovacuum subprocess? 'on'
621 # requires track_counts to also be on.
623 # requires track_counts to also be on.
622 #autovacuum_max_workers = 3 # max number of autovacuum subprocesses
624 #autovacuum_max_workers = 3 # max number of autovacuum subprocesses
623 # (change requires restart)
625 # (change requires restart)
624 #autovacuum_naptime = 1min # time between autovacuum runs
626 #autovacuum_naptime = 1min # time between autovacuum runs
625 #autovacuum_vacuum_threshold = 50 # min number of row updates before
627 #autovacuum_vacuum_threshold = 50 # min number of row updates before
626 # vacuum
628 # vacuum
627 #autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts
629 #autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts
628 # before vacuum; -1 disables insert
630 # before vacuum; -1 disables insert
629 # vacuums
631 # vacuums
630 #autovacuum_analyze_threshold = 50 # min number of row updates before
632 #autovacuum_analyze_threshold = 50 # min number of row updates before
631 # analyze
633 # analyze
632 #autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
634 #autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
633 #autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table
635 #autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table
634 # size before insert vacuum
636 # size before insert vacuum
635 #autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
637 #autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
636 #autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
638 #autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
637 # (change requires restart)
639 # (change requires restart)
638 #autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
640 #autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
639 # before forced vacuum
641 # before forced vacuum
640 # (change requires restart)
642 # (change requires restart)
641 #autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
643 #autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
642 # autovacuum, in milliseconds;
644 # autovacuum, in milliseconds;
643 # -1 means use vacuum_cost_delay
645 # -1 means use vacuum_cost_delay
644 #autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
646 #autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
645 # autovacuum, -1 means use
647 # autovacuum, -1 means use
646 # vacuum_cost_limit
648 # vacuum_cost_limit
647
649
648
650
649 #------------------------------------------------------------------------------
651 #------------------------------------------------------------------------------
650 # CLIENT CONNECTION DEFAULTS
652 # CLIENT CONNECTION DEFAULTS
651 #------------------------------------------------------------------------------
653 #------------------------------------------------------------------------------
652
654
653 # - Statement Behavior -
655 # - Statement Behavior -
654
656
655 #client_min_messages = notice # values in order of decreasing detail:
657 #client_min_messages = notice # values in order of decreasing detail:
656 # debug5
658 # debug5
657 # debug4
659 # debug4
658 # debug3
660 # debug3
659 # debug2
661 # debug2
660 # debug1
662 # debug1
661 # log
663 # log
662 # notice
664 # notice
663 # warning
665 # warning
664 # error
666 # error
665 #search_path = '"$user", public' # schema names
667 #search_path = '"$user", public' # schema names
666 #row_security = on
668 #row_security = on
667 #default_table_access_method = 'heap'
669 #default_table_access_method = 'heap'
668 #default_tablespace = '' # a tablespace name, '' uses the default
670 #default_tablespace = '' # a tablespace name, '' uses the default
669 #default_toast_compression = 'pglz' # 'pglz' or 'lz4'
671 #default_toast_compression = 'pglz' # 'pglz' or 'lz4'
670 #temp_tablespaces = '' # a list of tablespace names, '' uses
672 #temp_tablespaces = '' # a list of tablespace names, '' uses
671 # only default tablespace
673 # only default tablespace
672 #check_function_bodies = on
674 #check_function_bodies = on
673 #default_transaction_isolation = 'read committed'
675 #default_transaction_isolation = 'read committed'
674 #default_transaction_read_only = off
676 #default_transaction_read_only = off
675 #default_transaction_deferrable = off
677 #default_transaction_deferrable = off
676 #session_replication_role = 'origin'
678 #session_replication_role = 'origin'
677 #statement_timeout = 0 # in milliseconds, 0 is disabled
679 #statement_timeout = 0 # in milliseconds, 0 is disabled
678 #lock_timeout = 0 # in milliseconds, 0 is disabled
680 #lock_timeout = 0 # in milliseconds, 0 is disabled
679 #idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
681 #idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
680 #idle_session_timeout = 0 # in milliseconds, 0 is disabled
682 #idle_session_timeout = 0 # in milliseconds, 0 is disabled
681 #vacuum_freeze_table_age = 150000000
683 #vacuum_freeze_table_age = 150000000
682 #vacuum_freeze_min_age = 50000000
684 #vacuum_freeze_min_age = 50000000
683 #vacuum_failsafe_age = 1600000000
685 #vacuum_failsafe_age = 1600000000
684 #vacuum_multixact_freeze_table_age = 150000000
686 #vacuum_multixact_freeze_table_age = 150000000
685 #vacuum_multixact_freeze_min_age = 5000000
687 #vacuum_multixact_freeze_min_age = 5000000
686 #vacuum_multixact_failsafe_age = 1600000000
688 #vacuum_multixact_failsafe_age = 1600000000
687 #bytea_output = 'hex' # hex, escape
689 #bytea_output = 'hex' # hex, escape
688 #xmlbinary = 'base64'
690 #xmlbinary = 'base64'
689 #xmloption = 'content'
691 #xmloption = 'content'
690 #gin_pending_list_limit = 4MB
692 #gin_pending_list_limit = 4MB
691
693
692 # - Locale and Formatting -
694 # - Locale and Formatting -
693
695
694 #datestyle = 'iso, mdy'
696 #datestyle = 'iso, mdy'
695 #intervalstyle = 'postgres'
697 #intervalstyle = 'postgres'
696 #timezone = 'GMT'
698 #timezone = 'GMT'
697 #timezone_abbreviations = 'Default' # Select the set of available time zone
699 #timezone_abbreviations = 'Default' # Select the set of available time zone
698 # abbreviations. Currently, there are
700 # abbreviations. Currently, there are
699 # Default
701 # Default
700 # Australia (historical usage)
702 # Australia (historical usage)
701 # India
703 # India
702 # You can create your own file in
704 # You can create your own file in
703 # share/timezonesets/.
705 # share/timezonesets/.
704 #extra_float_digits = 1 # min -15, max 3; any value >0 actually
706 #extra_float_digits = 1 # min -15, max 3; any value >0 actually
705 # selects precise output mode
707 # selects precise output mode
706 #client_encoding = sql_ascii # actually, defaults to database
708 #client_encoding = sql_ascii # actually, defaults to database
707 # encoding
709 # encoding
708
710
709 # These settings are initialized by initdb, but they can be changed.
711 # These settings are initialized by initdb, but they can be changed.
710 #lc_messages = 'C' # locale for system error message
712 #lc_messages = 'C' # locale for system error message
711 # strings
713 # strings
712 #lc_monetary = 'C' # locale for monetary formatting
714 #lc_monetary = 'C' # locale for monetary formatting
713 #lc_numeric = 'C' # locale for number formatting
715 #lc_numeric = 'C' # locale for number formatting
714 #lc_time = 'C' # locale for time formatting
716 #lc_time = 'C' # locale for time formatting
715
717
716 # default configuration for text search
718 # default configuration for text search
717 #default_text_search_config = 'pg_catalog.simple'
719 #default_text_search_config = 'pg_catalog.simple'
718
720
719 # - Shared Library Preloading -
721 # - Shared Library Preloading -
720
722
721 #local_preload_libraries = ''
723 #local_preload_libraries = ''
722 #session_preload_libraries = ''
724 #session_preload_libraries = ''
723 #shared_preload_libraries = '' # (change requires restart)
725 #shared_preload_libraries = '' # (change requires restart)
724 #jit_provider = 'llvmjit' # JIT library to use
726 #jit_provider = 'llvmjit' # JIT library to use
725
727
726 # - Other Defaults -
728 # - Other Defaults -
727
729
728 #dynamic_library_path = '$libdir'
730 #dynamic_library_path = '$libdir'
729 #extension_destdir = '' # prepend path when loading extensions
731 #extension_destdir = '' # prepend path when loading extensions
730 # and shared objects (added by Debian)
732 # and shared objects (added by Debian)
731 #gin_fuzzy_search_limit = 0
733 #gin_fuzzy_search_limit = 0
732
734
733
735
734 #------------------------------------------------------------------------------
736 #------------------------------------------------------------------------------
735 # LOCK MANAGEMENT
737 # LOCK MANAGEMENT
736 #------------------------------------------------------------------------------
738 #------------------------------------------------------------------------------
737
739
738 #deadlock_timeout = 1s
740 #deadlock_timeout = 1s
739 #max_locks_per_transaction = 64 # min 10
741 #max_locks_per_transaction = 64 # min 10
740 # (change requires restart)
742 # (change requires restart)
741 #max_pred_locks_per_transaction = 64 # min 10
743 #max_pred_locks_per_transaction = 64 # min 10
742 # (change requires restart)
744 # (change requires restart)
743 #max_pred_locks_per_relation = -2 # negative values mean
745 #max_pred_locks_per_relation = -2 # negative values mean
744 # (max_pred_locks_per_transaction
746 # (max_pred_locks_per_transaction
745 # / -max_pred_locks_per_relation) - 1
747 # / -max_pred_locks_per_relation) - 1
746 #max_pred_locks_per_page = 2 # min 0
748 #max_pred_locks_per_page = 2 # min 0
747
749
748
750
749 #------------------------------------------------------------------------------
751 #------------------------------------------------------------------------------
750 # VERSION AND PLATFORM COMPATIBILITY
752 # VERSION AND PLATFORM COMPATIBILITY
751 #------------------------------------------------------------------------------
753 #------------------------------------------------------------------------------
752
754
753 # - Previous PostgreSQL Versions -
755 # - Previous PostgreSQL Versions -
754
756
755 #array_nulls = on
757 #array_nulls = on
756 #backslash_quote = safe_encoding # on, off, or safe_encoding
758 #backslash_quote = safe_encoding # on, off, or safe_encoding
757 #escape_string_warning = on
759 #escape_string_warning = on
758 #lo_compat_privileges = off
760 #lo_compat_privileges = off
759 #quote_all_identifiers = off
761 #quote_all_identifiers = off
760 #standard_conforming_strings = on
762 #standard_conforming_strings = on
761 #synchronize_seqscans = on
763 #synchronize_seqscans = on
762
764
763 # - Other Platforms and Clients -
765 # - Other Platforms and Clients -
764
766
765 #transform_null_equals = off
767 #transform_null_equals = off
766
768
767
769
768 #------------------------------------------------------------------------------
770 #------------------------------------------------------------------------------
769 # ERROR HANDLING
771 # ERROR HANDLING
770 #------------------------------------------------------------------------------
772 #------------------------------------------------------------------------------
771
773
772 #exit_on_error = off # terminate session on any error?
774 #exit_on_error = off # terminate session on any error?
773 #restart_after_crash = on # reinitialize after backend crash?
775 #restart_after_crash = on # reinitialize after backend crash?
774 #data_sync_retry = off # retry or panic on failure to fsync
776 #data_sync_retry = off # retry or panic on failure to fsync
775 # data?
777 # data?
776 # (change requires restart)
778 # (change requires restart)
777 #recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+)
779 #recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+)
778
780
779
781
780 #------------------------------------------------------------------------------
782 #------------------------------------------------------------------------------
781 # CONFIG FILE INCLUDES
783 # CONFIG FILE INCLUDES
782 #------------------------------------------------------------------------------
784 #------------------------------------------------------------------------------
783
785
784 # These options allow settings to be loaded from files other than the
786 # These options allow settings to be loaded from files other than the
785 # default postgresql.conf. Note that these are directives, not variable
787 # default postgresql.conf. Note that these are directives, not variable
786 # assignments, so they can usefully be given more than once.
788 # assignments, so they can usefully be given more than once.
787
789
788 #include_dir = '...' # include files ending in '.conf' from
790 #include_dir = '...' # include files ending in '.conf' from
789 # a directory, e.g., 'conf.d'
791 # a directory, e.g., 'conf.d'
790 #include_if_exists = '...' # include file only if it exists
792 #include_if_exists = '...' # include file only if it exists
791 #include = '...' # include file
793 #include = '...' # include file
792
794
793
795
794 #------------------------------------------------------------------------------
796 #------------------------------------------------------------------------------
795 # CUSTOMIZED OPTIONS
797 # CUSTOMIZED OPTIONS
796 #------------------------------------------------------------------------------
798 #------------------------------------------------------------------------------
797
799
798 # Add settings for extensions here
800 # Add settings for extensions here
799
801
800 max_connections = 400
802 max_connections = 400
801 shared_buffers = 1GB
803 shared_buffers = 1GB
802 effective_cache_size = 3GB
804 effective_cache_size = 3GB
803 work_mem = 2621kB
805 work_mem = 2621kB
804 maintenance_work_mem = 256MB
806 maintenance_work_mem = 256MB
805 checkpoint_completion_target = 0.9
807 checkpoint_completion_target = 0.9
806 wal_buffers = 16MB
808 wal_buffers = 16MB
807 default_statistics_target = 100
809 default_statistics_target = 100
808 password_encryption = md5
810 password_encryption = md5
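
The tuned values under CUSTOMIZED OPTIONS look like pgtune-style output; a minimal sketch of the arithmetic, assuming a host with roughly 4GB of RAM (the RAM figure is an assumption, it is not stated in this file):

    # rough sizing sketch (pgtune-style heuristics, assuming ~4GB RAM and the 400 connections set above)
    #   shared_buffers       = RAM / 4                                   = 1GB
    #   effective_cache_size = RAM * 3/4                                 = 3GB
    #   maintenance_work_mem = RAM / 16                                  = 256MB
    #   work_mem             = (RAM - shared_buffers) / (max_connections * 3)
    #                        = 3072MB / 1200                             ~= 2621kB

Note also that password_encryption = md5 keeps compatibility with older clients; PostgreSQL 14 and later default to scram-sha-256.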
@@ -1,132 +1,135 b''
1 # read more here http://tautt.com/best-nginx-configuration-for-security/
1 # read more here http://tautt.com/best-nginx-configuration-for-security/
2
2
3 # config to disallow the browser from rendering the page inside a frame or iframe
3 # config to disallow the browser from rendering the page inside a frame or iframe
4 # and avoid clickjacking http://en.wikipedia.org/wiki/Clickjacking
4 # and avoid clickjacking http://en.wikipedia.org/wiki/Clickjacking
5 # if you need to allow [i]frames, you can use SAMEORIGIN or even set a URI with ALLOW-FROM uri
5 # if you need to allow [i]frames, you can use SAMEORIGIN or even set a URI with ALLOW-FROM uri
6 # https://developer.mozilla.org/en-US/docs/HTTP/X-Frame-Options
6 # https://developer.mozilla.org/en-US/docs/HTTP/X-Frame-Options
7 #add_header X-Frame-Options SAMEORIGIN;
7 #add_header X-Frame-Options SAMEORIGIN;
8
8
9 # when serving user-supplied content, include an X-Content-Type-Options: nosniff header along with the Content-Type: header,
9 # when serving user-supplied content, include an X-Content-Type-Options: nosniff header along with the Content-Type: header,
10 # to disable content-type sniffing on some browsers.
10 # to disable content-type sniffing on some browsers.
11 # https://www.owasp.org/index.php/List_of_useful_HTTP_headers
11 # https://www.owasp.org/index.php/List_of_useful_HTTP_headers
12 # currently supported in IE > 8 http://blogs.msdn.com/b/ie/archive/2008/09/02/ie8-security-part-vi-beta-2-update.aspx
12 # currently supported in IE > 8 http://blogs.msdn.com/b/ie/archive/2008/09/02/ie8-security-part-vi-beta-2-update.aspx
13 # http://msdn.microsoft.com/en-us/library/ie/gg622941(v=vs.85).aspx
13 # http://msdn.microsoft.com/en-us/library/ie/gg622941(v=vs.85).aspx
14 # 'soon' on Firefox https://bugzilla.mozilla.org/show_bug.cgi?id=471020
14 # 'soon' on Firefox https://bugzilla.mozilla.org/show_bug.cgi?id=471020
15 #add_header X-Content-Type-Options nosniff;
15 #add_header X-Content-Type-Options nosniff;
16
16
17 # This header enables the Cross-site scripting (XSS) filter built into most recent web browsers.
17 # This header enables the Cross-site scripting (XSS) filter built into most recent web browsers.
18 # It's usually enabled by default anyway, so the role of this header is to re-enable the filter for
18 # It's usually enabled by default anyway, so the role of this header is to re-enable the filter for
19 # this particular website if it was disabled by the user.
19 # this particular website if it was disabled by the user.
20 # https://www.owasp.org/index.php/List_of_useful_HTTP_headers
20 # https://www.owasp.org/index.php/List_of_useful_HTTP_headers
21 #add_header X-XSS-Protection "1; mode=block";
21 #add_header X-XSS-Protection "1; mode=block";
22
22
23 # with Content Security Policy (CSP) enabled (and a browser that supports it: http://caniuse.com/#feat=contentsecuritypolicy),
23 # with Content Security Policy (CSP) enabled (and a browser that supports it: http://caniuse.com/#feat=contentsecuritypolicy),
24 # you can tell the browser that it can only download content from the domains you explicitly allow
24 # you can tell the browser that it can only download content from the domains you explicitly allow
25 # http://www.html5rocks.com/en/tutorials/security/content-security-policy/
25 # http://www.html5rocks.com/en/tutorials/security/content-security-policy/
26 # https://www.owasp.org/index.php/Content_Security_Policy
26 # https://www.owasp.org/index.php/Content_Security_Policy
27 # I need to change our application code so we can increase security by disabling 'unsafe-inline' 'unsafe-eval'
27 # I need to change our application code so we can increase security by disabling 'unsafe-inline' 'unsafe-eval'
28 # directives for css and js (if you have inline css or js, you will need to keep it too).
28 # directives for css and js (if you have inline css or js, you will need to keep it too).
29 # more: http://www.html5rocks.com/en/tutorials/security/content-security-policy/#inline-code-considered-harmful
29 # more: http://www.html5rocks.com/en/tutorials/security/content-security-policy/#inline-code-considered-harmful
30 #add_header Content-Security-Policy "default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval' https://ssl.google-analytics.com https://assets.zendesk.com https://connect.facebook.net; img-src 'self' https://ssl.google-analytics.com https://s-static.ak.facebook.com https://assets.zendesk.com; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com https://assets.zendesk.com; font-src 'self' https://themes.googleusercontent.com; frame-src https://assets.zendesk.com https://www.facebook.com https://s-static.ak.facebook.com https://tautt.zendesk.com; object-src 'none'";
30 #add_header Content-Security-Policy "default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval' https://ssl.google-analytics.com https://assets.zendesk.com https://connect.facebook.net; img-src 'self' https://ssl.google-analytics.com https://s-static.ak.facebook.com https://assets.zendesk.com; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com https://assets.zendesk.com; font-src 'self' https://themes.googleusercontent.com; frame-src https://assets.zendesk.com https://www.facebook.com https://s-static.ak.facebook.com https://tautt.zendesk.com; object-src 'none'";
31
31
32 ## rate limiter for certain pages to prevent brute force attacks
32 ## rate limiter for certain pages to prevent brute force attacks
33 limit_req_zone $binary_remote_addr zone=http_req_limit:10m rate=1r/s;
33 limit_req_zone $binary_remote_addr zone=http_req_limit:10m rate=1r/s;
34
34
35 ## custom log format
35 ## custom log format
36 log_format http_log_custom '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_time $upstream_response_time $pipe';
36 log_format http_log_custom '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_time $upstream_response_time $pipe';
37
37
38 log_format json_log_custom escape=json
38 log_format json_log_custom escape=json
39 '{'
39 '{'
40 '"source":"nginx",'
40 '"source":"nginx",'
41 '"remote_user":"$remote_user",'
41 '"remote_user":"$remote_user",'
42 '"time_local":"$time_local",'
42 '"time_local":"$time_local",'
43 '"remote_addr":"$remote_addr",'
43 '"remote_addr":"$remote_addr",'
44 '"proxy_x_forwarded_for":"$proxy_add_x_forwarded_for",'
44 '"proxy_x_forwarded_for":"$proxy_add_x_forwarded_for",'
45 '"request":"$request",'
45 '"request":"$request",'
46 '"status": "$status",'
46 '"status": "$status",'
47 '"request_method": "$request_method",'
47 '"request_method": "$request_method",'
48 '"body_bytes_sent":"$body_bytes_sent",'
48 '"body_bytes_sent":"$body_bytes_sent",'
49 '"request_time":"$request_time",'
49 '"request_time":"$request_time",'
50 '"upstream_response_time":"$upstream_response_time",'
50 '"upstream_response_time":"$upstream_response_time",'
51 '"http_referrer":"$http_referer",'
51 '"http_referrer":"$http_referer",'
52 '"http_user_agent":"$http_user_agent"'
52 '"http_user_agent":"$http_user_agent"'
53 '}';
53 '}';
54
54
55
55
56 server {
56 server {
57 listen 80 default;
57 listen 80 default;
58 # ensure we get the proper Docker DNS resolver for load balancing.
58 # ensure we get the proper Docker DNS resolver for load balancing.
59 resolver 127.0.0.11 ipv6=off;
59 resolver 127.0.0.11 ipv6=off;
60 server_name localhost 127.0.0.1;
60 server_name localhost 127.0.0.1;
61 access_log /var/log/rhodecode/nginx/rhodecode.http.access.log json_log_custom;
61 access_log /dev/stdout json_log_custom;
62 error_log /var/log/rhodecode/nginx/rhodecode.http.error.log;
62 error_log /dev/stdout;
63
63
64 # uncomment to redirect http traffic to https
64 # uncomment to redirect http traffic to https (not applicable in Docker setup)
65 #return 301 https://$host$request_uri;
65 #return 301 https://$host$request_uri;
66
66
67 client_body_buffer_size 128k;
67 client_body_buffer_size 128k;
68 # maximum number and size of buffers for large headers to read from client request
68 # maximum number and size of buffers for large headers to read from client request
69 large_client_header_buffers 16 256k;
69 large_client_header_buffers 16 256k;
70
70
71 ## serve static files by nginx, recommended
71 ## serve static files by nginx, recommended
72 location /_static/rhodecode {
72 location /_static/rhodecode {
73 gzip on;
73 gzip on;
74 gzip_min_length 500;
74 gzip_min_length 500;
75 gzip_proxied any;
75 gzip_proxied any;
76 gzip_comp_level 4;
76 gzip_comp_level 4;
77 gzip_types text/css text/javascript text/xml text/plain text/x-component application/javascript application/json application/xml application/rss+xml font/truetype font/opentype application/vnd.ms-fontobject image/svg+xml;
77 gzip_types text/css text/javascript text/xml text/plain text/x-component application/javascript application/json application/xml application/rss+xml font/truetype font/opentype application/vnd.ms-fontobject image/svg+xml;
78 gzip_vary on;
78 gzip_vary on;
79 gzip_disable "msie6";
79 gzip_disable "msie6";
80 expires 60d;
80 expires 60d;
81
81
82 alias /var/opt/rhodecode_data/static;
82 alias /var/opt/rhodecode_data/static;
83 }
83 }
84
84
85 ## channelstream location handler, if channelstream live chat and notifications
85 ## channelstream location handler, if channelstream live chat and notifications
86 ## are enabled, this will proxy the requests to the channelstream websocket server
86 ## are enabled, this will proxy the requests to the channelstream websocket server
87 location /_channelstream {
87 location /_channelstream {
88 rewrite /_channelstream/(.*) /$1 break;
88 rewrite /_channelstream/(.*) /$1 break;
89 gzip off;
89 gzip off;
90 tcp_nodelay off;
90 tcp_nodelay off;
91
91
92 proxy_connect_timeout 10;
92 proxy_connect_timeout 10;
93 proxy_send_timeout 10m;
93 proxy_send_timeout 10m;
94 proxy_read_timeout 10m;
94 proxy_read_timeout 10m;
95
95
96 proxy_set_header Host $host;
96 proxy_set_header Host $host;
97 proxy_set_header X-Real-IP $remote_addr;
97 proxy_set_header X-Real-IP $remote_addr;
98 proxy_set_header X-Url-Scheme $scheme;
98 proxy_set_header X-Url-Scheme $scheme;
99 proxy_set_header X-Forwarded-Proto $scheme;
99 proxy_set_header X-Forwarded-Proto $scheme;
100 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
100 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
101
101
102 proxy_http_version 1.1;
102 proxy_http_version 1.1;
103 proxy_set_header Upgrade $http_upgrade;
103 proxy_set_header Upgrade $http_upgrade;
104 proxy_set_header Connection "upgrade";
104 proxy_set_header Connection "upgrade";
105
105
106 proxy_pass http://channelstream:9800;
106 set $upstream_channelstream http://channelstream:8000;
107 proxy_pass $upstream_channelstream;
107 }
108 }
108
109
109 ## rate limit this endpoint to prevent login page brute-force attacks
110 # ## rate limit this endpoint to prevent login page brute-force attacks
110 location /_admin/login {
111 # location /_admin/login {
111 limit_req zone=http_req_limit burst=10 nodelay;
112 # limit_req zone=http_req_limit burst=10 nodelay;
112 try_files $uri @rhodecode_http;
113 # try_files $uri @rhodecode_http;
113 }
114 # }
114
115
115 location / {
116 location / {
116 include /etc/nginx/proxy.conf;
117 include /etc/nginx/proxy.conf;
117 try_files $uri @rhodecode_http;
118 try_files $uri @rhodecode_http;
118 }
119 }
119
120
120 location @rhodecode_http {
121 location @rhodecode_http {
121 include /etc/nginx/proxy.conf;
122 set $upstream http://rhodecode:10020;
122 proxy_pass http://rhodecode:10020;
123 include /etc/nginx/proxy.conf;
124 proxy_pass $upstream;
123 }
125 }
124
126
125 ## Custom 502 error page.
127 ## Custom 502 error page.
126 ## Will be displayed while RhodeCode server is turned off
128 ## Will be displayed while RhodeCode server is turned off
127 error_page 502 /502.html;
129 error_page 502 /502.html;
128 location = /502.html {
130 location = /502.html {
129 root /var/opt/static;
131 root /var/opt/rhodecode_data/static;
132 internal;
130 }
133 }
131
134
132 }
135 }
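
A note on the proxying changes above: assigning the upstream URL to a variable before proxy_pass makes nginx resolve the hostname at request time through the resolver declared at the top of the server block (Docker's embedded DNS at 127.0.0.11), so the proxy can start and keep running even while the rhodecode or channelstream containers are down. A minimal sketch of the pattern, reusing the same names as the config above:

    resolver 127.0.0.11 ipv6=off;              # Docker's embedded DNS
    location @rhodecode_http {
        set $upstream http://rhodecode:10020;  # variable forces per-request DNS resolution
        include /etc/nginx/proxy.conf;
        proxy_pass $upstream;                  # nginx no longer needs the host to exist at startup
    }

The added internal; directive on the 502 page serves a similar robustness goal: /502.html can only be reached through the error_page redirect, not requested directly.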
@@ -1,30 +1,31 b''
1 user root;
1 user root;
2 worker_processes 2;
2 worker_processes 2;
3
3
4 pid /var/run/nginx.pid;
4 pid /var/run/nginx.pid;
5 error_log /var/log/rhodecode/nginx/default.error.log warn;
5 error_log /dev/stdout info;
6
6
7 events {
7 events {
8 worker_connections 1024;
8 worker_connections 1024;
9 # multi_accept on;
9 # multi_accept on;
10 }
10 }
11
11
12 http {
12 http {
13 include /etc/nginx/mime.types;
13 include /etc/nginx/mime.types;
14 default_type application/octet-stream;
14 default_type application/octet-stream;
15 server_tokens off;
15
16
16 log_format main '$remote_addr - $remote_user [$time_local] '
17 log_format main '$remote_addr - $remote_user [$time_local] '
17 '"$request" $status $body_bytes_sent '
18 '"$request" $status $body_bytes_sent '
18 '"$http_referer" "$http_user_agent" '
19 '"$http_referer" "$http_user_agent" '
19 '$request_time $upstream_response_time $pipe';
20 '$request_time $upstream_response_time $pipe';
20
21
21 access_log /var/log/rhodecode/nginx/default.access.log main;
22 access_log /dev/stdout main;
22
23
23 sendfile on;
24 sendfile on;
24 tcp_nopush on;
25 tcp_nopush on;
25 tcp_nodelay on;
26 tcp_nodelay on;
26 keepalive_timeout 65;
27 keepalive_timeout 65;
27 types_hash_max_size 2048;
28 types_hash_max_size 2048;
28
29
29 include /etc/nginx/sites-enabled/*.conf;
30 include /etc/nginx/http.conf;
30 }
31 }
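
With error_log and access_log pointed at /dev/stdout, nginx output goes to the container's log stream instead of files under /var/log, and the added server_tokens off hides the nginx version from response headers and error pages. Assuming the proxy service is simply named nginx in the compose file (a guess, adjust to your stack), the output can be followed with:

    docker compose logs -f nginx    # hypothetical service name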
1 NO CONTENT: file renamed from service/nginx/proxy.conf to config/nginx/proxy.conf
NO CONTENT: file renamed from service/nginx/proxy.conf to config/nginx/proxy.conf
@@ -1,17 +1,23 b''
1 global:
1 global:
2 scrape_interval: 15s
2 scrape_interval: 15s
3 evaluation_interval: 15s
3 evaluation_interval: 15s
4
4
5 scrape_configs:
5 scrape_configs:
6
6
7 # Scrape statsd exporter from RhodeCode
7 # Scrape statsd exporter from RhodeCode
8 - job_name: "rhodecode_metrics"
8 - job_name: "rhodecode_metrics"
9 metrics_path: "/metrics"
9 metrics_path: "/metrics"
10 static_configs:
10 static_configs:
11 - targets: ["statsd-exporter:9102"]
11 - targets: ["statsd-exporter:9102"]
12
12
13 # scrape node exporter
13 # scrape node exporter
14 - job_name: node_metrics
14 - job_name: node_metrics
15 metrics_path: "/metrics"
15 metrics_path: "/metrics"
16 static_configs:
16 static_configs:
17 - targets: ["node-exporter:9100"]
17 - targets: ["node-exporter:9100"]
18
19 # scrape traefik node
20 - job_name: traefik_metrics
21 metrics_path: "/metrics"
22 static_configs:
23 - targets: ["traefik:7000"]
@@ -1,38 +1,38 b''
1 server:
1 server:
2 http_listen_port: 9080
2 http_listen_port: 9080
3 grpc_listen_port: 0
3 grpc_listen_port: 0
4
4
5 # Describes how to save read file offsets to disk
5 # Describes how to save read file offsets to disk
6 positions:
6 positions:
7 filename: /tmp/positions.yaml
7 filename: /tmp/positions.yaml
8
8
9 clients:
9 clients:
10 - url: http://loki:3100/loki/api/v1/push
10 - url: http://loki:3100/loki/api/v1/push
11 tenant_id: docker
11 tenant_id: docker
12
12
13 ## Definition of what to scrape
13 ## Definition of what to scrape
14 scrape_configs:
14 scrape_configs:
15
15
16 - job_name: scraping_nginx
16 #- job_name: scraping_nginx
17 static_configs:
17 # static_configs:
18 - targets:
18 # - targets:
19 - localhost
19 # - localhost
20 labels:
20 # labels:
21 __path__: /var/log_volume/nginx/rhodecode*
21 # __path__: /var/log_volume/nginx/rhodecode*
22 job: nginx
22 # job: nginx
23
23 #
24 - job_name: scraping_vcsserver
24 #- job_name: scraping_vcsserver
25 static_configs:
25 # static_configs:
26 - targets:
26 # - targets:
27 - localhost
27 # - localhost
28 labels:
28 # labels:
29 __path__: /var/log_volume/apps/rhodecode*
29 # __path__: /var/log_volume/apps/rhodecode*
30 job: rhodecode
30 # job: rhodecode
31
31 #
32 - job_name: scraping_rhodecode
32 #- job_name: scraping_rhodecode
33 static_configs:
33 # static_configs:
34 - targets:
34 # - targets:
35 - localhost
35 # - localhost
36 labels:
36 # labels:
37 __path__: /var/log_volume/apps/vcsserver*
37 # __path__: /var/log_volume/apps/vcsserver*
38 job: vcsserver No newline at end of file
38 # job: vcsserver No newline at end of file
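
With all three file-based jobs commented out, this Promtail instance only defines a Loki client and tails nothing by itself. To bring a job back, uncomment it and make sure the path under /var/log_volume is actually mounted into the Promtail container (the mount is an assumption, not shown in this file); a minimal sketch using the first commented job:

    scrape_configs:
      - job_name: scraping_nginx
        static_configs:
          - targets:
              - localhost
            labels:
              job: nginx
              __path__: /var/log_volume/nginx/rhodecode*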
@@ -1,1865 +1,1865 b''
1 # Redis configuration file example.
1 # Redis configuration file example.
2 #
2 #
3 # Note that in order to read the configuration file, Redis must be
3 # Note that in order to read the configuration file, Redis must be
4 # started with the file path as first argument:
4 # started with the file path as first argument:
5 #
5 #
6 # ./redis-server /path/to/redis.conf
6 # ./redis-server /path/to/redis.conf
7
7
8 # Note on units: when memory size is needed, it is possible to specify
8 # Note on units: when memory size is needed, it is possible to specify
9 # it in the usual form of 1k 5GB 4M and so forth:
9 # it in the usual form of 1k 5GB 4M and so forth:
10 #
10 #
11 # 1k => 1000 bytes
11 # 1k => 1000 bytes
12 # 1kb => 1024 bytes
12 # 1kb => 1024 bytes
13 # 1m => 1000000 bytes
13 # 1m => 1000000 bytes
14 # 1mb => 1024*1024 bytes
14 # 1mb => 1024*1024 bytes
15 # 1g => 1000000000 bytes
15 # 1g => 1000000000 bytes
16 # 1gb => 1024*1024*1024 bytes
16 # 1gb => 1024*1024*1024 bytes
17 #
17 #
18 # units are case insensitive so 1GB 1Gb 1gB are all the same.
18 # units are case insensitive so 1GB 1Gb 1gB are all the same.
19
19
20 ################################## INCLUDES ###################################
20 ################################## INCLUDES ###################################
21
21
22 # Include one or more other config files here. This is useful if you
22 # Include one or more other config files here. This is useful if you
23 # have a standard template that goes to all Redis servers but also need
23 # have a standard template that goes to all Redis servers but also need
24 # to customize a few per-server settings. Include files can include
24 # to customize a few per-server settings. Include files can include
25 # other files, so use this wisely.
25 # other files, so use this wisely.
26 #
26 #
27 # Note that option "include" won't be rewritten by command "CONFIG REWRITE"
27 # Note that option "include" won't be rewritten by command "CONFIG REWRITE"
28 # from admin or Redis Sentinel. Since Redis always uses the last processed
28 # from admin or Redis Sentinel. Since Redis always uses the last processed
29 # line as value of a configuration directive, you'd better put includes
29 # line as value of a configuration directive, you'd better put includes
30 # at the beginning of this file to avoid overwriting config change at runtime.
30 # at the beginning of this file to avoid overwriting config change at runtime.
31 #
31 #
32 # If instead you are interested in using includes to override configuration
32 # If instead you are interested in using includes to override configuration
33 # options, it is better to use include as the last line.
33 # options, it is better to use include as the last line.
34 #
34 #
35 # include /path/to/local.conf
35 # include /path/to/local.conf
36 # include /path/to/other.conf
36 # include /path/to/other.conf
37
37
38 ################################## MODULES #####################################
38 ################################## MODULES #####################################
39
39
40 # Load modules at startup. If the server is not able to load modules
40 # Load modules at startup. If the server is not able to load modules
41 # it will abort. It is possible to use multiple loadmodule directives.
41 # it will abort. It is possible to use multiple loadmodule directives.
42 #
42 #
43 # loadmodule /path/to/my_module.so
43 # loadmodule /path/to/my_module.so
44 # loadmodule /path/to/other_module.so
44 # loadmodule /path/to/other_module.so
45
45
46 ################################## NETWORK #####################################
46 ################################## NETWORK #####################################
47
47
48 # By default, if no "bind" configuration directive is specified, Redis listens
48 # By default, if no "bind" configuration directive is specified, Redis listens
49 # for connections from all available network interfaces on the host machine.
49 # for connections from all available network interfaces on the host machine.
50 # It is possible to listen to just one or multiple selected interfaces using
50 # It is possible to listen to just one or multiple selected interfaces using
51 # the "bind" configuration directive, followed by one or more IP addresses.
51 # the "bind" configuration directive, followed by one or more IP addresses.
52 #
52 #
53 # Examples:
53 # Examples:
54 #
54 #
55 # bind 192.168.1.100 10.0.0.1
55 # bind 192.168.1.100 10.0.0.1
56 # bind 127.0.0.1 ::1
56 # bind 127.0.0.1 ::1
57 #
57 #
58 # ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
58 # ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
59 # internet, binding to all the interfaces is dangerous and will expose the
59 # internet, binding to all the interfaces is dangerous and will expose the
60 # instance to everybody on the internet. So by default we uncomment the
60 # instance to everybody on the internet. So by default we uncomment the
61 # following bind directive, that will force Redis to listen only on the
61 # following bind directive, that will force Redis to listen only on the
62 # IPv4 loopback interface address (this means Redis will only be able to
62 # IPv4 loopback interface address (this means Redis will only be able to
63 # accept client connections from the same host that it is running on).
63 # accept client connections from the same host that it is running on).
64 #
64 #
65 # IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
65 # IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
66 # JUST COMMENT OUT THE FOLLOWING LINE.
66 # JUST COMMENT OUT THE FOLLOWING LINE.
67 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
67 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
68 #bind 127.0.0.1
68 #bind 127.0.0.1
69
69
70 # Protected mode is a layer of security protection, in order to avoid that
70 # Protected mode is a layer of security protection, in order to avoid that
71 # Redis instances left open on the internet are accessed and exploited.
71 # Redis instances left open on the internet are accessed and exploited.
72 #
72 #
73 # When protected mode is on and if:
73 # When protected mode is on and if:
74 #
74 #
75 # 1) The server is not binding explicitly to a set of addresses using the
75 # 1) The server is not binding explicitly to a set of addresses using the
76 # "bind" directive.
76 # "bind" directive.
77 # 2) No password is configured.
77 # 2) No password is configured.
78 #
78 #
79 # The server only accepts connections from clients connecting from the
79 # The server only accepts connections from clients connecting from the
80 # IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain
80 # IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain
81 # sockets.
81 # sockets.
82 #
82 #
83 # By default protected mode is enabled. You should disable it only if
83 # By default protected mode is enabled. You should disable it only if
84 # you are sure you want clients from other hosts to connect to Redis
84 # you are sure you want clients from other hosts to connect to Redis
85 # even if no authentication is configured, nor a specific set of interfaces
85 # even if no authentication is configured, nor a specific set of interfaces
86 # are explicitly listed using the "bind" directive.
86 # are explicitly listed using the "bind" directive.
87 protected-mode no
87 protected-mode no
88
88
89 # Accept connections on the specified port, default is 6379 (IANA #815344).
89 # Accept connections on the specified port, default is 6379 (IANA #815344).
90 # If port 0 is specified Redis will not listen on a TCP socket.
90 # If port 0 is specified Redis will not listen on a TCP socket.
91 port 6379
91 port 6379
92
92
93 # TCP listen() backlog.
93 # TCP listen() backlog.
94 #
94 #
95 # In high requests-per-second environments you need a high backlog in order
95 # In high requests-per-second environments you need a high backlog in order
96 # to avoid slow clients connection issues. Note that the Linux kernel
96 # to avoid slow clients connection issues. Note that the Linux kernel
97 # will silently truncate it to the value of /proc/sys/net/core/somaxconn so
97 # will silently truncate it to the value of /proc/sys/net/core/somaxconn so
98 # make sure to raise both the value of somaxconn and tcp_max_syn_backlog
98 # make sure to raise both the value of somaxconn and tcp_max_syn_backlog
99 # in order to get the desired effect.
99 # in order to get the desired effect.
100 tcp-backlog 511
100 tcp-backlog 511
101
101
102 # Unix socket.
102 # Unix socket.
103 #
103 #
104 # Specify the path for the Unix socket that will be used to listen for
104 # Specify the path for the Unix socket that will be used to listen for
105 # incoming connections. There is no default, so Redis will not listen
105 # incoming connections. There is no default, so Redis will not listen
106 # on a unix socket when not specified.
106 # on a unix socket when not specified.
107 #
107 #
108 # unixsocket /tmp/redis.sock
108 # unixsocket /tmp/redis.sock
109 # unixsocketperm 700
109 # unixsocketperm 700
110
110
111 # Close the connection after a client is idle for N seconds (0 to disable)
111 # Close the connection after a client is idle for N seconds (0 to disable)
112 timeout 0
112 timeout 0
113
113
114 # TCP keepalive.
114 # TCP keepalive.
115 #
115 #
116 # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
116 # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
117 # of communication. This is useful for two reasons:
117 # of communication. This is useful for two reasons:
118 #
118 #
119 # 1) Detect dead peers.
119 # 1) Detect dead peers.
120 # 2) Force network equipment in the middle to consider the connection to be
120 # 2) Force network equipment in the middle to consider the connection to be
121 # alive.
121 # alive.
122 #
122 #
123 # On Linux, the specified value (in seconds) is the period used to send ACKs.
123 # On Linux, the specified value (in seconds) is the period used to send ACKs.
124 # Note that to close the connection, double the time is needed.
124 # Note that to close the connection, double the time is needed.
125 # On other kernels the period depends on the kernel configuration.
125 # On other kernels the period depends on the kernel configuration.
126 #
126 #
127 # A reasonable value for this option is 300 seconds, which is the new
127 # A reasonable value for this option is 300 seconds, which is the new
128 # Redis default starting with Redis 3.2.1.
128 # Redis default starting with Redis 3.2.1.
129 tcp-keepalive 300
129 tcp-keepalive 300
130
130
131 ################################# TLS/SSL #####################################
131 ################################# TLS/SSL #####################################
132
132
133 # By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration
133 # By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration
134 # directive can be used to define TLS-listening ports. To enable TLS on the
134 # directive can be used to define TLS-listening ports. To enable TLS on the
135 # default port, use:
135 # default port, use:
136 #
136 #
137 # port 0
137 # port 0
138 # tls-port 6379
138 # tls-port 6379
139
139
140 # Configure a X.509 certificate and private key to use for authenticating the
140 # Configure a X.509 certificate and private key to use for authenticating the
141 # server to connected clients, masters or cluster peers. These files should be
141 # server to connected clients, masters or cluster peers. These files should be
142 # PEM formatted.
142 # PEM formatted.
143 #
143 #
144 # tls-cert-file redis.crt
144 # tls-cert-file redis.crt
145 # tls-key-file redis.key
145 # tls-key-file redis.key
146
146
147 # Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange:
147 # Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange:
148 #
148 #
149 # tls-dh-params-file redis.dh
149 # tls-dh-params-file redis.dh
150
150
151 # Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL
151 # Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL
152 # clients and peers. Redis requires an explicit configuration of at least one
152 # clients and peers. Redis requires an explicit configuration of at least one
153 # of these, and will not implicitly use the system wide configuration.
153 # of these, and will not implicitly use the system wide configuration.
154 #
154 #
155 # tls-ca-cert-file ca.crt
155 # tls-ca-cert-file ca.crt
156 # tls-ca-cert-dir /etc/ssl/certs
156 # tls-ca-cert-dir /etc/ssl/certs
157
157
158 # By default, clients (including replica servers) on a TLS port are required
158 # By default, clients (including replica servers) on a TLS port are required
159 # to authenticate using valid client side certificates.
159 # to authenticate using valid client side certificates.
160 #
160 #
161 # If "no" is specified, client certificates are not required and not accepted.
161 # If "no" is specified, client certificates are not required and not accepted.
162 # If "optional" is specified, client certificates are accepted and must be
162 # If "optional" is specified, client certificates are accepted and must be
163 # valid if provided, but are not required.
163 # valid if provided, but are not required.
164 #
164 #
165 # tls-auth-clients no
165 # tls-auth-clients no
166 # tls-auth-clients optional
166 # tls-auth-clients optional
167
167
168 # By default, a Redis replica does not attempt to establish a TLS connection
168 # By default, a Redis replica does not attempt to establish a TLS connection
169 # with its master.
169 # with its master.
170 #
170 #
171 # Use the following directive to enable TLS on replication links.
171 # Use the following directive to enable TLS on replication links.
172 #
172 #
173 # tls-replication yes
173 # tls-replication yes
174
174
175 # By default, the Redis Cluster bus uses a plain TCP connection. To enable
175 # By default, the Redis Cluster bus uses a plain TCP connection. To enable
176 # TLS for the bus protocol, use the following directive:
176 # TLS for the bus protocol, use the following directive:
177 #
177 #
178 # tls-cluster yes
178 # tls-cluster yes
179
179
180 # Explicitly specify TLS versions to support. Allowed values are case insensitive
180 # Explicitly specify TLS versions to support. Allowed values are case insensitive
181 # and include "TLSv1", "TLSv1.1", "TLSv1.2", "TLSv1.3" (OpenSSL >= 1.1.1) or
181 # and include "TLSv1", "TLSv1.1", "TLSv1.2", "TLSv1.3" (OpenSSL >= 1.1.1) or
182 # any combination. To enable only TLSv1.2 and TLSv1.3, use:
182 # any combination. To enable only TLSv1.2 and TLSv1.3, use:
183 #
183 #
184 # tls-protocols "TLSv1.2 TLSv1.3"
184 # tls-protocols "TLSv1.2 TLSv1.3"
185
185
186 # Configure allowed ciphers. See the ciphers(1ssl) manpage for more information
186 # Configure allowed ciphers. See the ciphers(1ssl) manpage for more information
187 # about the syntax of this string.
187 # about the syntax of this string.
188 #
188 #
189 # Note: this configuration applies only to <= TLSv1.2.
189 # Note: this configuration applies only to <= TLSv1.2.
190 #
190 #
191 # tls-ciphers DEFAULT:!MEDIUM
191 # tls-ciphers DEFAULT:!MEDIUM
192
192
193 # Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more
193 # Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more
194 # information about the syntax of this string, and specifically for TLSv1.3
194 # information about the syntax of this string, and specifically for TLSv1.3
195 # ciphersuites.
195 # ciphersuites.
196 #
196 #
197 # tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256
197 # tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256
198
198
199 # When choosing a cipher, use the server's preference instead of the client
199 # When choosing a cipher, use the server's preference instead of the client
200 # preference. By default, the server follows the client's preference.
200 # preference. By default, the server follows the client's preference.
201 #
201 #
202 # tls-prefer-server-ciphers yes
202 # tls-prefer-server-ciphers yes
203
203
204 # By default, TLS session caching is enabled to allow faster and less expensive
204 # By default, TLS session caching is enabled to allow faster and less expensive
205 # reconnections by clients that support it. Use the following directive to disable
205 # reconnections by clients that support it. Use the following directive to disable
206 # caching.
206 # caching.
207 #
207 #
208 # tls-session-caching no
208 # tls-session-caching no
209
209
210 # Change the default number of TLS sessions cached. A zero value sets the cache
210 # Change the default number of TLS sessions cached. A zero value sets the cache
211 # to unlimited size. The default size is 20480.
211 # to unlimited size. The default size is 20480.
212 #
212 #
213 # tls-session-cache-size 5000
213 # tls-session-cache-size 5000
214
214
215 # Change the default timeout of cached TLS sessions. The default timeout is 300
215 # Change the default timeout of cached TLS sessions. The default timeout is 300
216 # seconds.
216 # seconds.
217 #
217 #
218 # tls-session-cache-timeout 60
218 # tls-session-cache-timeout 60
219
219
220 ################################# GENERAL #####################################
220 ################################# GENERAL #####################################
221
221
222 # By default Redis does not run as a daemon. Use 'yes' if you need it.
222 # By default Redis does not run as a daemon. Use 'yes' if you need it.
223 # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
223 # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
224 daemonize no
224 daemonize no
225
225
226 # If you run Redis from upstart or systemd, Redis can interact with your
226 # If you run Redis from upstart or systemd, Redis can interact with your
227 # supervision tree. Options:
227 # supervision tree. Options:
228 # supervised no - no supervision interaction
228 # supervised no - no supervision interaction
229 # supervised upstart - signal upstart by putting Redis into SIGSTOP mode
229 # supervised upstart - signal upstart by putting Redis into SIGSTOP mode
230 # requires "expect stop" in your upstart job config
230 # requires "expect stop" in your upstart job config
231 # supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
231 # supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
232 # supervised auto - detect upstart or systemd method based on
232 # supervised auto - detect upstart or systemd method based on
233 # UPSTART_JOB or NOTIFY_SOCKET environment variables
233 # UPSTART_JOB or NOTIFY_SOCKET environment variables
234 # Note: these supervision methods only signal "process is ready."
234 # Note: these supervision methods only signal "process is ready."
235 # They do not enable continuous pings back to your supervisor.
235 # They do not enable continuous pings back to your supervisor.
236 supervised no
236 supervised no
237
237
238 # If a pid file is specified, Redis writes it where specified at startup
238 # If a pid file is specified, Redis writes it where specified at startup
239 # and removes it at exit.
239 # and removes it at exit.
240 #
240 #
241 # When the server runs non daemonized, no pid file is created if none is
241 # When the server runs non daemonized, no pid file is created if none is
242 # specified in the configuration. When the server is daemonized, the pid file
242 # specified in the configuration. When the server is daemonized, the pid file
243 # is used even if not specified, defaulting to "/var/run/redis.pid".
243 # is used even if not specified, defaulting to "/var/run/redis.pid".
244 #
244 #
245 # Creating a pid file is best effort: if Redis is not able to create it
245 # Creating a pid file is best effort: if Redis is not able to create it
246 # nothing bad happens, the server will start and run normally.
246 # nothing bad happens, the server will start and run normally.
247 pidfile /var/run/redis_6379.pid
247 pidfile /var/run/redis_6379.pid
248
248
249 # Specify the server verbosity level.
249 # Specify the server verbosity level.
250 # This can be one of:
250 # This can be one of:
251 # debug (a lot of information, useful for development/testing)
251 # debug (a lot of information, useful for development/testing)
252 # verbose (much rarely useful info, but not a mess like the debug level)
252 # verbose (much rarely useful info, but not a mess like the debug level)
253 # notice (moderately verbose, what you want in production probably)
253 # notice (moderately verbose, what you want in production probably)
254 # warning (only very important / critical messages are logged)
254 # warning (only very important / critical messages are logged)
255 loglevel notice
255 loglevel notice
256
256
257 # Specify the log file name. Also the empty string can be used to force
257 # Specify the log file name. Also the empty string can be used to force
258 # Redis to log on the standard output. Note that if you use standard
258 # Redis to log on the standard output. Note that if you use standard
259 # output for logging but daemonize, logs will be sent to /dev/null
259 # output for logging but daemonize, logs will be sent to /dev/null
260 logfile ""
260 logfile ""
261
261
262 # To enable logging to the system logger, just set 'syslog-enabled' to yes,
262 # To enable logging to the system logger, just set 'syslog-enabled' to yes,
263 # and optionally update the other syslog parameters to suit your needs.
263 # and optionally update the other syslog parameters to suit your needs.
264 # syslog-enabled no
264 # syslog-enabled no
265
265
266 # Specify the syslog identity.
266 # Specify the syslog identity.
267 # syslog-ident redis
267 # syslog-ident redis
268
268
269 # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
269 # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
270 # syslog-facility local0
270 # syslog-facility local0
271
271
272 # Set the number of databases. The default database is DB 0, you can select
272 # Set the number of databases. The default database is DB 0, you can select
273 # a different one on a per-connection basis using SELECT <dbid> where
273 # a different one on a per-connection basis using SELECT <dbid> where
274 # dbid is a number between 0 and 'databases'-1
274 # dbid is a number between 0 and 'databases'-1
275 databases 16
275 databases 16
276
276
277 # By default Redis shows an ASCII art logo only when started to log to the
277 # By default Redis shows an ASCII art logo only when started to log to the
278 # standard output and if the standard output is a TTY. Basically this means
278 # standard output and if the standard output is a TTY. Basically this means
279 # that normally a logo is displayed only in interactive sessions.
279 # that normally a logo is displayed only in interactive sessions.
280 #
280 #
281 # However it is possible to force the pre-4.0 behavior and always show an
281 # However it is possible to force the pre-4.0 behavior and always show an
282 # ASCII art logo in startup logs by setting the following option to yes.
282 # ASCII art logo in startup logs by setting the following option to yes.
283 always-show-logo yes
283 always-show-logo no
284
284
285 ################################ SNAPSHOTTING ################################
285 ################################ SNAPSHOTTING ################################
286 #
286 #
287 # Save the DB on disk:
287 # Save the DB on disk:
288 #
288 #
289 # save <seconds> <changes>
289 # save <seconds> <changes>
290 #
290 #
291 # Will save the DB if both the given number of seconds and the given
291 # Will save the DB if both the given number of seconds and the given
292 # number of write operations against the DB occurred.
292 # number of write operations against the DB occurred.
293 #
293 #
294 # In the example below the behavior will be to save:
294 # In the example below the behavior will be to save:
295 # after 900 sec (15 min) if at least 1 key changed
295 # after 900 sec (15 min) if at least 1 key changed
296 # after 300 sec (5 min) if at least 10 keys changed
296 # after 300 sec (5 min) if at least 10 keys changed
297 # after 60 sec if at least 10000 keys changed
297 # after 60 sec if at least 10000 keys changed
298 #
298 #
299 # Note: you can disable saving completely by commenting out all "save" lines.
299 # Note: you can disable saving completely by commenting out all "save" lines.
300 #
300 #
301 # It is also possible to remove all the previously configured save
301 # It is also possible to remove all the previously configured save
302 # points by adding a save directive with a single empty string argument
302 # points by adding a save directive with a single empty string argument
303 # like in the following example:
303 # like in the following example:
304 #
304 #
305 # save ""
305 # save ""
306
306
307 save 900 1
307 save 900 1
308 save 300 10
308 save 300 10
309 save 60 10000
309 save 60 10000
310
310
311 # By default Redis will stop accepting writes if RDB snapshots are enabled
311 # By default Redis will stop accepting writes if RDB snapshots are enabled
312 # (at least one save point) and the latest background save failed.
312 # (at least one save point) and the latest background save failed.
313 # This will make the user aware (in a hard way) that data is not persisting
313 # This will make the user aware (in a hard way) that data is not persisting
314 # on disk properly, otherwise chances are that no one will notice and some
314 # on disk properly, otherwise chances are that no one will notice and some
315 # disaster will happen.
315 # disaster will happen.
316 #
316 #
317 # If the background saving process will start working again Redis will
317 # If the background saving process will start working again Redis will
318 # automatically allow writes again.
318 # automatically allow writes again.
319 #
319 #
320 # However if you have setup your proper monitoring of the Redis server
320 # However if you have setup your proper monitoring of the Redis server
321 # and persistence, you may want to disable this feature so that Redis will
321 # and persistence, you may want to disable this feature so that Redis will
322 # continue to work as usual even if there are problems with disk,
322 # continue to work as usual even if there are problems with disk,
323 # permissions, and so forth.
323 # permissions, and so forth.
324 stop-writes-on-bgsave-error yes
324 stop-writes-on-bgsave-error yes
325
325
326 # Compress string objects using LZF when dumping .rdb databases?
326 # Compress string objects using LZF when dumping .rdb databases?
327 # By default compression is enabled as it's almost always a win.
327 # By default compression is enabled as it's almost always a win.
328 # If you want to save some CPU in the saving child set it to 'no' but
328 # If you want to save some CPU in the saving child set it to 'no' but
329 # the dataset will likely be bigger if you have compressible values or keys.
329 # the dataset will likely be bigger if you have compressible values or keys.
330 rdbcompression yes
330 rdbcompression yes
331
331
332 # Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
332 # Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
333 # This makes the format more resistant to corruption but there is a performance
333 # This makes the format more resistant to corruption but there is a performance
334 # hit to pay (around 10%) when saving and loading RDB files, so you can disable it
334 # hit to pay (around 10%) when saving and loading RDB files, so you can disable it
335 # for maximum performance.
335 # for maximum performance.
336 #
336 #
337 # RDB files created with checksum disabled have a checksum of zero that will
337 # RDB files created with checksum disabled have a checksum of zero that will
338 # tell the loading code to skip the check.
338 # tell the loading code to skip the check.
339 rdbchecksum yes
339 rdbchecksum yes
340
340
341 # The filename where to dump the DB
341 # The filename where to dump the DB
342 dbfilename redis_dump.rdb
342 dbfilename redis_dump.rdb
343
343
344 # Remove RDB files used by replication in instances without persistence
344 # Remove RDB files used by replication in instances without persistence
345 # enabled. By default this option is disabled, however there are environments
345 # enabled. By default this option is disabled, however there are environments
346 # where for regulations or other security concerns, RDB files persisted on
346 # where for regulations or other security concerns, RDB files persisted on
347 # disk by masters in order to feed replicas, or stored on disk by replicas
347 # disk by masters in order to feed replicas, or stored on disk by replicas
348 # in order to load them for the initial synchronization, should be deleted
348 # in order to load them for the initial synchronization, should be deleted
349 # ASAP. Note that this option ONLY WORKS in instances that have both AOF
349 # ASAP. Note that this option ONLY WORKS in instances that have both AOF
350 # and RDB persistence disabled, otherwise is completely ignored.
350 # and RDB persistence disabled, otherwise is completely ignored.
351 #
351 #
352 # An alternative (and sometimes better) way to obtain the same effect is
352 # An alternative (and sometimes better) way to obtain the same effect is
353 # to use diskless replication on both master and replica instances. However
353 # to use diskless replication on both master and replica instances. However
354 # in the case of replicas, diskless is not always an option.
354 # in the case of replicas, diskless is not always an option.
355 rdb-del-sync-files no
355 rdb-del-sync-files no
356
356
357 # The working directory.
357 # The working directory.
358 #
358 #
359 # The DB will be written inside this directory, with the filename specified
359 # The DB will be written inside this directory, with the filename specified
360 # above using the 'dbfilename' configuration directive.
360 # above using the 'dbfilename' configuration directive.
361 #
361 #
362 # The Append Only File will also be created inside this directory.
362 # The Append Only File will also be created inside this directory.
363 #
363 #
364 # Note that you must specify a directory here, not a file name.
364 # Note that you must specify a directory here, not a file name.
365 dir /var/opt/rhodecode_data/
365 dir /data
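# NOTE: with dir pointed at /data, the RDB snapshot (redis_dump.rdb above) is written
# inside the container, e.g. /data/redis_dump.rdb; presumably that path is a mounted
# Docker volume so the dump survives container restarts; this is an assumption based on
# the path change, not something stated in this file.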
366
366
367 ################################# REPLICATION #################################
367 ################################# REPLICATION #################################
368
368
369 # Master-Replica replication. Use replicaof to make a Redis instance a copy of
369 # Master-Replica replication. Use replicaof to make a Redis instance a copy of
370 # another Redis server. A few things to understand ASAP about Redis replication.
370 # another Redis server. A few things to understand ASAP about Redis replication.
371 #
371 #
372 # +------------------+ +---------------+
372 # +------------------+ +---------------+
373 # | Master | ---> | Replica |
373 # | Master | ---> | Replica |
374 # | (receive writes) | | (exact copy) |
374 # | (receive writes) | | (exact copy) |
375 # +------------------+ +---------------+
375 # +------------------+ +---------------+
376 #
376 #
377 # 1) Redis replication is asynchronous, but you can configure a master to
377 # 1) Redis replication is asynchronous, but you can configure a master to
378 # stop accepting writes if it appears to be not connected with at least
378 # stop accepting writes if it appears to be not connected with at least
379 # a given number of replicas.
379 # a given number of replicas.
380 # 2) Redis replicas are able to perform a partial resynchronization with the
380 # 2) Redis replicas are able to perform a partial resynchronization with the
381 # master if the replication link is lost for a relatively small amount of
381 # master if the replication link is lost for a relatively small amount of
382 # time. You may want to configure the replication backlog size (see the next
382 # time. You may want to configure the replication backlog size (see the next
383 # sections of this file) with a sensible value depending on your needs.
383 # sections of this file) with a sensible value depending on your needs.
384 # 3) Replication is automatic and does not need user intervention. After a
384 # 3) Replication is automatic and does not need user intervention. After a
385 # network partition replicas automatically try to reconnect to masters
385 # network partition replicas automatically try to reconnect to masters
386 # and resynchronize with them.
386 # and resynchronize with them.
387 #
387 #
388 # replicaof <masterip> <masterport>
388 # replicaof <masterip> <masterport>
389
389
390 # If the master is password protected (using the "requirepass" configuration
390 # If the master is password protected (using the "requirepass" configuration
391 # directive below) it is possible to tell the replica to authenticate before
391 # directive below) it is possible to tell the replica to authenticate before
392 # starting the replication synchronization process, otherwise the master will
392 # starting the replication synchronization process, otherwise the master will
393 # refuse the replica request.
393 # refuse the replica request.
394 #
394 #
395 # masterauth <master-password>
395 # masterauth <master-password>
396 #
396 #
397 # However this is not enough if you are using Redis ACLs (for Redis version
397 # However this is not enough if you are using Redis ACLs (for Redis version
398 # 6 or greater), and the default user is not capable of running the PSYNC
398 # 6 or greater), and the default user is not capable of running the PSYNC
399 # command and/or other commands needed for replication. In this case it's
399 # command and/or other commands needed for replication. In this case it's
400 # better to configure a special user to use with replication, and specify the
400 # better to configure a special user to use with replication, and specify the
401 # masteruser configuration as such:
401 # masteruser configuration as such:
402 #
402 #
403 # masteruser <username>
403 # masteruser <username>
404 #
404 #
405 # When masteruser is specified, the replica will authenticate against its
405 # When masteruser is specified, the replica will authenticate against its
406 # master using the new AUTH form: AUTH <username> <password>.
406 # master using the new AUTH form: AUTH <username> <password>.
407
408 # When a replica loses its connection with the master, or when the replication
409 # is still in progress, the replica can act in two different ways:
410 #
411 # 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will
412 # still reply to client requests, possibly with out of date data, or the
413 # data set may just be empty if this is the first synchronization.
414 #
415 # 2) If replica-serve-stale-data is set to 'no' the replica will reply with
416 # an error "SYNC with master in progress" to all commands except:
417 # INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE,
418 # UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST,
419 # HOST and LATENCY.
420 #
421 replica-serve-stale-data yes
422
423 # You can configure a replica instance to accept writes or not. Writing against
424 # a replica instance may be useful to store some ephemeral data (because data
425 # written on a replica will be easily deleted after resync with the master) but
426 # may also cause problems if clients are writing to it because of a
427 # misconfiguration.
428 #
429 # Since Redis 2.6 by default replicas are read-only.
430 #
431 # Note: read only replicas are not designed to be exposed to untrusted clients
432 # on the internet. It's just a protection layer against misuse of the instance.
433 # Still a read only replica exports by default all the administrative commands
434 # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
435 # security of read only replicas using 'rename-command' to shadow all the
436 # administrative / dangerous commands.
437 replica-read-only yes
438
439 # Replication SYNC strategy: disk or socket.
440 #
441 # New replicas and reconnecting replicas that are not able to continue the
442 # replication process just receiving differences, need to do what is called a
443 # "full synchronization". An RDB file is transmitted from the master to the
444 # replicas.
445 #
446 # The transmission can happen in two different ways:
447 #
448 # 1) Disk-backed: The Redis master creates a new process that writes the RDB
449 # file on disk. Later the file is transferred by the parent
450 # process to the replicas incrementally.
451 # 2) Diskless: The Redis master creates a new process that directly writes the
452 # RDB file to replica sockets, without touching the disk at all.
453 #
454 # With disk-backed replication, while the RDB file is generated, more replicas
455 # can be queued and served with the RDB file as soon as the current child
456 # producing the RDB file finishes its work. With diskless replication instead
457 # once the transfer starts, new replicas arriving will be queued and a new
458 # transfer will start when the current one terminates.
459 #
460 # When diskless replication is used, the master waits a configurable amount of
461 # time (in seconds) before starting the transfer in the hope that multiple
462 # replicas will arrive and the transfer can be parallelized.
463 #
464 # With slow disks and fast (large bandwidth) networks, diskless replication
465 # works better.
466 repl-diskless-sync no
467
468 # When diskless replication is enabled, it is possible to configure the delay
469 # the server waits in order to spawn the child that transfers the RDB via socket
470 # to the replicas.
471 #
472 # This is important since once the transfer starts, it is not possible to serve
473 # new replicas arriving, that will be queued for the next RDB transfer, so the
474 # server waits a delay in order to let more replicas arrive.
475 #
476 # The delay is specified in seconds, and by default is 5 seconds. To disable
477 # it entirely just set it to 0 seconds and the transfer will start ASAP.
478 repl-diskless-sync-delay 5
479
480 # -----------------------------------------------------------------------------
481 # WARNING: RDB diskless load is experimental. Since in this setup the replica
482 # does not immediately store an RDB on disk, it may cause data loss during
483 # failovers. RDB diskless load + Redis modules not handling I/O reads may also
484 # cause Redis to abort in case of I/O errors during the initial synchronization
485 # stage with the master. Use only if you know what you are doing.
486 # -----------------------------------------------------------------------------
487 #
488 # Replica can load the RDB it reads from the replication link directly from the
489 # socket, or store the RDB to a file and read that file after it was completely
490 # received from the master.
491 #
492 # In many cases the disk is slower than the network, and storing and loading
493 # the RDB file may increase replication time (and even increase the master's
494 # Copy on Write memory and replica buffers).
495 # However, parsing the RDB file directly from the socket may mean that we have
496 # to flush the contents of the current database before the full rdb was
497 # received. For this reason we have the following options:
498 #
499 # "disabled" - Don't use diskless load (store the rdb file to the disk first)
500 # "on-empty-db" - Use diskless load only when it is completely safe.
501 # "swapdb" - Keep a copy of the current db contents in RAM while parsing
502 # the data directly from the socket. Note that this requires
503 # sufficient memory, if you don't have it, you risk an OOM kill.
504 repl-diskless-load disabled
505
506 # Replicas send PINGs to server in a predefined interval. It's possible to
507 # change this interval with the repl_ping_replica_period option. The default
508 # value is 10 seconds.
509 #
510 # repl-ping-replica-period 10
511
512 # The following option sets the replication timeout for:
513 #
514 # 1) Bulk transfer I/O during SYNC, from the point of view of replica.
515 # 2) Master timeout from the point of view of replicas (data, pings).
516 # 3) Replica timeout from the point of view of masters (REPLCONF ACK pings).
517 #
518 # It is important to make sure that this value is greater than the value
519 # specified for repl-ping-replica-period otherwise a timeout will be detected
520 # every time there is low traffic between the master and the replica. The default
521 # value is 60 seconds.
522 #
523 # repl-timeout 60
524
525 # Disable TCP_NODELAY on the replica socket after SYNC?
526 #
527 # If you select "yes" Redis will use a smaller number of TCP packets and
528 # less bandwidth to send data to replicas. But this can add a delay for
529 # the data to appear on the replica side, up to 40 milliseconds with
530 # Linux kernels using a default configuration.
531 #
532 # If you select "no" the delay for data to appear on the replica side will
533 # be reduced but more bandwidth will be used for replication.
534 #
535 # By default we optimize for low latency, but in very high traffic conditions
536 # or when the master and replicas are many hops away, turning this to "yes" may
537 # be a good idea.
538 repl-disable-tcp-nodelay no
539
540 # Set the replication backlog size. The backlog is a buffer that accumulates
541 # replica data when replicas are disconnected for some time, so that when a
542 # replica wants to reconnect again, often a full resync is not needed, but a
543 # partial resync is enough, just passing the portion of data the replica
544 # missed while disconnected.
545 #
546 # The bigger the replication backlog, the longer the replica can endure the
547 # disconnect and later be able to perform a partial resynchronization.
548 #
549 # The backlog is only allocated if there is at least one replica connected.
550 #
551 # repl-backlog-size 1mb
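# As a rough sizing illustration (the numbers are only an example): a master
# that takes about 1 MB/s of writes and should tolerate replicas being away
# for about 60 seconds needs roughly 1 MB/s * 60 s = 60 MB of backlog, e.g.:
#
# repl-backlog-size 64mb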
552
553 # After a master has no connected replicas for some time, the backlog will be
554 # freed. The following option configures the amount of seconds that need to
555 # elapse, starting from the time the last replica disconnected, for the backlog
556 # buffer to be freed.
557 #
558 # Note that replicas never free the backlog for timeout, since they may be
559 # promoted to masters later, and should be able to correctly "partially
560 # resynchronize" with other replicas: hence they should always accumulate backlog.
561 #
562 # A value of 0 means to never release the backlog.
563 #
564 # repl-backlog-ttl 3600
565
566 # The replica priority is an integer number published by Redis in the INFO
567 # output. It is used by Redis Sentinel in order to select a replica to promote
568 # into a master if the master is no longer working correctly.
569 #
570 # A replica with a low priority number is considered better for promotion, so
571 # for instance if there are three replicas with priority 10, 100, 25 Sentinel
572 # will pick the one with priority 10, that is the lowest.
573 #
574 # However a special priority of 0 marks the replica as not able to perform the
575 # role of master, so a replica with priority of 0 will never be selected by
576 # Redis Sentinel for promotion.
577 #
578 # By default the priority is 100.
579 replica-priority 100
580
581 # It is possible for a master to stop accepting writes if there are fewer than
582 # N replicas connected, having a lag less than or equal to M seconds.
583 #
584 # The N replicas need to be in "online" state.
585 #
586 # The lag in seconds, that must be <= the specified value, is calculated from
587 # the last ping received from the replica, that is usually sent every second.
588 #
589 # This option does not GUARANTEE that N replicas will accept the write, but
590 # will limit the window of exposure for lost writes in case not enough replicas
591 # are available, to the specified number of seconds.
592 #
593 # For example to require at least 3 replicas with a lag <= 10 seconds use:
594 #
595 # min-replicas-to-write 3
596 # min-replicas-max-lag 10
597 #
598 # Setting one or the other to 0 disables the feature.
599 #
600 # By default min-replicas-to-write is set to 0 (feature disabled) and
601 # min-replicas-max-lag is set to 10.
602
603 # A Redis master is able to list the address and port of the attached
604 # replicas in different ways. For example the "INFO replication" section
605 # offers this information, which is used, among other tools, by
606 # Redis Sentinel in order to discover replica instances.
607 # Another place where this info is available is in the output of the
608 # "ROLE" command of a master.
609 #
610 # The listed IP address and port normally reported by a replica is
611 # obtained in the following way:
612 #
613 # IP: The address is auto detected by checking the peer address
614 # of the socket used by the replica to connect with the master.
615 #
616 # Port: The port is communicated by the replica during the replication
617 # handshake, and is normally the port that the replica is using to
618 # listen for connections.
619 #
620 # However when port forwarding or Network Address Translation (NAT) is
621 # used, the replica may actually be reachable via different IP and port
622 # pairs. The following two options can be used by a replica in order to
623 # report to its master a specific set of IP and port, so that both INFO
624 # and ROLE will report those values.
625 #
626 # There is no need to use both the options if you need to override just
627 # the port or the IP address.
628 #
629 # replica-announce-ip 5.5.5.5
630 # replica-announce-port 1234
631
632 ############################### KEYS TRACKING #################################
633
634 # Redis implements server assisted support for client side caching of values.
635 # This is implemented using an invalidation table that remembers, using
636 # 16 million slots, what clients may have certain subsets of keys. In turn
637 # this is used in order to send invalidation messages to clients. Please
638 # check this page to understand more about the feature:
639 #
640 # https://redis.io/topics/client-side-caching
641 #
642 # When tracking is enabled for a client, all the read only queries are assumed
643 # to be cached: this will force Redis to store information in the invalidation
644 # table. When keys are modified, such information is flushed away, and
645 # invalidation messages are sent to the clients. However if the workload is
646 # heavily dominated by reads, Redis could use more and more memory in order
647 # to track the keys fetched by many clients.
648 #
649 # For this reason it is possible to configure a maximum fill value for the
650 # invalidation table. By default it is set to 1M of keys, and once this limit
651 # is reached, Redis will start to evict keys in the invalidation table
652 # even if they were not modified, just to reclaim memory: this will in turn
653 # force the clients to invalidate the cached values. Basically the table
654 # maximum size is a trade off between the memory you want to spend server
655 # side to track information about who cached what, and the ability of clients
656 # to retain cached objects in memory.
657 #
658 # If you set the value to 0, it means there are no limits, and Redis will
659 # retain as many keys as needed in the invalidation table.
660 # In the "stats" INFO section, you can find information about the number of
661 # keys in the invalidation table at every given moment.
662 #
663 # Note: when key tracking is used in broadcasting mode, no memory is used
664 # in the server side so this setting is useless.
665 #
666 # tracking-table-max-keys 1000000
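# Tracking is turned on by the client itself, not in this file. For
# illustration, inside a single redis-cli session using the RESP3 protocol
# (the key name is just a placeholder):
#
#   redis-cli -3
#   127.0.0.1:6379> CLIENT TRACKING on
#   127.0.0.1:6379> GET some:key    # reads after this point are tracked for invalidation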
667
668 ################################## SECURITY ###################################
669
670 # Warning: since Redis is pretty fast, an outside user can try up to
671 # 1 million passwords per second against a modern box. This means that you
672 # should use very strong passwords, otherwise they will be very easy to break.
673 # Note that because the password is really a shared secret between the client
674 # and the server, and should not be memorized by any human, the password
675 # can be easily a long string from /dev/urandom or whatever, so by using a
676 # long and unguessable password no brute force attack will be possible.
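# For example, a password of that kind can be generated with a command such as
# the following (shown only as an illustration) and then used with
# "requirepass" or a user's ">" rule:
#
#   openssl rand -hex 32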
677
678 # Redis ACL users are defined in the following format:
679 #
680 # user <username> ... acl rules ...
681 #
682 # For example:
683 #
684 # user worker +@list +@connection ~jobs:* on >ffa9203c493aa99
685 #
686 # The special username "default" is used for new connections. If this user
687 # has the "nopass" rule, then new connections will be immediately authenticated
688 # as the "default" user without the need of any password provided via the
689 # AUTH command. Otherwise if the "default" user is not flagged with "nopass"
690 # the connections will start in not authenticated state, and will require
691 # AUTH (or the HELLO command AUTH option) in order to be authenticated and
692 # start to work.
693 #
694 # The ACL rules that describe what a user can do are the following:
695 #
696 # on Enable the user: it is possible to authenticate as this user.
697 # off Disable the user: it's no longer possible to authenticate
698 # with this user, however the already authenticated connections
699 # will still work.
700 # +<command> Allow the execution of that command
701 # -<command> Disallow the execution of that command
702 # +@<category> Allow the execution of all the commands in such category,
703 # where valid categories are @admin, @set, @sortedset, ...
704 # and so forth, see the full list in the server.c file where
705 # the Redis command table is described and defined.
706 # The special category @all means all the commands, both those
707 # currently present in the server and those that will be loaded
708 # in the future via modules.
709 # +<command>|subcommand Allow a specific subcommand of an otherwise
710 # disabled command. Note that this form is not
711 # allowed as negative like -DEBUG|SEGFAULT, but
712 # only additive starting with "+".
713 # allcommands Alias for +@all. Note that it implies the ability to execute
714 # all the future commands loaded via the modules system.
715 # nocommands Alias for -@all.
716 # ~<pattern> Add a pattern of keys that can be mentioned as part of
717 # commands. For instance ~* allows all the keys. The pattern
718 # is a glob-style pattern like the one of KEYS.
719 # It is possible to specify multiple patterns.
720 # allkeys Alias for ~*
721 # resetkeys Flush the list of allowed keys patterns.
722 # ><password> Add this password to the list of valid passwords for the user.
723 # For example >mypass will add "mypass" to the list.
724 # This directive clears the "nopass" flag (see later).
725 # <<password> Remove this password from the list of valid passwords.
726 # nopass All the set passwords of the user are removed, and the user
727 # is flagged as requiring no password: it means that every
728 # password will work against this user. If this directive is
729 # used for the default user, every new connection will be
730 # immediately authenticated with the default user without
731 # any explicit AUTH command required. Note that the "resetpass"
732 # directive will clear this condition.
733 # resetpass Flush the list of allowed passwords. Moreover removes the
734 # "nopass" status. After "resetpass" the user has no associated
735 # passwords and there is no way to authenticate without adding
736 # some password (or setting it as "nopass" later).
737 # reset Performs the following actions: resetpass, resetkeys, off,
738 # -@all. The user returns to the same state it has immediately
739 # after its creation.
740 #
741 # ACL rules can be specified in any order: for instance you can start with
742 # passwords, then flags, or key patterns. However note that the additive
743 # and subtractive rules will CHANGE MEANING depending on the ordering.
744 # For instance see the following example:
745 #
746 # user alice on +@all -DEBUG ~* >somepassword
747 #
748 # This will allow "alice" to use all the commands with the exception of the
749 # DEBUG command, since +@all added all the commands to the set of the commands
750 # alice can use, and later DEBUG was removed. However if we invert the order
751 # of two ACL rules the result will be different:
752 #
753 # user alice on -DEBUG +@all ~* >somepassword
754 #
755 # Now DEBUG was removed when alice still had no commands in the set of allowed
756 # commands, later all the commands are added, so the user will be able to
757 # execute everything.
758 #
759 # Basically ACL rules are processed left-to-right.
760 #
761 # For more information about ACL configuration please refer to
762 # the Redis web site at https://redis.io/topics/acl
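# As one more illustration (the user name, password and key pattern are
# placeholders, not something configured by this stack), a read-only
# monitoring user limited to a single key prefix could be declared as:
#
# user monitor on >replace-with-a-strong-password ~metrics:* +@read +ping +info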
763
764 # ACL LOG
765 #
766 # The ACL Log tracks failed commands and authentication events associated
767 # with ACLs. The ACL Log is useful to troubleshoot failed commands blocked
768 # by ACLs. The ACL Log is stored in memory. You can reclaim memory with
769 # ACL LOG RESET. Define the maximum entry length of the ACL Log below.
770 acllog-max-len 128
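# The log itself is inspected and cleared at runtime with the ACL command,
# for example:
#
#   redis-cli ACL LOG 10      # show the 10 most recent entries
#   redis-cli ACL LOG RESET   # clear the log and reclaim its memory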
771
772 # Using an external ACL file
773 #
774 # Instead of configuring users here in this file, it is possible to use
775 # a stand-alone file just listing users. The two methods cannot be mixed:
776 # if you configure users here and at the same time you activate the external
777 # ACL file, the server will refuse to start.
778 #
779 # The format of the external ACL user file is exactly the same as the
780 # format that is used inside redis.conf to describe users.
781 #
782 # aclfile /etc/redis/users.acl
783
784 # IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility
785 # layer on top of the new ACL system. The option effect will be just setting
786 # the password for the default user. Clients will still authenticate using
787 # AUTH <password> as usual, or more explicitly with AUTH default <password>
788 # if they follow the new protocol: both will work.
789 #
790 # requirepass foobared
791
792 # Command renaming (DEPRECATED).
793 #
794 # ------------------------------------------------------------------------
795 # WARNING: avoid using this option if possible. Instead use ACLs to remove
796 # commands from the default user, and put them only in some admin user you
797 # create for administrative purposes.
798 # ------------------------------------------------------------------------
799 #
800 # It is possible to change the name of dangerous commands in a shared
801 # environment. For instance the CONFIG command may be renamed into something
802 # hard to guess so that it will still be available for internal-use tools
803 # but not available for general clients.
804 #
805 # Example:
806 #
807 # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
808 #
809 # It is also possible to completely kill a command by renaming it into
810 # an empty string:
811 #
812 # rename-command CONFIG ""
813 #
814 # Please note that changing the name of commands that are logged into the
815 # AOF file or transmitted to replicas may cause problems.
816
817 ################################### CLIENTS ####################################
818
819 # Set the max number of connected clients at the same time. By default
820 # this limit is set to 10000 clients, however if the Redis server is not
821 # able to configure the process file limit to allow for the specified limit
822 # the max number of allowed clients is set to the current file limit
823 # minus 32 (as Redis reserves a few file descriptors for internal uses).
824 #
825 # Once the limit is reached Redis will close all the new connections sending
826 # an error 'max number of clients reached'.
827 #
828 # IMPORTANT: When Redis Cluster is used, the max number of connections is also
829 # shared with the cluster bus: every node in the cluster will use two
830 # connections, one incoming and another outgoing. It is important to size the
831 # limit accordingly in case of very large clusters.
832 #
833 # maxclients 10000
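# The limit actually in effect (after the file descriptor adjustment described
# above) can be checked at runtime, for example with:
#
#   redis-cli CONFIG GET maxclients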
834
835 ############################## MEMORY MANAGEMENT ################################
836
837 # Set a memory usage limit to the specified amount of bytes.
838 # When the memory limit is reached Redis will try to remove keys
839 # according to the eviction policy selected (see maxmemory-policy).
840 #
841 # If Redis can't remove keys according to the policy, or if the policy is
842 # set to 'noeviction', Redis will start to reply with errors to commands
843 # that would use more memory, like SET, LPUSH, and so on, and will continue
844 # to reply to read-only commands like GET.
845 #
846 # This option is usually useful when using Redis as an LRU or LFU cache, or to
847 # set a hard memory limit for an instance (using the 'noeviction' policy).
848 #
849 # WARNING: If you have replicas attached to an instance with maxmemory on,
850 # the size of the output buffers needed to feed the replicas is subtracted
851 # from the used memory count, so that network problems / resyncs will
852 # not trigger a loop where keys are evicted, and in turn the output
853 # buffer of replicas is full with DELs of keys evicted triggering the deletion
854 # of more keys, and so forth until the database is completely emptied.
855 #
856 # In short... if you have replicas attached it is suggested that you set a lower
857 # limit for maxmemory so that there is some free RAM on the system for replica
858 # output buffers (but this is not needed if the policy is 'noeviction').
859 #
860 maxmemory 8192mb
860 #maxmemory 8192mb
861
862 # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
863 # is reached. You can select one from the following behaviors:
864 #
865 # volatile-lru -> Evict using approximated LRU, only keys with an expire set.
866 # allkeys-lru -> Evict any key using approximated LRU.
867 # volatile-lfu -> Evict using approximated LFU, only keys with an expire set.
868 # allkeys-lfu -> Evict any key using approximated LFU.
869 # volatile-random -> Remove a random key having an expire set.
870 # allkeys-random -> Remove a random key, any key.
871 # volatile-ttl -> Remove the key with the nearest expire time (minor TTL)
872 # noeviction -> Don't evict anything, just return an error on write operations.
873 #
874 # LRU means Least Recently Used
875 # LFU means Least Frequently Used
876 #
877 # LRU, LFU and volatile-ttl are all implemented using approximated
878 # randomized algorithms.
879 #
880 # Note: with any of the above policies, Redis will return an error on write
881 # operations, when there are no suitable keys for eviction.
882 #
883 # At the time of writing these commands are: set setnx setex append
884 # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
885 # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
886 # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
887 # getset mset msetnx exec sort
888 #
889 # The default is:
890 #
891 maxmemory-policy volatile-lru
891 #maxmemory-policy volatile-lru
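# Whichever policy is in effect, eviction behavior can be observed at runtime,
# for example with:
#
#   redis-cli CONFIG GET maxmemory-policy
#   redis-cli INFO memory | grep used_memory_human
#   redis-cli INFO stats | grep evicted_keys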
892
893 # LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
894 # algorithms (in order to save memory), so you can tune it for speed or
895 # accuracy. By default Redis will check five keys and pick the one that was
896 # used least recently; you can change the sample size using the following
897 # configuration directive.
898 #
899 # The default of 5 produces good enough results. 10 approximates very closely
900 # true LRU but costs more CPU. 3 is faster but not very accurate.
901 #
902 maxmemory-samples 5
902 maxmemory-samples 10
903
904 # Starting from Redis 5, by default a replica will ignore its maxmemory setting
905 # (unless it is promoted to master after a failover or manually). It means
906 # that the eviction of keys will be just handled by the master, sending the
907 # DEL commands to the replica as keys are evicted on the master side.
908 #
909 # This behavior ensures that masters and replicas stay consistent, and is usually
910 # what you want, however if your replica is writable, or you want the replica
911 # to have a different memory setting, and you are sure all the writes performed
912 # to the replica are idempotent, then you may change this default (but be sure
913 # to understand what you are doing).
914 #
915 # Note that since the replica by default does not evict, it may end up using more
916 # memory than the one set via maxmemory (there are certain buffers that may
917 # be larger on the replica, or data structures may sometimes take more memory
918 # and so forth). So make sure you monitor your replicas and make sure they
919 # have enough memory to never hit a real out-of-memory condition before the
920 # master hits the configured maxmemory setting.
921 #
922 # replica-ignore-maxmemory yes
923
924 # Redis reclaims expired keys in two ways: upon access when those keys are
925 # found to be expired, and also in background, in what is called the
926 # "active expire key". The key space is slowly and incrementally scanned
927 # looking for expired keys to reclaim, so that it is possible to free memory
928 # of keys that are expired and will never be accessed again in a short time.
929 #
930 # The default effort of the expire cycle will try to avoid having more than
931 # ten percent of expired keys still in memory, and will try to avoid consuming
932 # more than 25% of total memory and to avoid adding latency to the system. However
933 # it is possible to increase the expire "effort" that is normally set to
934 # "1", to a greater value, up to the value "10". At its maximum value the
935 # system will use more CPU, longer cycles (and technically may introduce
936 # more latency), and will tolerate fewer already expired keys still present
937 # in the system. It's a tradeoff between memory, CPU and latency.
938 #
939 # active-expire-effort 1
940
941 ############################# LAZY FREEING ####################################
942
943 # Redis has two primitives to delete keys. One is called DEL and is a blocking
944 # deletion of the object. It means that the server stops processing new commands
945 # in order to reclaim all the memory associated with an object in a synchronous
946 # way. If the key deleted is associated with a small object, the time needed
947 # in order to execute the DEL command is very small and comparable to most other
948 # O(1) or O(log_N) commands in Redis. However if the key is associated with an
949 # aggregated value containing millions of elements, the server can block for
950 # a long time (even seconds) in order to complete the operation.
951 #
952 # For the above reasons Redis also offers non blocking deletion primitives
953 # such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and
954 # FLUSHDB commands, in order to reclaim memory in background. Those commands
955 # are executed in constant time. Another thread will incrementally free the
956 # object in the background as fast as possible.
957 #
958 # DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
959 # It's up to the design of the application to understand when it is a good
960 # idea to use one or the other. However the Redis server sometimes has to
961 # delete keys or flush the whole database as a side effect of other operations.
962 # Specifically Redis deletes objects independently of a user call in the
963 # following scenarios:
964 #
965 # 1) On eviction, because of the maxmemory and maxmemory policy configurations,
966 # in order to make room for new data, without going over the specified
967 # memory limit.
968 # 2) Because of expire: when a key with an associated time to live (see the
969 # EXPIRE command) must be deleted from memory.
970 # 3) Because of a side effect of a command that stores data on a key that may
971 # already exist. For example the RENAME command may delete the old key
972 # content when it is replaced with another one. Similarly SUNIONSTORE
973 # or SORT with STORE option may delete existing keys. The SET command
974 # itself removes any old content of the specified key in order to replace
975 # it with the specified string.
976 # 4) During replication, when a replica performs a full resynchronization with
977 # its master, the content of the whole database is removed in order to
978 # load the RDB file just transferred.
979 #
980 # In all the above cases the default is to delete objects in a blocking way,
981 # like if DEL was called. However you can configure each case specifically
982 # in order to instead release memory in a non-blocking way like if UNLINK
983 # was called, using the following configuration directives.
984
985 lazyfree-lazy-eviction no
986 lazyfree-lazy-expire no
987 lazyfree-lazy-server-del no
988 replica-lazy-flush no
989
990 # It is also possible, for the case when replacing the user code DEL calls
991 # with UNLINK calls is not easy, to modify the default behavior of the DEL
992 # command to act exactly like UNLINK, using the following configuration
993 # directive:
994
995 lazyfree-lazy-user-del no
996
996
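# Illustrative note (not part of the upstream defaults): assuming redis-cli is
# available and a hypothetical large key named "bigkey" exists, the blocking and
# non-blocking delete paths described above can be compared directly:
#
#   redis-cli DEL bigkey      # frees the memory synchronously; may block
#   redis-cli UNLINK bigkey   # unlinks the key; a background thread frees the memory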
997 ################################ THREADED I/O #################################
998
999 # Redis is mostly single threaded, however there are certain threaded
1000 # operations such as UNLINK, slow I/O accesses and other things that are
1001 # performed on side threads.
1002 #
1003 # Now it is also possible to handle Redis clients socket reads and writes
1004 # in different I/O threads. Since writing in particular is slow, normally
1005 # Redis users use pipelining in order to speed up Redis performance per
1006 # core, and spawn multiple instances in order to scale more. Using I/O
1007 # threads it is possible to easily speed up Redis by up to two times without resorting
1008 # to pipelining nor sharding of the instance.
1009 #
1010 # By default threading is disabled; we suggest enabling it only on machines
1011 # that have at least 4 or more cores, leaving at least one spare core.
1012 # Using more than 8 threads is unlikely to help much. We also recommend using
1013 # threaded I/O only if you actually have performance problems, with Redis
1014 # instances being able to use a quite big percentage of CPU time, otherwise
1015 # there is no point in using this feature.
1016 #
1017 # So for instance if you have a four core box, try to use 2 or 3 I/O
1018 # threads; if you have 8 cores, try to use 6 threads. In order to
1019 # enable I/O threads use the following configuration directive:
1020 #
1021 # io-threads 4
1022 #
1023 # Setting io-threads to 1 will just use the main thread as usual.
1024 # When I/O threads are enabled, we only use threads for writes, that is
1025 # to thread the write(2) syscall and transfer the client buffers to the
1026 # socket. However it is also possible to enable threading of reads and
1027 # protocol parsing using the following configuration directive, by setting
1028 # it to yes:
1029 #
1030 # io-threads-do-reads no
1031 #
1032 # Usually threading reads doesn't help much.
1033 #
1034 # NOTE 1: This configuration directive cannot be changed at runtime via
1035 # CONFIG SET. Also this feature currently does not work when SSL is
1036 # enabled.
1037 #
1038 # NOTE 2: If you want to test the Redis speedup using redis-benchmark, make
1039 # sure you also run the benchmark itself in threaded mode, using the
1040 # --threads option to match the number of Redis threads, otherwise you'll not
1041 # be able to notice the improvements.
1042
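# Illustrative benchmark sketch (assumes redis-benchmark from Redis 6+ and an
# instance started with "io-threads 4"; the request count is arbitrary):
#
#   redis-benchmark --threads 4 -t set,get -n 1000000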
1043 ############################ KERNEL OOM CONTROL ##############################
1044
1045 # On Linux, it is possible to hint the kernel OOM killer on what processes
1046 # should be killed first when out of memory.
1047 #
1048 # Enabling this feature makes Redis actively control the oom_score_adj value
1049 # for all its processes, depending on their role. The default scores will
1050 # attempt to have background child processes killed before all others, and
1051 # replicas killed before masters.
1052
1053 oom-score-adj no
1054
1055 # When oom-score-adj is used, this directive controls the specific values used
1056 # for master, replica and background child processes. Values range -1000 to
1057 # 1000 (higher means more likely to be killed).
1058 #
1059 # Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities)
1060 # can freely increase their value, but not decrease it below its initial
1061 # settings.
1062 #
1063 # Values are used relative to the initial value of oom_score_adj when the server
1064 # starts. Because typically the initial value is 0, they will often match the
1065 # absolute values.
1066
1067 oom-score-adj-values 0 200 800
1068
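# Illustrative check (assumes a Linux host where pidof resolves the redis-server
# process; not part of the upstream defaults): the value applied by Redis can be
# inspected through procfs:
#
#   cat /proc/$(pidof redis-server)/oom_score_adj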
1069 ############################## APPEND ONLY MODE ###############################
1070
1071 # By default Redis asynchronously dumps the dataset on disk. This mode is
1072 # good enough in many applications, but an issue with the Redis process or
1073 # a power outage may result in a few minutes of lost writes (depending on
1074 # the configured save points).
1075 #
1076 # The Append Only File is an alternative persistence mode that provides
1077 # much better durability. For instance using the default data fsync policy
1078 # (see later in the config file) Redis can lose just one second of writes in a
1079 # dramatic event like a server power outage, or a single write if something
1080 # wrong with the Redis process itself happens, but the operating system is
1081 # still running correctly.
1082 #
1083 # AOF and RDB persistence can be enabled at the same time without problems.
1084 # If the AOF is enabled on startup Redis will load the AOF, that is the file
1085 # with the better durability guarantees.
1086 #
1087 # Please check http://redis.io/topics/persistence for more information.
1088
1089 appendonly no
1090
1091 # The name of the append only file (default: "appendonly.aof")
1092
1093 appendfilename "appendonly.aof"
1094
1095 # The fsync() call tells the Operating System to actually write data on disk
1096 # instead of waiting for more data in the output buffer. Some OSes will really flush
1097 # data to disk, while others will just try to do it ASAP.
1098 #
1099 # Redis supports three different modes:
1100 #
1101 # no: don't fsync, just let the OS flush the data when it wants. Faster.
1102 # always: fsync after every write to the append only log. Slow, safest.
1103 # everysec: fsync only one time every second. Compromise.
1104 #
1105 # The default is "everysec", as that's usually the right compromise between
1106 # speed and data safety. It's up to you to understand if you can relax this to
1107 # "no", which lets the operating system flush the output buffer when
1108 # it wants, for better performance (but if you can live with the idea of
1109 # some data loss consider the default persistence mode that's snapshotting),
1110 # or on the contrary, use "always" which is very slow but a bit safer than
1111 # everysec.
1112 #
1113 # For more details please check the following article:
1114 # http://antirez.com/post/redis-persistence-demystified.html
1115 #
1116 # If unsure, use "everysec".
1117
1118 # appendfsync always
1119 appendfsync everysec
1120 # appendfsync no
1121
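# Illustrative runtime check (assumes redis-cli; not part of the upstream
# defaults): the active fsync policy can be read and, if needed, changed
# without a restart:
#
#   redis-cli CONFIG GET appendfsync
#   redis-cli CONFIG SET appendfsync everysec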
1122 # When the AOF fsync policy is set to always or everysec, and a background
1123 # saving process (a background save or AOF log background rewriting) is
1124 # performing a lot of I/O against the disk, in some Linux configurations
1125 # Redis may block too long on the fsync() call. Note that there is no fix for
1126 # this currently, as even performing fsync in a different thread will block
1127 # our synchronous write(2) call.
1128 #
1129 # In order to mitigate this problem it's possible to use the following option
1130 # that will prevent fsync() from being called in the main process while a
1131 # BGSAVE or BGREWRITEAOF is in progress.
1132 #
1133 # This means that while another child is saving, the durability of Redis is
1134 # the same as "appendfsync no". In practical terms, this means that it is
1135 # possible to lose up to 30 seconds of log in the worst scenario (with the
1136 # default Linux settings).
1137 #
1138 # If you have latency problems turn this to "yes". Otherwise leave it as
1139 # "no", which is the safest pick from the point of view of durability.
1140
1141 no-appendfsync-on-rewrite no
1142
1143 # Automatic rewrite of the append only file.
1144 # Redis is able to automatically rewrite the log file, implicitly calling
1145 # BGREWRITEAOF when the AOF log size grows by the specified percentage.
1146 #
1147 # This is how it works: Redis remembers the size of the AOF file after the
1148 # latest rewrite (if no rewrite has happened since the restart, the size of
1149 # the AOF at startup is used).
1150 #
1151 # This base size is compared to the current size. If the current size is
1152 # bigger than the specified percentage, the rewrite is triggered. Also
1153 # you need to specify a minimal size for the AOF file to be rewritten, this
1154 # is useful to avoid rewriting the AOF file even if the percentage increase
1155 # is reached but it is still pretty small.
1156 #
1157 # Specify a percentage of zero in order to disable the automatic AOF
1158 # rewrite feature.
1159
1160 auto-aof-rewrite-percentage 100
1161 auto-aof-rewrite-min-size 64mb
1162
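# Illustrative commands (assumes redis-cli): a rewrite can also be triggered
# manually, and the AOF and rewrite status observed through the persistence
# section of INFO:
#
#   redis-cli BGREWRITEAOF
#   redis-cli INFO persistence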
1163 # An AOF file may be found to be truncated at the end during the Redis
1164 # startup process, when the AOF data gets loaded back into memory.
1165 # This may happen when the system where Redis is running
1166 # crashes, especially when an ext4 filesystem is mounted without the
1167 # data=ordered option (however this can't happen when Redis itself
1168 # crashes or aborts but the operating system still works correctly).
1169 #
1170 # Redis can either exit with an error when this happens, or load as much
1171 # data as possible (the default now) and start if the AOF file is found
1172 # to be truncated at the end. The following option controls this behavior.
1173 #
1174 # If aof-load-truncated is set to yes, a truncated AOF file is loaded and
1175 # the Redis server starts emitting a log to inform the user of the event.
1176 # Otherwise if the option is set to no, the server aborts with an error
1177 # and refuses to start. When the option is set to no, the user is required
1178 # to fix the AOF file using the "redis-check-aof" utility before restarting
1179 # the server.
1180 #
1181 # Note that if the AOF file is found to be corrupted in the middle,
1182 # the server will still exit with an error. This option only applies when
1183 # Redis tries to read more data from the AOF file but not enough bytes
1184 # are found.
1185 aof-load-truncated yes
1186
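# Illustrative repair sketch (assumes the redis-check-aof utility shipped with
# Redis and the default AOF filename configured above):
#
#   redis-check-aof --fix appendonly.aof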
1187 # When rewriting the AOF file, Redis is able to use an RDB preamble in the
1188 # AOF file for faster rewrites and recoveries. When this option is turned
1189 # on, the rewritten AOF file is composed of two different stanzas:
1190 #
1191 # [RDB file][AOF tail]
1192 #
1193 # When loading, Redis recognizes that the AOF file starts with the "REDIS"
1194 # string and loads the prefixed RDB file, then continues loading the AOF
1195 # tail.
1196 aof-use-rdb-preamble yes
1197
1198 ################################ LUA SCRIPTING ###############################
1199
1200 # Max execution time of a Lua script in milliseconds.
1201 #
1202 # If the maximum execution time is reached Redis will log that a script is
1203 # still in execution after the maximum allowed time and will start to
1204 # reply to queries with an error.
1205 #
1206 # When a long running script exceeds the maximum execution time only the
1207 # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
1208 # used to stop a script that did not yet call any write commands. The second
1209 # is the only way to shut down the server in case a write command was
1210 # already issued by the script but the user doesn't want to wait for the natural
1211 # termination of the script.
1212 #
1213 # Set it to 0 or a negative value for unlimited execution without warnings.
1214 lua-time-limit 5000
1215
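# Illustrative example (assumes redis-cli; the script body and key name are
# arbitrary): a script is run with EVAL, and a runaway read-only script can be
# stopped with SCRIPT KILL once lua-time-limit has been exceeded:
#
#   redis-cli EVAL "return redis.call('GET', KEYS[1])" 1 somekey
#   redis-cli SCRIPT KILL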
1216 ################################ REDIS CLUSTER ###############################
1217
1218 # Normal Redis instances can't be part of a Redis Cluster; only nodes that are
1219 # started as cluster nodes can. In order to start a Redis instance as a
1220 # cluster node, enable cluster support by uncommenting the following:
1221 #
1222 # cluster-enabled yes
1223
1224 # Every cluster node has a cluster configuration file. This file is not
1225 # intended to be edited by hand. It is created and updated by Redis nodes.
1226 # Every Redis Cluster node requires a different cluster configuration file.
1227 # Make sure that instances running in the same system do not have
1228 # overlapping cluster configuration file names.
1229 #
1230 # cluster-config-file nodes-6379.conf
1231
1232 # Cluster node timeout is the number of milliseconds a node must be unreachable
1233 # for it to be considered in failure state.
1234 # Most other internal time limits are a multiple of the node timeout.
1235 #
1236 # cluster-node-timeout 15000
1237
1238 # A replica of a failing master will avoid starting a failover if its data
1239 # looks too old.
1240 #
1241 # There is no simple way for a replica to actually have an exact measure of
1242 # its "data age", so the following two checks are performed:
1243 #
1244 # 1) If there are multiple replicas able to failover, they exchange messages
1245 # in order to try to give an advantage to the replica with the best
1246 # replication offset (more data from the master processed).
1247 # Replicas will try to get their rank by offset, and apply to the start
1248 # of the failover a delay proportional to their rank.
1249 #
1250 # 2) Every single replica computes the time of the last interaction with
1251 # its master. This can be the last ping or command received (if the master
1252 # is still in the "connected" state), or the time that elapsed since the
1253 # disconnection with the master (if the replication link is currently down).
1254 # If the last interaction is too old, the replica will not try to failover
1255 # at all.
1256 #
1257 # Point "2" can be tuned by the user. Specifically a replica will not perform
1258 # the failover if, since the last interaction with the master, the time
1259 # elapsed is greater than:
1260 #
1261 # (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period
1262 #
1263 # So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor
1264 # is 10, and assuming a default repl-ping-replica-period of 10 seconds, the
1265 # replica will not try to failover if it was not able to talk with the master
1266 # for longer than 310 seconds.
1267 #
1268 # A large cluster-replica-validity-factor may allow replicas with too old data to fail over
1269 # a master, while a too small value may prevent the cluster from being able to
1270 # elect a replica at all.
1271 #
1272 # For maximum availability, it is possible to set the cluster-replica-validity-factor
1273 # to a value of 0, which means that replicas will always try to fail over the
1274 # master regardless of the last time they interacted with the master.
1275 # (However they'll always try to apply a delay proportional to their
1276 # offset rank).
1277 #
1278 # Zero is the only value able to guarantee that when all the partitions heal
1279 # the cluster will always be able to continue.
1280 #
1281 # cluster-replica-validity-factor 10
1282
1283 # Cluster replicas are able to migrate to orphaned masters, that is, masters
1284 # that are left without working replicas. This improves the cluster's ability
1285 # to resist failures, as otherwise an orphaned master can't be failed over
1286 # in case of failure if it has no working replicas.
1287 #
1288 # Replicas migrate to orphaned masters only if there are still at least a
1289 # given number of other working replicas for their old master. This number
1290 # is the "migration barrier". A migration barrier of 1 means that a replica
1291 # will migrate only if there is at least 1 other working replica for its master
1292 # and so forth. It usually reflects the number of replicas you want for every
1293 # master in your cluster.
1294 #
1295 # Default is 1 (replicas migrate only if their masters remain with at least
1296 # one replica). To disable migration just set it to a very large value.
1297 # A value of 0 can be set but is useful only for debugging and dangerous
1298 # in production.
1299 #
1300 # cluster-migration-barrier 1
1301
1302 # By default Redis Cluster nodes stop accepting queries if they detect there
1303 # is at least one hash slot uncovered (no available node is serving it).
1304 # This way if the cluster is partially down (for example a range of hash slots
1305 # are no longer covered) the whole cluster eventually becomes unavailable.
1306 # It automatically becomes available again as soon as all the slots are covered.
1307 #
1308 # However sometimes you want the subset of the cluster which is working
1309 # to continue to accept queries for the part of the key space that is still
1310 # covered. In order to do so, just set the cluster-require-full-coverage
1311 # option to no.
1312 #
1313 # cluster-require-full-coverage yes
1314
1315 # This option, when set to yes, prevents replicas from trying to fail over their
1316 # master during master failures. However the master can still perform a
1317 # manual failover, if forced to do so.
1318 #
1319 # This is useful in different scenarios, especially in the case of multiple
1320 # data center operations, where we want one side to never be promoted except
1321 # in the case of a total DC failure.
1322 #
1323 # cluster-replica-no-failover no
1324
1325 # This option, when set to yes, allows nodes to serve read traffic while
1326 # the cluster is in a down state, as long as it believes it owns the slots.
1327 #
1328 # This is useful for two cases. The first case is for when an application
1329 # doesn't require consistency of data during node failures or network partitions.
1330 # One example of this is a cache, where as long as the node has the data it
1331 # should be able to serve it.
1332 #
1333 # The second use case is for configurations that don't meet the recommended
1334 # three shards but want to enable cluster mode and scale later. A
1335 # master outage in a 1 or 2 shard configuration causes a read/write outage to the
1336 # entire cluster without this option set; with it set there is only a write outage.
1337 # Without a quorum of masters, slot ownership will not change automatically.
1338 #
1339 # cluster-allow-reads-when-down no
1340
1341 # In order to set up your cluster make sure to read the documentation
1342 # available at the http://redis.io web site.
1343
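# Illustrative bootstrap sketch (assumes redis-cli 5+ and six hypothetical
# cluster-enabled nodes; hostnames and ports are placeholders, not part of
# this stack):
#
#   redis-cli --cluster create node1:6379 node2:6379 node3:6379 \
#     node4:6379 node5:6379 node6:6379 --cluster-replicas 1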
1344 ########################## CLUSTER DOCKER/NAT support ########################
1345
1346 # In certain deployments, Redis Cluster node address discovery fails, because
1347 # addresses are NAT-ted or because ports are forwarded (the typical case is
1348 # Docker and other containers).
1349 #
1350 # In order to make Redis Cluster work in such environments, a static
1351 # configuration where each node knows its public address is needed. The
1352 # following options are used for this purpose:
1353 #
1354 # * cluster-announce-ip
1355 # * cluster-announce-port
1356 # * cluster-announce-bus-port
1357 #
1358 # Each instructs the node about its address, client port, and cluster message
1359 # bus port. The information is then published in the header of the bus packets
1360 # so that other nodes will be able to correctly map the address of the node
1361 # publishing the information.
1362 #
1363 # If the above options are not used, the normal Redis Cluster auto-detection
1364 # will be used instead.
1365 #
1366 # Note that when remapped, the bus port may not be at the fixed offset of
1367 # client port + 10000, so you can specify any port and bus-port depending
1368 # on how they get remapped. If the bus-port is not set, a fixed offset of
1369 # 10000 will be used as usual.
1370 #
1371 # Example:
1372 #
1373 # cluster-announce-ip 10.1.1.5
1374 # cluster-announce-port 6379
1375 # cluster-announce-bus-port 6380
1376
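# Illustrative relation to a port-forwarded container (hypothetical host ports,
# not part of this stack): if Docker publishes 6379 as 7000 and the bus port
# 16379 as 17000, the matching announce settings would be:
#
# cluster-announce-port 7000
# cluster-announce-bus-port 17000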
1377 ################################## SLOW LOG ###################################
1378
1379 # The Redis Slow Log is a system to log queries that exceeded a specified
1380 # execution time. The execution time does not include the I/O operations
1381 # like talking with the client, sending the reply and so forth,
1382 # but just the time needed to actually execute the command (this is the only
1383 # stage of command execution where the thread is blocked and cannot serve
1384 # other requests in the meantime).
1385 #
1386 # You can configure the slow log with two parameters: one tells Redis
1387 # the execution time, in microseconds, that a command must exceed in order
1388 # to get logged, and the other parameter is the length of the
1389 # slow log. When a new command is logged the oldest one is removed from the
1390 # queue of logged commands.
1391
1392 # The following time is expressed in microseconds, so 1000000 is equivalent
1393 # to one second. Note that a negative number disables the slow log, while
1394 # a value of zero forces the logging of every command.
1395 slowlog-log-slower-than 10000
1396
1397 # There is no limit to this length. Just be aware that it will consume memory.
1398 # You can reclaim memory used by the slow log with SLOWLOG RESET.
1399 slowlog-max-len 128
1400
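# Illustrative inspection (assumes redis-cli): recent slow entries can be
# listed and the log cleared to reclaim its memory, as mentioned above:
#
#   redis-cli SLOWLOG GET 10
#   redis-cli SLOWLOG RESET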
1401 ################################ LATENCY MONITOR ##############################
1402
1403 # The Redis latency monitoring subsystem samples different operations
1404 # at runtime in order to collect data related to possible sources of
1405 # latency of a Redis instance.
1406 #
1407 # Via the LATENCY command this information is available to the user, who can
1408 # print graphs and obtain reports.
1409 #
1410 # The system only logs operations that were performed in a time equal to or
1411 # greater than the amount of milliseconds specified via the
1412 # latency-monitor-threshold configuration directive. When its value is set
1413 # to zero, the latency monitor is turned off.
1414 #
1415 # By default latency monitoring is disabled since it is mostly not needed
1416 # if you don't have latency issues, and collecting data has a performance
1417 # impact that, while very small, can be measured under heavy load. Latency
1418 # monitoring can easily be enabled at runtime using the command
1419 # "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
1420 latency-monitor-threshold 0
1421
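# Illustrative runtime usage (assumes redis-cli; the 100 ms threshold is an
# arbitrary example value):
#
#   redis-cli CONFIG SET latency-monitor-threshold 100
#   redis-cli LATENCY LATEST
#   redis-cli LATENCY DOCTOR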
1422 ############################# EVENT NOTIFICATION ##############################
1423
1424 # Redis can notify Pub/Sub clients about events happening in the key space.
1425 # This feature is documented at http://redis.io/topics/notifications
1426 #
1427 # For instance if keyspace event notification is enabled, and a client
1428 # performs a DEL operation on key "foo" stored in database 0, two
1429 # messages will be published via Pub/Sub:
1430 #
1431 # PUBLISH __keyspace@0__:foo del
1432 # PUBLISH __keyevent@0__:del foo
1433 #
1434 # It is possible to select the events that Redis will notify among a set
1435 # of classes. Every class is identified by a single character:
1436 #
1437 # K Keyspace events, published with __keyspace@<db>__ prefix.
1438 # E Keyevent events, published with __keyevent@<db>__ prefix.
1439 # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
1440 # $ String commands
1441 # l List commands
1442 # s Set commands
1443 # h Hash commands
1444 # z Sorted set commands
1445 # x Expired events (events generated every time a key expires)
1446 # e Evicted events (events generated when a key is evicted for maxmemory)
1447 # t Stream commands
1448 # m Key-miss events (Note: It is not included in the 'A' class)
1449 # A Alias for g$lshzxet, so that the "AKE" string means all the events
1450 # (Except key-miss events which are excluded from 'A' due to their
1451 # unique nature).
1452 #
1453 # The "notify-keyspace-events" directive takes as its argument a string composed
1454 # of zero or multiple characters. The empty string means that notifications
1455 # are disabled.
1456 #
1457 # Example: to enable list and generic events, from the point of view of the
1458 # event name, use:
1459 #
1460 # notify-keyspace-events Elg
1461 #
1462 # Example 2: to get the stream of expired keys by subscribing to the channel
1463 # named __keyevent@0__:expired, use:
1464 #
1465 # notify-keyspace-events Ex
1466 #
1467 # By default all notifications are disabled because most users don't need
1468 # this feature and the feature has some overhead. Note that if you don't
1469 # specify at least one of K or E, no events will be delivered.
1470 notify-keyspace-events ""
1471
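# Illustrative subscription (assumes redis-cli and that "Ex" has been enabled
# as in Example 2 above): expired-key events on database 0 can be observed with:
#
#   redis-cli CONFIG SET notify-keyspace-events Ex
#   redis-cli SUBSCRIBE __keyevent@0__:expired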
1472 ############################### GOPHER SERVER #################################
1473
1474 # Redis contains an implementation of the Gopher protocol, as specified in
1475 # RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt).
1476 #
1477 # The Gopher protocol was very popular in the late '90s. It is an alternative
1478 # to the web, and the implementation, both server and client side, is so simple
1479 # that the Redis server has just 100 lines of code in order to implement this
1480 # support.
1481 #
1482 # What do you do with Gopher nowadays? Well Gopher never *really* died, and
1483 # lately there is a movement to resurrect Gopher's more hierarchical content,
1484 # composed of just plain text documents. Some want a simpler
1485 # internet, others believe that the mainstream internet became too much
1486 # controlled, and it's cool to create an alternative space for people that
1487 # want a bit of fresh air.
1488 #
1489 # Anyway, for the 10th birthday of Redis, we gave it the Gopher protocol
1490 # as a gift.
1491 #
1492 # --- HOW IT WORKS ---
1493 #
1494 # The Redis Gopher support uses the inline protocol of Redis, and specifically
1495 # two kinds of inline requests that were anyway illegal: an empty request
1496 # or any request that starts with "/" (there are no Redis commands starting
1497 # with such a slash). Normal RESP2/RESP3 requests are completely out of the
1498 # path of the Gopher protocol implementation and are served as usual as well.
1499 #
1500 # If you open a connection to Redis when Gopher is enabled and send it
1501 # a string like "/foo", if there is a key named "/foo" it is served via the
1502 # Gopher protocol.
1503 #
1504 # In order to create a real Gopher "hole" (the name of a Gopher site in Gopher
1505 # talking), you likely need a script like the following:
1506 #
1507 # https://github.com/antirez/gopher2redis
1508 #
1509 # --- SECURITY WARNING ---
1510 #
1511 # If you plan to put Redis on the internet at a publicly accessible address
1512 # to serve Gopher pages, MAKE SURE TO SET A PASSWORD on the instance.
1513 # Once a password is set:
1514 #
1515 # 1. The Gopher server (when enabled, not by default) will still serve
1516 # content via Gopher.
1517 # 2. However other commands cannot be called before the client
1518 # authenticates.
1519 #
1520 # So use the 'requirepass' option to protect your instance.
1521 #
1522 # Note that Gopher is not currently supported when 'io-threads-do-reads'
1523 # is enabled.
1524 #
1525 # To enable Gopher support, uncomment the following line and set the option
1526 # from no (the default) to yes.
1527 #
1528 # gopher-enabled no
1529
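# Illustrative test (assumes gopher-enabled has been set to yes and a key named
# "/foo" exists; "nc" stands in for any plain TCP client):
#
#   printf '/foo\r\n' | nc 127.0.0.1 6379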
1530 ############################### ADVANCED CONFIG ###############################
1531
1532 # Hashes are encoded using a memory efficient data structure when they have a
1533 # small number of entries, and the biggest entry does not exceed a given
1534 # threshold. These thresholds can be configured using the following directives.
1535 hash-max-ziplist-entries 512
1536 hash-max-ziplist-value 64
1537
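# Illustrative check (assumes redis-cli and a hypothetical key "myhash"): the
# encoding actually chosen under the thresholds above can be verified with:
#
#   redis-cli OBJECT ENCODING myhash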
1538 # Lists are also encoded in a special way to save a lot of space.
1538 # Lists are also encoded in a special way to save a lot of space.
1539 # The number of entries allowed per internal list node can be specified
1539 # The number of entries allowed per internal list node can be specified
1540 # as a fixed maximum size or a maximum number of elements.
1540 # as a fixed maximum size or a maximum number of elements.
1541 # For a fixed maximum size, use -5 through -1, meaning:
1541 # For a fixed maximum size, use -5 through -1, meaning:
1542 # -5: max size: 64 Kb <-- not recommended for normal workloads
1542 # -5: max size: 64 Kb <-- not recommended for normal workloads
1543 # -4: max size: 32 Kb <-- not recommended
1543 # -4: max size: 32 Kb <-- not recommended
1544 # -3: max size: 16 Kb <-- probably not recommended
1544 # -3: max size: 16 Kb <-- probably not recommended
1545 # -2: max size: 8 Kb <-- good
1545 # -2: max size: 8 Kb <-- good
1546 # -1: max size: 4 Kb <-- good
1546 # -1: max size: 4 Kb <-- good
1547 # Positive numbers mean store up to _exactly_ that number of elements
1547 # Positive numbers mean store up to _exactly_ that number of elements
1548 # per list node.
1548 # per list node.
1549 # The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
1549 # The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
1550 # but if your use case is unique, adjust the settings as necessary.
1550 # but if your use case is unique, adjust the settings as necessary.
1551 list-max-ziplist-size -2
1551 list-max-ziplist-size -2
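# For illustration only (not a value used by this stack): a positive number
# switches the limit to an exact element count per list node, e.g.
# list-max-ziplist-size 128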
1552
1552
1553 # Lists may also be compressed.
1553 # Lists may also be compressed.
1554 # Compress depth is the number of quicklist ziplist nodes from *each* side of
1554 # Compress depth is the number of quicklist ziplist nodes from *each* side of
1555 # the list to *exclude* from compression. The head and tail of the list
1555 # the list to *exclude* from compression. The head and tail of the list
1556 # are always uncompressed for fast push/pop operations. Settings are:
1556 # are always uncompressed for fast push/pop operations. Settings are:
1557 # 0: disable all list compression
1557 # 0: disable all list compression
1558 # 1: depth 1 means "don't start compressing until after 1 node into the list,
1558 # 1: depth 1 means "don't start compressing until after 1 node into the list,
1559 # going from either the head or tail"
1559 # going from either the head or tail"
1560 # So: [head]->node->node->...->node->[tail]
1560 # So: [head]->node->node->...->node->[tail]
1561 # [head], [tail] will always be uncompressed; inner nodes will compress.
1561 # [head], [tail] will always be uncompressed; inner nodes will compress.
1562 # 2: [head]->[next]->node->node->...->node->[prev]->[tail]
1562 # 2: [head]->[next]->node->node->...->node->[prev]->[tail]
1563 # 2 here means: don't compress head or head->next or tail->prev or tail,
1563 # 2 here means: don't compress head or head->next or tail->prev or tail,
1564 # but compress all nodes between them.
1564 # but compress all nodes between them.
1565 # 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
1565 # 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
1566 # etc.
1566 # etc.
1567 list-compress-depth 0
1567 list-compress-depth 0
1568
1568
1569 # Sets have a special encoding in just one case: when a set is composed
1569 # Sets have a special encoding in just one case: when a set is composed
1570 # of just strings that happen to be integers in radix 10 in the range
1570 # of just strings that happen to be integers in radix 10 in the range
1571 # of 64 bit signed integers.
1571 # of 64 bit signed integers.
1572 # The following configuration setting sets the limit on the size of the
1572 # The following configuration setting sets the limit on the size of the
1573 # set in order to use this special memory saving encoding.
1573 # set in order to use this special memory saving encoding.
1574 set-max-intset-entries 512
1574 set-max-intset-entries 512
1575
1575
1576 # Similarly to hashes and lists, sorted sets are also specially encoded in
1576 # Similarly to hashes and lists, sorted sets are also specially encoded in
1577 # order to save a lot of space. This encoding is only used when the length and
1577 # order to save a lot of space. This encoding is only used when the length and
1578 # elements of a sorted set are below the following limits:
1578 # elements of a sorted set are below the following limits:
1579 zset-max-ziplist-entries 128
1579 zset-max-ziplist-entries 128
1580 zset-max-ziplist-value 64
1580 zset-max-ziplist-value 64
1581
1581
1582 # HyperLogLog sparse representation bytes limit. The limit includes the
1582 # HyperLogLog sparse representation bytes limit. The limit includes the
1583 # 16-byte header. When a HyperLogLog using the sparse representation crosses
1583 # 16-byte header. When a HyperLogLog using the sparse representation crosses
1584 # this limit, it is converted into the dense representation.
1584 # this limit, it is converted into the dense representation.
1585 #
1585 #
1586 # A value greater than 16000 is totally useless, since at that point the
1586 # A value greater than 16000 is totally useless, since at that point the
1587 # dense representation is more memory efficient.
1587 # dense representation is more memory efficient.
1588 #
1588 #
1589 # The suggested value is ~ 3000 in order to have the benefits of
1589 # The suggested value is ~ 3000 in order to have the benefits of
1590 # the space-efficient encoding without slowing down PFADD too much,
1590 # the space-efficient encoding without slowing down PFADD too much,
1591 # which is O(N) with the sparse encoding. The value can be raised to
1591 # which is O(N) with the sparse encoding. The value can be raised to
1592 # ~ 10000 when CPU is not a concern, but space is, and the data set is
1592 # ~ 10000 when CPU is not a concern, but space is, and the data set is
1593 # composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
1593 # composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
1594 hll-sparse-max-bytes 3000
1594 hll-sparse-max-bytes 3000
1595
1595
1596 # Streams macro node max size / items. The stream data structure is a radix
1596 # Streams macro node max size / items. The stream data structure is a radix
1597 # tree of big nodes that encode multiple items inside. Using this configuration
1597 # tree of big nodes that encode multiple items inside. Using this configuration
1598 # it is possible to configure how big a single node can be in bytes, and the
1598 # it is possible to configure how big a single node can be in bytes, and the
1599 # maximum number of items it may contain before switching to a new node when
1599 # maximum number of items it may contain before switching to a new node when
1600 # appending new stream entries. If any of the following settings are set to
1600 # appending new stream entries. If any of the following settings are set to
1601 # zero, the limit is ignored, so for instance it is possible to set just a
1601 # zero, the limit is ignored, so for instance it is possible to set just a
1602 # max entries limit by setting max-bytes to 0 and max-entries to the desired
1602 # max entries limit by setting max-bytes to 0 and max-entries to the desired
1603 # value.
1603 # value.
1604 stream-node-max-bytes 4096
1604 stream-node-max-bytes 4096
1605 stream-node-max-entries 100
1605 stream-node-max-entries 100
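# For illustration only (not values used by this stack): keep only an
# entry-count limit, as described above, by zeroing the byte limit:
# stream-node-max-bytes 0
# stream-node-max-entries 200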
1606
1606
1607 # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
1607 # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
1608 # order to help rehash the main Redis hash table (the one mapping top-level
1608 # order to help rehash the main Redis hash table (the one mapping top-level
1609 # keys to values). The hash table implementation Redis uses (see dict.c)
1609 # keys to values). The hash table implementation Redis uses (see dict.c)
1610 # performs a lazy rehashing: the more operations you run against a hash table
1610 # performs a lazy rehashing: the more operations you run against a hash table
1611 # that is rehashing, the more rehashing "steps" are performed, so if the
1611 # that is rehashing, the more rehashing "steps" are performed, so if the
1612 # server is idle the rehashing is never complete and some more memory is used
1612 # server is idle the rehashing is never complete and some more memory is used
1613 # by the hash table.
1613 # by the hash table.
1614 #
1614 #
1615 # The default is to use this millisecond 10 times every second in order to
1615 # The default is to use this millisecond 10 times every second in order to
1616 # actively rehash the main dictionaries, freeing memory when possible.
1616 # actively rehash the main dictionaries, freeing memory when possible.
1617 #
1617 #
1618 # If unsure:
1618 # If unsure:
1619 # use "activerehashing no" if you have hard latency requirements and it is
1619 # use "activerehashing no" if you have hard latency requirements and it is
1620 # not a good thing in your environment that Redis can reply from time to time
1620 # not a good thing in your environment that Redis can reply from time to time
1621 # to queries with 2 milliseconds delay.
1621 # to queries with 2 milliseconds delay.
1622 #
1622 #
1623 # use "activerehashing yes" if you don't have such hard requirements but
1623 # use "activerehashing yes" if you don't have such hard requirements but
1624 # want to free memory asap when possible.
1624 # want to free memory asap when possible.
1625 activerehashing yes
1625 activerehashing yes
1626
1626
1627 # The client output buffer limits can be used to force disconnection of clients
1627 # The client output buffer limits can be used to force disconnection of clients
1628 # that are not reading data from the server fast enough for some reason (a
1628 # that are not reading data from the server fast enough for some reason (a
1629 # common reason is that a Pub/Sub client can't consume messages as fast as the
1629 # common reason is that a Pub/Sub client can't consume messages as fast as the
1630 # publisher can produce them).
1630 # publisher can produce them).
1631 #
1631 #
1632 # The limit can be set differently for the three different classes of clients:
1632 # The limit can be set differently for the three different classes of clients:
1633 #
1633 #
1634 # normal -> normal clients including MONITOR clients
1634 # normal -> normal clients including MONITOR clients
1635 # replica -> replica clients
1635 # replica -> replica clients
1636 # pubsub -> clients subscribed to at least one pubsub channel or pattern
1636 # pubsub -> clients subscribed to at least one pubsub channel or pattern
1637 #
1637 #
1638 # The syntax of every client-output-buffer-limit directive is the following:
1638 # The syntax of every client-output-buffer-limit directive is the following:
1639 #
1639 #
1640 # client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
1640 # client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
1641 #
1641 #
1642 # A client is immediately disconnected once the hard limit is reached, or if
1642 # A client is immediately disconnected once the hard limit is reached, or if
1643 # the soft limit is reached and remains reached for the specified number of
1643 # the soft limit is reached and remains reached for the specified number of
1644 # seconds (continuously).
1644 # seconds (continuously).
1645 # So for instance if the hard limit is 32 megabytes and the soft limit is
1645 # So for instance if the hard limit is 32 megabytes and the soft limit is
1646 # 16 megabytes / 10 seconds, the client will get disconnected immediately
1646 # 16 megabytes / 10 seconds, the client will get disconnected immediately
1647 # if the size of the output buffers reaches 32 megabytes, but will also get
1647 # if the size of the output buffers reaches 32 megabytes, but will also get
1648 # disconnected if the client reaches 16 megabytes and continuously overcomes
1648 # disconnected if the client reaches 16 megabytes and continuously overcomes
1649 # the limit for 10 seconds.
1649 # the limit for 10 seconds.
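#
# Expressed as a directive, the 32 MB hard / 16 MB soft / 10 s example above
# would look like this for, say, the normal class (illustrative only):
# client-output-buffer-limit normal 32mb 16mb 10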
1650 #
1650 #
1651 # By default normal clients are not limited because they don't receive data
1651 # By default normal clients are not limited because they don't receive data
1652 # without asking (in a push way), but just after a request, so only
1652 # without asking (in a push way), but just after a request, so only
1653 # asynchronous clients may create a scenario where data is requested faster
1653 # asynchronous clients may create a scenario where data is requested faster
1654 # than it can be read.
1654 # than it can be read.
1655 #
1655 #
1656 # Instead there is a default limit for pubsub and replica clients, since
1656 # Instead there is a default limit for pubsub and replica clients, since
1657 # subscribers and replicas receive data in a push fashion.
1657 # subscribers and replicas receive data in a push fashion.
1658 #
1658 #
1659 # Both the hard and the soft limit can be disabled by setting them to zero.
1659 # Both the hard and the soft limit can be disabled by setting them to zero.
1660 client-output-buffer-limit normal 0 0 0
1660 client-output-buffer-limit normal 0 0 0
1661 client-output-buffer-limit replica 256mb 64mb 60
1661 client-output-buffer-limit replica 256mb 64mb 60
1662 client-output-buffer-limit pubsub 32mb 8mb 60
1662 client-output-buffer-limit pubsub 32mb 8mb 60
1663
1663
1664 # Client query buffers accumulate new commands. They are limited to a fixed
1664 # Client query buffers accumulate new commands. They are limited to a fixed
1665 # amount by default in order to avoid a protocol desynchronization (for
1665 # amount by default in order to avoid a protocol desynchronization (for
1666 # instance due to a bug in the client) leading to unbounded memory usage in
1666 # instance due to a bug in the client) leading to unbounded memory usage in
1667 # the query buffer. However you can configure it here if you have very special
1667 # the query buffer. However you can configure it here if you have very special
1668 # needs, such as huge multi/exec requests or the like.
1668 # needs, such as huge multi/exec requests or the like.
1669 #
1669 #
1670 # client-query-buffer-limit 1gb
1670 # client-query-buffer-limit 1gb
1671
1671
1672 # In the Redis protocol, bulk requests, that is, elements representing single
1672 # In the Redis protocol, bulk requests, that is, elements representing single
1673 # strings, are normally limited to 512 mb. However you can change this limit
1673 # strings, are normally limited to 512 mb. However you can change this limit
1674 # here, but it must be 1mb or greater.
1674 # here, but it must be 1mb or greater.
1675 #
1675 #
1676 # proto-max-bulk-len 512mb
1676 # proto-max-bulk-len 512mb
1677
1677
1678 # Redis calls an internal function to perform many background tasks, like
1678 # Redis calls an internal function to perform many background tasks, like
1679 # closing connections of clients in timeout, purging expired keys that are
1679 # closing connections of clients in timeout, purging expired keys that are
1680 # never requested, and so forth.
1680 # never requested, and so forth.
1681 #
1681 #
1682 # Not all tasks are performed with the same frequency, but Redis checks for
1682 # Not all tasks are performed with the same frequency, but Redis checks for
1683 # tasks to perform according to the specified "hz" value.
1683 # tasks to perform according to the specified "hz" value.
1684 #
1684 #
1685 # By default "hz" is set to 10. Raising the value will use more CPU when
1685 # By default "hz" is set to 10. Raising the value will use more CPU when
1686 # Redis is idle, but at the same time will make Redis more responsive when
1686 # Redis is idle, but at the same time will make Redis more responsive when
1687 # there are many keys expiring at the same time, and timeouts may be
1687 # there are many keys expiring at the same time, and timeouts may be
1688 # handled with more precision.
1688 # handled with more precision.
1689 #
1689 #
1690 # The range is between 1 and 500, however a value over 100 is usually not
1690 # The range is between 1 and 500, however a value over 100 is usually not
1691 # a good idea. Most users should use the default of 10 and raise this up to
1691 # a good idea. Most users should use the default of 10 and raise this up to
1692 # 100 only in environments where very low latency is required.
1692 # 100 only in environments where very low latency is required.
1693 hz 10
1693 hz 10
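# For illustration only: a deployment with very strict latency requirements
# might raise this, per the note above:
# hz 100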
1694
1694
1695 # Normally it is useful to have an HZ value which is proportional to the
1695 # Normally it is useful to have an HZ value which is proportional to the
1696 # number of clients connected. This is useful in order, for instance, to
1696 # number of clients connected. This is useful in order, for instance, to
1697 # avoid processing too many clients for each background task invocation
1697 # avoid processing too many clients for each background task invocation
1698 # and thus avoid latency spikes.
1698 # and thus avoid latency spikes.
1699 #
1699 #
1700 # Since the HZ value by default is conservatively set to 10, Redis
1700 # Since the HZ value by default is conservatively set to 10, Redis
1701 # offers, and enables by default, the ability to use an adaptive HZ value
1701 # offers, and enables by default, the ability to use an adaptive HZ value
1702 # which will temporarily raise when there are many connected clients.
1702 # which will temporarily raise when there are many connected clients.
1703 #
1703 #
1704 # When dynamic HZ is enabled, the actual configured HZ will be used
1704 # When dynamic HZ is enabled, the actual configured HZ will be used
1705 # as a baseline, but multiples of the configured HZ value will be actually
1705 # as a baseline, but multiples of the configured HZ value will be actually
1706 # used as needed once more clients are connected. In this way an idle
1706 # used as needed once more clients are connected. In this way an idle
1707 # instance will use very little CPU time while a busy instance will be
1707 # instance will use very little CPU time while a busy instance will be
1708 # more responsive.
1708 # more responsive.
1709 dynamic-hz yes
1709 dynamic-hz yes
1710
1710
1711 # When a child rewrites the AOF file, if the following option is enabled
1711 # When a child rewrites the AOF file, if the following option is enabled
1712 # the file will be fsync-ed every 32 MB of data generated. This is useful
1712 # the file will be fsync-ed every 32 MB of data generated. This is useful
1713 # in order to commit the file to the disk more incrementally and avoid
1713 # in order to commit the file to the disk more incrementally and avoid
1714 # big latency spikes.
1714 # big latency spikes.
1715 aof-rewrite-incremental-fsync yes
1715 aof-rewrite-incremental-fsync yes
1716
1716
1717 # When redis saves RDB file, if the following option is enabled
1717 # When redis saves RDB file, if the following option is enabled
1718 # the file will be fsync-ed every 32 MB of data generated. This is useful
1718 # the file will be fsync-ed every 32 MB of data generated. This is useful
1719 # in order to commit the file to the disk more incrementally and avoid
1719 # in order to commit the file to the disk more incrementally and avoid
1720 # big latency spikes.
1720 # big latency spikes.
1721 rdb-save-incremental-fsync yes
1721 rdb-save-incremental-fsync yes
1722
1722
1723 # Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
1723 # Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
1724 # idea to start with the default settings and only change them after investigating
1724 # idea to start with the default settings and only change them after investigating
1725 # how to improve performance and how the keys' LFU values change over time, which
1725 # how to improve performance and how the keys' LFU values change over time, which
1726 # is possible to inspect via the OBJECT FREQ command.
1726 # is possible to inspect via the OBJECT FREQ command.
1727 #
1727 #
1728 # There are two tunable parameters in the Redis LFU implementation: the
1728 # There are two tunable parameters in the Redis LFU implementation: the
1729 # counter logarithm factor and the counter decay time. It is important to
1729 # counter logarithm factor and the counter decay time. It is important to
1730 # understand what the two parameters mean before changing them.
1730 # understand what the two parameters mean before changing them.
1731 #
1731 #
1732 # The LFU counter is just 8 bits per key; its maximum value is 255, so Redis
1732 # The LFU counter is just 8 bits per key; its maximum value is 255, so Redis
1733 # uses a probabilistic increment with logarithmic behavior. Given the value
1733 # uses a probabilistic increment with logarithmic behavior. Given the value
1734 # of the old counter, when a key is accessed, the counter is incremented in
1734 # of the old counter, when a key is accessed, the counter is incremented in
1735 # this way:
1735 # this way:
1736 #
1736 #
1737 # 1. A random number R between 0 and 1 is extracted.
1737 # 1. A random number R between 0 and 1 is extracted.
1738 # 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
1738 # 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
1739 # 3. The counter is incremented only if R < P.
1739 # 3. The counter is incremented only if R < P.
1740 #
1740 #
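# A rough pseudocode sketch of the increment rule above (not a configuration
# directive, shown here only to make the probability explicit):
#   r = random_between_0_and_1()
#   p = 1.0 / (old_value * lfu_log_factor + 1)
#   if r < p: counter += 1
#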
1741 # The default lfu-log-factor is 10. This is a table of how the frequency
1741 # The default lfu-log-factor is 10. This is a table of how the frequency
1742 # counter changes with a different number of accesses with different
1742 # counter changes with a different number of accesses with different
1743 # logarithmic factors:
1743 # logarithmic factors:
1744 #
1744 #
1745 # +--------+------------+------------+------------+------------+------------+
1745 # +--------+------------+------------+------------+------------+------------+
1746 # | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits |
1746 # | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits |
1747 # +--------+------------+------------+------------+------------+------------+
1747 # +--------+------------+------------+------------+------------+------------+
1748 # | 0 | 104 | 255 | 255 | 255 | 255 |
1748 # | 0 | 104 | 255 | 255 | 255 | 255 |
1749 # +--------+------------+------------+------------+------------+------------+
1749 # +--------+------------+------------+------------+------------+------------+
1750 # | 1 | 18 | 49 | 255 | 255 | 255 |
1750 # | 1 | 18 | 49 | 255 | 255 | 255 |
1751 # +--------+------------+------------+------------+------------+------------+
1751 # +--------+------------+------------+------------+------------+------------+
1752 # | 10 | 10 | 18 | 142 | 255 | 255 |
1752 # | 10 | 10 | 18 | 142 | 255 | 255 |
1753 # +--------+------------+------------+------------+------------+------------+
1753 # +--------+------------+------------+------------+------------+------------+
1754 # | 100 | 8 | 11 | 49 | 143 | 255 |
1754 # | 100 | 8 | 11 | 49 | 143 | 255 |
1755 # +--------+------------+------------+------------+------------+------------+
1755 # +--------+------------+------------+------------+------------+------------+
1756 #
1756 #
1757 # NOTE: The above table was obtained by running the following commands:
1757 # NOTE: The above table was obtained by running the following commands:
1758 #
1758 #
1759 # redis-benchmark -n 1000000 incr foo
1759 # redis-benchmark -n 1000000 incr foo
1760 # redis-cli object freq foo
1760 # redis-cli object freq foo
1761 #
1761 #
1762 # NOTE 2: The counter initial value is 5 in order to give new objects a chance
1762 # NOTE 2: The counter initial value is 5 in order to give new objects a chance
1763 # to accumulate hits.
1763 # to accumulate hits.
1764 #
1764 #
1765 # The counter decay time is the time, in minutes, that must elapse in order
1765 # The counter decay time is the time, in minutes, that must elapse in order
1766 # for the key counter to be divided by two (or decremented if it has a value
1766 # for the key counter to be divided by two (or decremented if it has a value
1767 # <= 10).
1767 # <= 10).
1768 #
1768 #
1769 # The default value for the lfu-decay-time is 1. A special value of 0 means to
1769 # The default value for the lfu-decay-time is 1. A special value of 0 means to
1770 # decay the counter every time it happens to be scanned.
1770 # decay the counter every time it happens to be scanned.
1771 #
1771 #
1772 # lfu-log-factor 10
1772 # lfu-log-factor 10
1773 # lfu-decay-time 1
1773 # lfu-decay-time 1
1774
1774
1775 ########################### ACTIVE DEFRAGMENTATION #######################
1775 ########################### ACTIVE DEFRAGMENTATION #######################
1776 #
1776 #
1777 # What is active defragmentation?
1777 # What is active defragmentation?
1778 # -------------------------------
1778 # -------------------------------
1779 #
1779 #
1780 # Active (online) defragmentation allows a Redis server to compact the
1780 # Active (online) defragmentation allows a Redis server to compact the
1781 # spaces left between small allocations and deallocations of data in memory,
1781 # spaces left between small allocations and deallocations of data in memory,
1782 # thus allowing memory to be reclaimed.
1782 # thus allowing memory to be reclaimed.
1783 #
1783 #
1784 # Fragmentation is a natural process that happens with every allocator (but
1784 # Fragmentation is a natural process that happens with every allocator (but
1785 # less so with Jemalloc, fortunately) and certain workloads. Normally a server
1785 # less so with Jemalloc, fortunately) and certain workloads. Normally a server
1786 # restart is needed in order to lower the fragmentation, or at least to flush
1786 # restart is needed in order to lower the fragmentation, or at least to flush
1787 # away all the data and create it again. However thanks to this feature
1787 # away all the data and create it again. However thanks to this feature
1788 # implemented by Oran Agra for Redis 4.0 this process can happen at runtime
1788 # implemented by Oran Agra for Redis 4.0 this process can happen at runtime
1789 # in a "hot" way, while the server is running.
1789 # in a "hot" way, while the server is running.
1790 #
1790 #
1791 # Basically when the fragmentation is over a certain level (see the
1791 # Basically when the fragmentation is over a certain level (see the
1792 # configuration options below) Redis will start to create new copies of the
1792 # configuration options below) Redis will start to create new copies of the
1793 # values in contiguous memory regions by exploiting certain specific Jemalloc
1793 # values in contiguous memory regions by exploiting certain specific Jemalloc
1794 # features (in order to understand if an allocation is causing fragmentation
1794 # features (in order to understand if an allocation is causing fragmentation
1795 # and to allocate it in a better place), and at the same time, will release the
1795 # and to allocate it in a better place), and at the same time, will release the
1796 # old copies of the data. This process, repeated incrementally for all the keys
1796 # old copies of the data. This process, repeated incrementally for all the keys
1797 # will cause the fragmentation to drop back to normal values.
1797 # will cause the fragmentation to drop back to normal values.
1798 #
1798 #
1799 # Important things to understand:
1799 # Important things to understand:
1800 #
1800 #
1801 # 1. This feature is disabled by default, and only works if you compiled Redis
1801 # 1. This feature is disabled by default, and only works if you compiled Redis
1802 # to use the copy of Jemalloc we ship with the source code of Redis.
1802 # to use the copy of Jemalloc we ship with the source code of Redis.
1803 # This is the default with Linux builds.
1803 # This is the default with Linux builds.
1804 #
1804 #
1805 # 2. You never need to enable this feature if you don't have fragmentation
1805 # 2. You never need to enable this feature if you don't have fragmentation
1806 # issues.
1806 # issues.
1807 #
1807 #
1808 # 3. Once you experience fragmentation, you can enable this feature when
1808 # 3. Once you experience fragmentation, you can enable this feature when
1809 # needed with the command "CONFIG SET activedefrag yes".
1809 # needed with the command "CONFIG SET activedefrag yes".
1810 #
1810 #
1811 # The configuration parameters are able to fine tune the behavior of the
1811 # The configuration parameters are able to fine tune the behavior of the
1812 # defragmentation process. If you are not sure about what they mean it is
1812 # defragmentation process. If you are not sure about what they mean it is
1813 # a good idea to leave the defaults untouched.
1813 # a good idea to leave the defaults untouched.
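#
# A minimal runtime example of the workflow described above (plain redis-cli
# usage, not something this image runs automatically):
# redis-cli CONFIG SET activedefrag yes
# redis-cli INFO memory    # watch mem_fragmentation_ratio drop over time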
1814
1814
1815 # Enable active defragmentation
1815 # Enable active defragmentation
1816 # activedefrag no
1816 # activedefrag no
1817
1817
1818 # Minimum amount of fragmentation waste to start active defrag
1818 # Minimum amount of fragmentation waste to start active defrag
1819 # active-defrag-ignore-bytes 100mb
1819 # active-defrag-ignore-bytes 100mb
1820
1820
1821 # Minimum percentage of fragmentation to start active defrag
1821 # Minimum percentage of fragmentation to start active defrag
1822 # active-defrag-threshold-lower 10
1822 # active-defrag-threshold-lower 10
1823
1823
1824 # Maximum percentage of fragmentation at which we use maximum effort
1824 # Maximum percentage of fragmentation at which we use maximum effort
1825 # active-defrag-threshold-upper 100
1825 # active-defrag-threshold-upper 100
1826
1826
1827 # Minimal effort for defrag in CPU percentage, to be used when the lower
1827 # Minimal effort for defrag in CPU percentage, to be used when the lower
1828 # threshold is reached
1828 # threshold is reached
1829 # active-defrag-cycle-min 1
1829 # active-defrag-cycle-min 1
1830
1830
1831 # Maximal effort for defrag in CPU percentage, to be used when the upper
1831 # Maximal effort for defrag in CPU percentage, to be used when the upper
1832 # threshold is reached
1832 # threshold is reached
1833 # active-defrag-cycle-max 25
1833 # active-defrag-cycle-max 25
1834
1834
1835 # Maximum number of set/hash/zset/list fields that will be processed from
1835 # Maximum number of set/hash/zset/list fields that will be processed from
1836 # the main dictionary scan
1836 # the main dictionary scan
1837 # active-defrag-max-scan-fields 1000
1837 # active-defrag-max-scan-fields 1000
1838
1838
1839 # Jemalloc background thread for purging will be enabled by default
1839 # Jemalloc background thread for purging will be enabled by default
1840 jemalloc-bg-thread yes
1840 jemalloc-bg-thread yes
1841
1841
1842 # It is possible to pin different threads and processes of Redis to specific
1842 # It is possible to pin different threads and processes of Redis to specific
1843 # CPUs in your system, in order to maximize the performance of the server.
1843 # CPUs in your system, in order to maximize the performance of the server.
1844 # This is useful both to pin different Redis threads to different
1844 # This is useful both to pin different Redis threads to different
1845 # CPUs, and to make sure that multiple Redis instances running
1845 # CPUs, and to make sure that multiple Redis instances running
1846 # on the same host are pinned to different CPUs.
1846 # on the same host are pinned to different CPUs.
1847 #
1847 #
1848 # Normally you can do this using the "taskset" command, however it is also
1848 # Normally you can do this using the "taskset" command, however it is also
1849 # possible to do this via Redis configuration directly, both on Linux and FreeBSD.
1849 # possible to do this via Redis configuration directly, both on Linux and FreeBSD.
1850 #
1850 #
1851 # You can pin the server/IO threads, bio threads, aof rewrite child process, and
1851 # You can pin the server/IO threads, bio threads, aof rewrite child process, and
1852 # the bgsave child process. The syntax to specify the cpu list is the same as
1852 # the bgsave child process. The syntax to specify the cpu list is the same as
1853 # the taskset command:
1853 # the taskset command:
1854 #
1854 #
1855 # Set redis server/io threads to cpu affinity 0,2,4,6:
1855 # Set redis server/io threads to cpu affinity 0,2,4,6:
1856 # server_cpulist 0-7:2
1856 # server_cpulist 0-7:2
1857 #
1857 #
1858 # Set bio threads to cpu affinity 1,3:
1858 # Set bio threads to cpu affinity 1,3:
1859 # bio_cpulist 1,3
1859 # bio_cpulist 1,3
1860 #
1860 #
1861 # Set aof rewrite child process to cpu affinity 8,9,10,11:
1861 # Set aof rewrite child process to cpu affinity 8,9,10,11:
1862 # aof_rewrite_cpulist 8-11
1862 # aof_rewrite_cpulist 8-11
1863 #
1863 #
1864 # Set bgsave child process to cpu affinity 1,10,11
1864 # Set bgsave child process to cpu affinity 1,10,11
1865 # bgsave_cpulist 1,10-11 No newline at end of file
1865 # bgsave_cpulist 1,10-11
@@ -1,56 +1,52 b''
1 volumes:
1 volumes:
2 nixstore:
2 nixstore:
3
3
4
4
5 services:
5 services:
6
6
7 rhodecode:
7 rhodecode:
8 volumes:
8 volumes:
9 - /media/psf/Home/workspace/rhodecode-vcsserver:/home/rhodecode/rhodecode-vcsserver:delegated
9 - /media/psf/Home/workspace/rhodecode-vcsserver:/home/rhodecode/rhodecode-vcsserver:delegated
10 - /media/psf/Home/workspace/rhodecode-enterprise-ce:/home/rhodecode/rhodecode-enterprise-ce:delegated
10 - /media/psf/Home/workspace/rhodecode-enterprise-ce:/home/rhodecode/rhodecode-enterprise-ce:delegated
11 - /media/psf/Home/workspace/rhodecode-enterprise-ee:/home/rhodecode/rhodecode-enterprise-ee:delegated
11 - /media/psf/Home/workspace/rhodecode-enterprise-ee:/home/rhodecode/rhodecode-enterprise-ee:delegated
12 - /media/psf/Home/workspace:/var/opt/rhodecode_repo_store:delegated
12 - /media/psf/Home/workspace:/var/opt/rhodecode_repo_store:delegated
13 - nixstore:/opt/nix/store
13 - nixstore:/opt/nix/store
14
14
15 ports:
15 ports:
16 - "8080:8080"
16 - "10020:10020"
17
17
18 vcsserver:
18 vcsserver:
19 volumes:
19 volumes:
20 - /media/psf/Home/workspace/rhodecode-vcsserver:/home/rhodecode/rhodecode-vcsserver:delegated
20 - /media/psf/Home/workspace/rhodecode-vcsserver:/home/rhodecode/rhodecode-vcsserver:delegated
21 - /media/psf/Home/workspace/rhodecode-enterprise-ce:/home/rhodecode/rhodecode-enterprise-ce:delegated
21 - /media/psf/Home/workspace/rhodecode-enterprise-ce:/home/rhodecode/rhodecode-enterprise-ce:delegated
22 - /media/psf/Home/workspace/rhodecode-enterprise-ee:/home/rhodecode/rhodecode-enterprise-ee:delegated
22 - /media/psf/Home/workspace/rhodecode-enterprise-ee:/home/rhodecode/rhodecode-enterprise-ee:delegated
23 - /media/psf/Home/workspace:/var/opt/rhodecode_repo_store:delegated
23 - /media/psf/Home/workspace:/var/opt/rhodecode_repo_store:delegated
24 - nixstore:/opt/nix/store
24 - nixstore:/opt/nix/store
25
25
26 celery:
26 celery:
27 volumes:
27 volumes:
28 - /media/psf/Home/workspace/rhodecode-vcsserver:/home/rhodecode/rhodecode-vcsserver:delegated
28 - /media/psf/Home/workspace/rhodecode-vcsserver:/home/rhodecode/rhodecode-vcsserver:delegated
29 - /media/psf/Home/workspace/rhodecode-enterprise-ce:/home/rhodecode/rhodecode-enterprise-ce:delegated
29 - /media/psf/Home/workspace/rhodecode-enterprise-ce:/home/rhodecode/rhodecode-enterprise-ce:delegated
30 - /media/psf/Home/workspace/rhodecode-enterprise-ee:/home/rhodecode/rhodecode-enterprise-ee:delegated
30 - /media/psf/Home/workspace/rhodecode-enterprise-ee:/home/rhodecode/rhodecode-enterprise-ee:delegated
31 - nixstore:/opt/nix/store
31 - nixstore:/opt/nix/store
32
32
33 beat:
33 celery-beat:
34 volumes:
34 volumes:
35 - /media/psf/Home/workspace/rhodecode-vcsserver:/home/rhodecode/rhodecode-vcsserver:delegated
35 - /media/psf/Home/workspace/rhodecode-vcsserver:/home/rhodecode/rhodecode-vcsserver:delegated
36 - /media/psf/Home/workspace/rhodecode-enterprise-ce:/home/rhodecode/rhodecode-enterprise-ce:delegated
36 - /media/psf/Home/workspace/rhodecode-enterprise-ce:/home/rhodecode/rhodecode-enterprise-ce:delegated
37 - /media/psf/Home/workspace/rhodecode-enterprise-ee:/home/rhodecode/rhodecode-enterprise-ee:delegated
37 - /media/psf/Home/workspace/rhodecode-enterprise-ee:/home/rhodecode/rhodecode-enterprise-ee:delegated
38 - nixstore:/opt/nix/store
38 - nixstore:/opt/nix/store
39
39
40 svn:
40 svn:
41 volumes:
41 volumes:
42 - /media/psf/Home/workspace/rhodecode-vcsserver:/home/rhodecode/rhodecode-vcsserver:delegated
42 - /media/psf/Home/workspace/rhodecode-vcsserver:/home/rhodecode/rhodecode-vcsserver:delegated
43 - /media/psf/Home/workspace/rhodecode-enterprise-ce:/home/rhodecode/rhodecode-enterprise-ce:delegated
43 - /media/psf/Home/workspace/rhodecode-enterprise-ce:/home/rhodecode/rhodecode-enterprise-ce:delegated
44 - /media/psf/Home/workspace/rhodecode-enterprise-ee:/home/rhodecode/rhodecode-enterprise-ee:delegated
44 - /media/psf/Home/workspace/rhodecode-enterprise-ee:/home/rhodecode/rhodecode-enterprise-ee:delegated
45 - nixstore:/opt/nix/store
45 - nixstore:/opt/nix/store
46
46
47 sshd:
47 sshd:
48 volumes:
48 volumes:
49 - /media/psf/Home/workspace/rhodecode-vcsserver:/home/rhodecode/rhodecode-vcsserver:delegated
49 - /media/psf/Home/workspace/rhodecode-vcsserver:/home/rhodecode/rhodecode-vcsserver:delegated
50 - /media/psf/Home/workspace/rhodecode-enterprise-ce:/home/rhodecode/rhodecode-enterprise-ce:delegated
50 - /media/psf/Home/workspace/rhodecode-enterprise-ce:/home/rhodecode/rhodecode-enterprise-ce:delegated
51 - /media/psf/Home/workspace/rhodecode-enterprise-ee:/home/rhodecode/rhodecode-enterprise-ee:delegated
51 - /media/psf/Home/workspace/rhodecode-enterprise-ee:/home/rhodecode/rhodecode-enterprise-ee:delegated
52 - nixstore:/opt/nix/store
52 - nixstore:/opt/nix/store
53
54 database:
55 volumes:
56 - /tmp:/usr-tmp
@@ -1,80 +1,64 b''
1 volumes:
1 volumes:
2 bashhistory:
2 bashhistory:
3
3
4
4
5 services:
5 services:
6
6
7 rhodecode:
7 rhodecode:
8 environment:
8 environment:
9 HISTFILE: /home/rhodecode/.bash_history_docker
9 HISTFILE: /home/rhodecode/.bash_history_docker
10 DB_UPGRADE: 0 # run the DB upgrade
10 DB_UPGRADE: 0 # run the DB upgrade
11
11
12 volumes:
12 volumes:
13 - bashhistory:/home/rhodecode
13 - bashhistory:/home/rhodecode/.bash_history_docker
14
14
15 build:
15 build:
16 context: .
16 context: .
17 dockerfile: service/rhodecode/rhodecode_source.dockerfile
17 dockerfile: service/rhodecode/rhodecode_source.dockerfile
18
18
19 image: rhodecode/app_source:${SOURCE_VER:?specify-SOURCE_VER-env-var}
19 image: rhodecode/rhodecode-${RC_EDITION}:${RC_VERSION:?specify-RC_VERSION-env-var}_SOURCE
20
21 command: [
22 "/home/rhodecode/rhodecode-enterprise-ee/result/bin/gunicorn",
23 "--name=gunicorn-rhodecode-1",
24 "--error-logfile=-",
25 "--paster=/etc/rhodecode/conf/compose/rhodecode.optimized.ini",
26 "--config=/etc/rhodecode/conf/gunicorn_conf.py"
27 ]
28
20
29 vcsserver:
21 vcsserver:
30 environment:
22 environment:
31 HISTFILE: /home/rhodecode/.bash_history_docker
23 HISTFILE: /home/rhodecode/.bash_history_docker
32
24
33 volumes:
25 volumes:
34 - bashhistory:/home/rhodecode
26 - bashhistory:/home/rhodecode/.bash_history_docker
35
27
36 build:
28 build:
37 context: .
29 context: .
38 dockerfile: service/rhodecode/rhodecode_source.dockerfile
30 dockerfile: service/rhodecode/rhodecode_source.dockerfile
39
31
40 image: rhodecode/app_source:${SOURCE_VER:?specify-SOURCE_VER-env-var}
32 image: rhodecode/rhodecode-${RC_EDITION}:${RC_VERSION:?specify-RC_VERSION-env-var}_SOURCE
41
42 command: [
43 "/home/rhodecode/rhodecode-vcsserver/result/bin/gunicorn",
44 "--name=gunicorn-vcsserver-1",
45 "--error-logfile=-",
46 "--paster=/etc/rhodecode/conf/compose/vcsserver.optimized.ini",
47 "--config=/etc/rhodecode/conf/gunicorn_conf.py"
48 ]
49
33
50 celery:
34 celery:
51
35
52 build:
36 build:
53 context: .
37 context: .
54 dockerfile: service/rhodecode/rhodecode_source.dockerfile
38 dockerfile: service/rhodecode/rhodecode_source.dockerfile
55
39
56 image: rhodecode/app_source:${SOURCE_VER:?specify-SOURCE_VER-env-var}
40 image: rhodecode/rhodecode-${RC_EDITION}:${RC_VERSION:?specify-RC_VERSION-env-var}_SOURCE
57
41
58 beat:
42 celery-beat:
59
43
60 build:
44 build:
61 context: .
45 context: .
62 dockerfile: service/rhodecode/rhodecode_source.dockerfile
46 dockerfile: service/rhodecode/rhodecode_source.dockerfile
63
47
64 image: rhodecode/app_source:${SOURCE_VER:?specify-SOURCE_VER-env-var}
48 image: rhodecode/rhodecode-${RC_EDITION}:${RC_VERSION:?specify-RC_VERSION-env-var}_SOURCE
65
49
66 svn:
50 svn:
67
51
68 build:
52 build:
69 context: .
53 context: .
70 dockerfile: service/rhodecode/rhodecode_source.dockerfile
54 dockerfile: service/rhodecode/rhodecode_source.dockerfile
71
55
72 image: rhodecode/app_source:${SOURCE_VER:?specify-SOURCE_VER-env-var}
56 image: rhodecode/rhodecode-${RC_EDITION}:${RC_VERSION:?specify-RC_VERSION-env-var}_SOURCE
73
57
74 sshd:
58 sshd:
75
59
76 build:
60 build:
77 context: .
61 context: .
78 dockerfile: service/rhodecode/rhodecode_source.dockerfile
62 dockerfile: service/rhodecode/rhodecode_source.dockerfile
79
63
80 image: rhodecode/app_source:${SOURCE_VER:?specify-SOURCE_VER-env-var} No newline at end of file
64 image: rhodecode/rhodecode-${RC_EDITION}:${RC_VERSION:?specify-RC_VERSION-env-var}_SOURCE No newline at end of file
@@ -1,110 +1,128 b''
1 version: '3.9'
1 version: '3.9'
2
2
3 volumes:
3 volumes:
4 prometheus_data: {}
5 grafana_data: {}
6 logvolume:
7 driver: local
8 driver_opts:
9 type: none
10 o: bind
11 device: $PWD/logs
12
4
13 networks:
5 prometheus_data:
14 rhodecode_network:
6 labels:
15 name: rhodecode_network
7 "keep": 1
16
8
9 grafana_data:
10 labels:
11 "keep": 1
17
12
18 services:
13 services:
19
14
20 statsd-exporter:
15 statsd-exporter:
21 image: prom/statsd-exporter:v0.22.8
16 image: prom/statsd-exporter:v0.22.8
22 restart: unless-stopped
17 restart: always
23 command: [
18 command: [
24 '--statsd.mapping-config=/etc/statsd/statsd.yaml',
19 '--statsd.mapping-config=/etc/statsd/statsd.yaml',
25 '--statsd.listen-udp=:9125',
20 '--statsd.listen-udp=:9125',
26 '--web.listen-address=:9102'
21 '--web.listen-address=:9102'
27 ]
22 ]
28 networks:
23 networks:
29 - rhodecode_network
24 - rhodecode_network
30 ports:
25 # ports:
31 - "127.0.0.1:9125:9125/udp"
26 # - "9125:9125/udp"
32 - "127.0.0.1:9102:9102"
27 # - "9102:9102"
28
33 volumes:
29 volumes:
34 - ./config/statsd-exporter:/etc/statsd
30 - ./config/statsd-exporter:/etc/statsd
31 labels:
32 - "traefik.enable=false"
33 - "traefik.http.services.statsd-exporter.loadbalancer.server.port=9125"
34 - "traefik.http.services.statsd-exporter.loadbalancer.server.port=9102"
35
35
36 node-exporter:
36 node-exporter:
37 image: prom/node-exporter:v1.4.0
37 image: prom/node-exporter:v1.4.0
38 command:
38 command:
39 - "--path.procfs=/host/proc"
39 - "--path.procfs=/host/proc"
40 - "--path.rootfs=/rootfs"
40 - "--path.rootfs=/rootfs"
41 - "--path.sysfs=/host/sys"
41 - "--path.sysfs=/host/sys"
42 - "--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)"
42 - "--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)"
43 - "--collector.systemd"
43 - "--collector.systemd"
44 - "--collector.processes"
44 - "--collector.processes"
45 networks:
45 networks:
46 - rhodecode_network
46 - rhodecode_network
47 pid: host
47 pid: host
48 restart: unless-stopped
48 restart: always
49 ports:
49 # ports:
50 - "127.0.0.1:9100:9100"
50 # - "9100:9100"
51 volumes:
51 volumes:
52 - "/proc:/host/proc:ro"
52 - "/proc:/host/proc:ro"
53 - "/sys:/host/sys:ro"
53 - "/sys:/host/sys:ro"
54 - "/:/rootfs:ro"
54 - "/:/rootfs:ro"
55 labels:
56 - "traefik.enable=false"
57 - "traefik.http.services.node-exporter.loadbalancer.server.port=9100"
55
58
56 prometheus:
59 prometheus:
57 image: prom/prometheus:v2.39.1
60 image: prom/prometheus:v2.40.2
58 restart: unless-stopped
61 restart: always
59 command:
62 command:
60 - "--config.file=/etc/prometheus/prometheus.yml"
63 - "--config.file=/etc/prometheus/prometheus.yml"
61 - "--storage.tsdb.path=/prometheus"
64 - "--storage.tsdb.path=/prometheus"
62 - "--web.console.libraries=/etc/prometheus/console_libraries"
65 - "--web.console.libraries=/etc/prometheus/console_libraries"
63 - "--web.console.templates=/etc/prometheus/consoles"
66 - "--web.console.templates=/etc/prometheus/consoles"
64 - "--storage.tsdb.retention.time=24h"
67 - "--storage.tsdb.retention.time=24h"
65 - "--web.enable-lifecycle"
68 - "--web.enable-lifecycle"
66 ports:
69 # ports:
67 - "127.0.0.1:9090:9090"
70 # - "9090:9090"
68 networks:
71 networks:
69 - rhodecode_network
72 - rhodecode_network
70 volumes:
73 volumes:
71 - ./config/prometheus:/etc/prometheus
74 - ./config/prometheus:/etc/prometheus
72 - prometheus_data:/prometheus
75 - prometheus_data:/prometheus
76 labels:
77 - "traefik.enable=false"
78 - "traefik.http.services.prometheus.loadbalancer.server.port=9090"
73
79
74 grafana:
80 grafana:
75 image: grafana/grafana:9.2.1-ubuntu
81 image: grafana/grafana:9.2.5
76 restart: unless-stopped
82 restart: always
77 env_file:
83 # ports:
78 - ./config/grafana/grafana.env
84 # - "3000:3000"
79 ports:
80 - "4000:3000"
81 volumes:
85 volumes:
82 - grafana_data:/var/lib/grafana
86 - grafana_data:/var/lib/grafana
87 - ./config/grafana:/etc/grafana
83 networks:
88 networks:
84 - rhodecode_network
89 - rhodecode_network
90 labels:
91 - "traefik.enable=true"
92 - "traefik.http.routers.grafana.entrypoints=http"
93 - "traefik.http.routers.grafana.rule=PathPrefix(`/_grafana`)"
94 - "traefik.http.routers.grafana.service=grafana-http"
95 - "traefik.http.services.grafana-http.loadbalancer.server.port=3000"
85
96
86 loki:
97 loki:
87 image: grafana/loki:2.6.1
98 image: grafana/loki:2.7.0
88 restart: unless-stopped
99 restart: always
89 ports:
100 # ports:
90 - "127.0.0.1:3100:3100"
101 # - "3100:3100"
91 - "127.0.0.1:9095:9095"
102 # - "9095:9095"
92 command: [
103 command: [
93 "-config.file=/etc/loki/loki-config.yaml"
104 "-config.file=/etc/loki/loki-config.yaml"
94 ]
105 ]
95 networks:
106 networks:
96 - rhodecode_network
107 - rhodecode_network
97 volumes:
108 volumes:
98 - ./config/loki:/etc/loki
109 - ./config/loki:/etc/loki
99
110
111 labels:
112 - "traefik.enable=false"
113 - "traefik.http.services.loki.loadbalancer.server.port=3100"
114 - "traefik.http.services.loki.loadbalancer.server.port=9095"
115
100 promtail:
116 promtail:
101 image: grafana/promtail:latest
117 image: grafana/promtail:latest
102 command: [
118 command: [
103 "-config.file=/etc/promtail/promtail-config.yaml"
119 "-config.file=/etc/promtail/promtail-config.yaml"
104 ]
120 ]
105 networks:
121 networks:
106 - rhodecode_network
122 - rhodecode_network
107 volumes:
123 volumes:
108 - /var/log:/var/log
124 - /var/log:/var/log
109 - logvolume:/var/log_volume
125 - logvolume:/var/log_volume
110 - ./config/promtail:/etc/promtail
126 - ./config/promtail:/etc/promtail
127 labels:
128 - "traefik.enable=false"
@@ -1,151 +1,152 b''
1 #!/usr/bin/env bash
1 #!/usr/bin/env bash
2 set -Eeo pipefail
2 set -Eeo pipefail
3
3
4 function config_copy() {
4 function config_copy() {
5 # copy over the configs if they don't exist
5 # copy over the configs if they don't exist
6 for f in /etc/rhodecode/conf_build/*; do
6 for f in /etc/rhodecode/conf_build/*; do
7 fname=${f##*/}
7 fname=${f##*/}
8 if [ ! -f "/etc/rhodecode/conf/$fname" ]; then
8 if [ ! -f "/etc/rhodecode/conf/$fname" ]; then
9 echo "$fname not exists copying over as default conf..."
9 echo "$fname not exists copying over as default conf..."
10 cp -v $f /etc/rhodecode/conf/$fname
10 cp -v $f /etc/rhodecode/conf/$fname
11 fi
11 fi
12 done
12 done
13
13
14 }
14 }
15
15
16 function db_upgrade() {
16 function db_upgrade() {
17 echo 'ENTRYPOINT: Upgrading database.'
17 echo 'ENTRYPOINT: Upgrading database.'
18 /var/opt/rhodecode_bin/bin/rc-upgrade-db $MAIN_INI_PATH --force-yes
18 /usr/local/bin/rhodecode_bin/bin/rc-upgrade-db $MAIN_INI_PATH --force-yes
19 }
19 }
20
20
21 function db_init() {
21 function db_init() {
22
22
23 gosu $RC_USER \
23 gosu $RC_USER \
24 /home/$RC_USER/.rccontrol/$RC_TYPE_ID/profile/bin/rc-setup-app \
24 /usr/local/bin/rhodecode_bin/bin/rc-setup-app \
25 $MAIN_INI_PATH \
25 $MAIN_INI_PATH \
26 --force-yes \
26 --force-yes \
27 --skip-existing-db \
27 --skip-existing-db \
28 --user=$RHODECODE_USER_NAME \
28 --user=$RHODECODE_USER_NAME \
29 --password=$RHODECODE_USER_PASS \
29 --password=$RHODECODE_USER_PASS \
30 --email=$RHODECODE_USER_EMAIL \
30 --email=$RHODECODE_USER_EMAIL \
31 --repos=$RHODECODE_REPO_DIR
31 --repos=$RHODECODE_REPO_DIR
32 }
32 }
33
33
34 function rhodecode_setup() {
34 function rhodecode_setup() {
35 for f in /home/$RC_USER/.rccontrol/bootstrap/*.py; do
35 for f in /home/$RC_USER/.rccontrol/bootstrap/*.py; do
36 fname=${f##*/}
36 fname=${f##*/}
37
37
38 echo "Running script $fname on $RC_TYPE_ID"
38 echo "Running script $fname on $RC_TYPE_ID"
39 gosu $RC_USER /home/$RC_USER/.rccontrol/$RC_TYPE_ID/profile/bin/rc-ishell $MAIN_INI_PATH <<< "%run $f"
39 gosu $RC_USER /usr/local/bin/rhodecode_bin/bin/rc-ishell $MAIN_INI_PATH <<< "%run $f"
40
40
41 done
41 done
42 }
42 }
43
43
44 function generate_ssh_keys() {
44 function generate_ssh_keys() {
45 keys_dir=/etc/rhodecode/conf/ssh
45 keys_dir=/etc/rhodecode/conf/ssh
46
46
47 if [[ ! -d $keys_dir ]]; then
47 if [[ ! -d $keys_dir ]]; then
48 echo "Generating $keys_dir/ssh_host_rsa_key ..."
48 echo "Generating $keys_dir/ssh_host_rsa_key ..."
49 gosu "$RC_USER" mkdir -p $keys_dir
49 gosu "$RC_USER" mkdir -p $keys_dir
50 fi
50 fi
51
51
52 # Generate ssh host key for the first time
52 # Generate ssh host key for the first time
53 if [[ ! -f $keys_dir/ssh_host_rsa_key ]]; then
53 if [[ ! -f $keys_dir/ssh_host_rsa_key ]]; then
54 echo "Generating $keys_dir/ssh_host_rsa_key ..."
54 echo "Generating $keys_dir/ssh_host_rsa_key ..."
55 gosu "$RC_USER" ssh-keygen -f $keys_dir/ssh_host_rsa_key -N '' -t rsa
55 gosu "$RC_USER" ssh-keygen -f $keys_dir/ssh_host_rsa_key -N '' -t rsa
56 gosu "$RC_USER" chmod 0600 $keys_dir/ssh_host_rsa_key
56 gosu "$RC_USER" chmod 0600 $keys_dir/ssh_host_rsa_key
57 fi
57 fi
58
58
59 if [[ ! -f $keys_dir/ssh_host_ecdsa_key ]]; then
59 if [[ ! -f $keys_dir/ssh_host_ecdsa_key ]]; then
60 echo "Generating $keys_dir/ssh_host_ecdsa_key ..."
60 echo "Generating $keys_dir/ssh_host_ecdsa_key ..."
61 gosu "$RC_USER" ssh-keygen -f $keys_dir/ssh_host_ecdsa_key -N '' -t ecdsa
61 gosu "$RC_USER" ssh-keygen -f $keys_dir/ssh_host_ecdsa_key -N '' -t ecdsa
62 gosu "$RC_USER" chmod 0600 $keys_dir/ssh_host_ecdsa_key
62 gosu "$RC_USER" chmod 0600 $keys_dir/ssh_host_ecdsa_key
63 fi
63 fi
64
64
65 if [[ ! -f $keys_dir/ssh_host_ed25519_key ]]; then
65 if [[ ! -f $keys_dir/ssh_host_ed25519_key ]]; then
66 echo "Generating $keys_dir/ssh_host_ed25519_key ..."
66 echo "Generating $keys_dir/ssh_host_ed25519_key ..."
67 gosu "$RC_USER" ssh-keygen -f $keys_dir/ssh_host_ed25519_key -N '' -t ed25519
67 gosu "$RC_USER" ssh-keygen -f $keys_dir/ssh_host_ed25519_key -N '' -t ed25519
68 gosu "$RC_USER" chmod 0600 $keys_dir/ssh_host_ed25519_key
68 gosu "$RC_USER" chmod 0600 $keys_dir/ssh_host_ed25519_key
69 fi
69 fi
70
70
71 if [[ ! -f $keys_dir/authorized_keys ]]; then
71 if [[ ! -f $keys_dir/authorized_keys ]]; then
72 echo "Generating $keys_dir/authorized_keys..."
72 echo "Generating $keys_dir/authorized_keys..."
73 gosu "$RC_USER" touch $keys_dir/authorized_keys
73 gosu "$RC_USER" touch $keys_dir/authorized_keys
74 fi
74 fi
75
75
76 sed -i "s/AllowUsers USER/AllowUsers $RC_USER/" $SSHD_CONF_FILE
76 sed -i "s/AllowUsers USER/AllowUsers $RC_USER/" $SSHD_CONF_FILE
77 }
77 }
78
78
79
79
80 echo "ENTRYPOINT: Running $RC_APP_TYPE with cmd '$1'"
80 echo "ENTRYPOINT: Running $RC_APP_TYPE with cmd '$1'"
81
81
82 if [ "$SSH_BOOTSTRAP" = 1 ]; then
82 if [ "$SSH_BOOTSTRAP" = 1 ]; then
83 # generate SSH keys
83 # generate SSH keys
84 generate_ssh_keys
84 generate_ssh_keys
85 fi
85 fi
86
86
87 isLikelyWeb=
87 isLikelyWeb=
88 case "$1" in
88 case "$1" in
89 supervisord | pserve | gunicorn ) isLikelyWeb=1 ;;
89 supervisord | pserve | gunicorn ) isLikelyWeb=1 ;;
90 esac
90 esac
91
91
92 if [[ $RC_APP_TYPE = "rhodecode_http" ]]; then
92 if [[ $RC_APP_TYPE = "rhodecode_http" ]]; then
93
93
94 DB_INIT_FILE=/var/opt/rhodecode_data/.dbinit_bootstrapped
94 DB_INIT_FILE=/var/opt/rhodecode_data/.dbinit_bootstrapped
95 if [ "$FORCE_DB_INIT_FILE" = 1 ]; then
95 if [ "$FORCE_DB_INIT_FILE" = 1 ]; then
96 rm $DB_INIT_FILE
96 rm $DB_INIT_FILE
97 fi
97 fi
98
98
99 # Avoid DB_INIT to run multiple times
99 # Avoid DB_INIT to run multiple times
100 if [[ ! -e $DB_INIT_FILE ]]; then
100 if [[ ! -e $DB_INIT_FILE ]]; then
101 echo "ENTRYPOINT: Starting $RC_APP_TYPE initial db bootstrap"
101 echo "ENTRYPOINT: Starting $RC_APP_TYPE initial db bootstrap"
102
102
103 db_init
103 db_init
104
104
105 gosu $RC_USER touch "$DB_INIT_FILE"
105 gosu $RC_USER touch "$DB_INIT_FILE"
106 echo "ENTRYPOINT: marked as db-bootstrapped at $DB_INIT_FILE"
106 echo "ENTRYPOINT: marked as db-bootstrapped at $DB_INIT_FILE"
107
107
108 fi
108 fi
109
109
110 RC_SETUP_FILE=/var/opt/rhodecode_data/.setup_bootstrapped
110 RC_SETUP_FILE=/var/opt/rhodecode_data/.setup_bootstrapped
111 if [ "$FORCE_RC_SETUP_APP" = 1 ]; then
111 if [ "$FORCE_RC_SETUP_APP" = 1 ]; then
112 rm $RC_SETUP_FILE
112 rm $RC_SETUP_FILE
113 fi
113 fi
114
114
115 # Avoid destroying bootstrapping by simple start/stop
115 # Avoid destroying bootstrapping by simple start/stop
116 if [[ ! -e $RC_SETUP_FILE ]]; then
116 if [[ ! -e $RC_SETUP_FILE ]]; then
117 echo "ENTRYPOINT: Starting $RC_APP_TYPE initial bootstrap"
117 echo "ENTRYPOINT: Starting $RC_APP_TYPE initial bootstrap"
118
118
119 # copy over default configuration files
119 # copy over default configuration files
120 config_copy
120 config_copy
121
121
122 # setup application with specific options
122 # setup application with specific options
123 if [ "$SETUP_APP" = 1 ]; then
123 if [ "$SETUP_APP" = 1 ]; then
124 rhodecode_setup
124 rhodecode_setup
125 fi
125 fi
126
126
127 gosu $RC_USER touch "$RC_SETUP_FILE"
127 gosu $RC_USER touch "$RC_SETUP_FILE"
128 echo "ENTRYPOINT: marked as setup-bootstrapped at $RC_SETUP_FILE"
128 echo "ENTRYPOINT: marked as setup-bootstrapped at $RC_SETUP_FILE"
129
129
130 fi
130 fi
131
131
132 if [ "$DB_UPGRADE" = 1 ]; then
132 if [ "$DB_UPGRADE" = 1 ]; then
133 # run DB migrate
133 # run DB migrate
134 echo "Found DB_UPGRADE flag, running DB upgrade"
134 db_upgrade
135 db_upgrade
135 fi
136 fi
136
137
137 fi
138 fi
138
139
139
140
140 if [ "$RC_APP_PROC" = 1 ]; then
141 if [ "$RC_APP_PROC" = 1 ]; then
141 # Fix problem with zombie processes when using executables like supervisord/gunicorn
142 # Fix problem with zombie processes when using executables like supervisord/gunicorn
142 set -- tini -- "$@"
143 set -- tini -- "$@"
143 set -- gosu $RC_USER "$@"
144 set -- gosu $RC_USER "$@"
144 fi
145 fi
145
146
146 if [ "$RC_APP_TYPE" = "rhodecode_sshd" ]; then
147 if [ "$RC_APP_TYPE" = "rhodecode_sshd" ]; then
147 # Fix problem with Missing privilege separation directory error
148 # Fix problem with Missing privilege separation directory error
148 mkdir -p /run/sshd
149 mkdir -p /run/sshd
149 fi
150 fi
150
151
151 exec "$@"
152 exec "$@"
@@ -1,258 +1,283 b''
1 FROM ubuntu:22.04
1 FROM ubuntu:22.04
2 # Using 22.04 LTS Release
2 # Using 22.04 LTS Release
3 MAINTAINER RhodeCode Inc. <support@rhodecode.com>
3 MAINTAINER RhodeCode Inc. <support@rhodecode.com>
4
4
5 ARG TZ="UTC"
5 ARG TZ="UTC"
6 ARG LOCALE_TYPE=en_US.UTF-8
6 ARG LOCALE_TYPE=en_US.UTF-8
7 ARG RHODECODE_TYPE=Enterprise
7 ARG RHODECODE_TYPE=Enterprise
8 # binary-install
8 # binary-install
9 ARG RHODECODE_VERSION=4.27.1
9 ARG RHODECODE_VERSION=4.28.0
10
10
11 ARG RHODECODE_DB=sqlite
11 ARG RHODECODE_DB=sqlite
12 ARG RHODECODE_USER_NAME=admin
12 ARG RHODECODE_USER_NAME=admin
13 ARG RHODECODE_USER_PASS=secret4
13 ARG RHODECODE_USER_PASS=secret4
14 ARG RHODECODE_USER_EMAIL=support@rhodecode.com
14 ARG RHODECODE_USER_EMAIL=support@rhodecode.com
15
15
16 # nix ver/channels
17 ARG DEV_NIX_VERSION=2.0.4
18 ARG DEV_NIX_CHANNEL=nixos-18.03
19
16 # env are runtime
20 # env are runtime
17 ENV \
21 ENV \
18 TZ=${TZ} \
22 TZ=${TZ} \
19 LOCALE_TYPE=${LOCALE_TYPE} \
23 LOCALE_TYPE=${LOCALE_TYPE} \
20 \
24 \
21 ## Define type we build, and the instance we'll create
25 ## Define type we build, and the instance we'll create
22 RHODECODE_TYPE=${RHODECODE_TYPE} \
26 RHODECODE_TYPE=${RHODECODE_TYPE} \
23 RC_TYPE_ID=enterprise-1 \
27 RC_TYPE_ID=enterprise-1 \
24 \
28 \
25 ## SETUP ARGS FOR INSTALLATION ##
29 ## SETUP ARGS FOR INSTALLATION ##
26 ## set version we build on, get from .env or set default ver
30 ## set version we build on, get from .env or set default ver
27 RHODECODE_VERSION=${RHODECODE_VERSION} \
31 RHODECODE_VERSION=${RHODECODE_VERSION} \
28 \
32 \
29 ## set DB, default sqlite
33 ## set DB, default sqlite
30 RHODECODE_DB=${RHODECODE_DB} \
34 RHODECODE_DB=${RHODECODE_DB} \
31 \
35 \
32 ## set app bootstrap required data
36 ## set app bootstrap required data
33 RHODECODE_USER_NAME=${RHODECODE_USER_NAME} \
37 RHODECODE_USER_NAME=${RHODECODE_USER_NAME} \
34 RHODECODE_USER_PASS=${RHODECODE_USER_PASS} \
38 RHODECODE_USER_PASS=${RHODECODE_USER_PASS} \
35 RHODECODE_USER_EMAIL=${RHODECODE_USER_EMAIL} \
39 RHODECODE_USER_EMAIL=${RHODECODE_USER_EMAIL} \
36 \
40 \
37 RC_USER=rhodecode \
41 RC_USER=rhodecode \
38 \
42 \
39 # SVN CONFIG
43 # SVN CONFIG
40 MOD_DAV_SVN_CONF_FILE=/etc/rhodecode/conf/svn/mod_dav_svn.conf \
44 MOD_DAV_SVN_CONF_FILE=/etc/rhodecode/conf/svn/mod_dav_svn.conf \
41 MOD_DAV_SVN_PORT=8090 \
45 MOD_DAV_SVN_PORT=8090 \
42 \
46 \
43 # SSHD CONFIG
47 # SSHD CONFIG
44 SSHD_CONF_FILE=/etc/rhodecode/sshd_config \
48 SSHD_CONF_FILE=/etc/rhodecode/sshd_config \
45 \
49 \
50 SHARED_CONF_DIR=/etc/rhodecode/conf \
46 BUILD_CONF=/etc/rhodecode/conf_build \
51 BUILD_CONF=/etc/rhodecode/conf_build \
47 BUILD_BIN_DIR=/var/opt/rhodecode_bin \
52 BUILD_BIN_DIR=/usr/local/bin/rhodecode_bin \
48 RHODECODE_DATA_DIR=/var/opt/rhodecode_data \
53 RHODECODE_DATA_DIR=/var/opt/rhodecode_data \
49 RHODECODE_REPO_DIR=/var/opt/rhodecode_repo_store \
54 RHODECODE_REPO_DIR=/var/opt/rhodecode_repo_store \
50 RHODECODE_HTTP_PORT=10020 \
55 RHODECODE_HTTP_PORT=10020 \
51 RHODECODE_VCS_PORT=10010 \
56 RHODECODE_VCS_PORT=10010 \
52 RHODECODE_HOST=0.0.0.0 \
57 RHODECODE_HOST=0.0.0.0 \
53 RHODECODE_VCS_HOST=127.0.0.1
58 RHODECODE_VCS_HOST=127.0.0.1
54
59
55 ENV \
60 ENV \
56 RCCONTROL=/home/$RC_USER/.rccontrol-profile/bin/rccontrol \
61 RCCONTROL=/home/$RC_USER/.rccontrol-profile/bin/rccontrol \
57 SUPERVISOR_CONF=/home/$RC_USER/.rccontrol/supervisor/supervisord.ini \
62 SUPERVISOR_CONF=/home/$RC_USER/.rccontrol/supervisor/supervisord.ini \
58 # make application scripts visible
63 # make application scripts visible
59 PATH=$PATH:/home/$RC_USER/.rccontrol-profile/bin
64 PATH=$PATH:/home/$RC_USER/.rccontrol-profile/bin
60
65
61 ENV SVN_LOCALE_DEPS apache2 apache2-utils libapache2-mod-svn
66 ENV SVN_LOCALE_DEPS apache2 apache2-utils libapache2-mod-svn
62 ENV SSH_LOCALE_DEPS openssh-server
67 ENV SSH_LOCALE_DEPS openssh-server
63 ENV PYTHON_DEPS python2
68 ENV PYTHON_DEPS python2
64 ENV EXTRA_DEPS ""
69 ENV EXTRA_DEPS ""
65
70
66 RUN \
71 RUN \
67 echo "** install base packages **" && \
72 echo "** install base packages **" && \
68 set -eux; \
73 set -eux; \
69 \
74 \
70 savedAptMark="$(apt-mark showmanual)"; \
75 savedAptMark="$(apt-mark showmanual)"; \
71 apt-get update; \
76 apt-get update; \
72 DEBIAN_FRONTEND="noninteractive" \
77 DEBIAN_FRONTEND="noninteractive" \
73 apt-get install -y --no-install-recommends \
78 apt-get install -y --no-install-recommends \
74 tini \
79 tini \
75 bash \
80 bash \
76 binutils \
81 binutils \
77 tzdata \
82 tzdata \
78 locales \
83 locales \
79 openssl \
84 openssl \
80 curl \
85 curl \
81 sudo \
86 sudo \
82 gosu \
87 gosu \
83 bzip2 \
88 bzip2 \
84 ca-certificates \
89 ca-certificates \
85 $PYTHON_DEPS \
90 $PYTHON_DEPS \
86 $SSH_LOCALE_DEPS \
91 $SSH_LOCALE_DEPS \
87 $SVN_LOCALE_DEPS \
92 $SVN_LOCALE_DEPS \
88 $EXTRA_DEPS \
93 $EXTRA_DEPS \
89 ; \
94 ; \
90 rm -rf /var/lib/apt/lists/*;
95 rm -rf /var/lib/apt/lists/*;
91
96
92 RUN \
97 RUN \
93 echo "** Configure the python executable for py2/3 compat **" && \
98 echo "** Configure the python executable for py2/3 compat **" && \
94 ISPY=$(which python3 || which python2) && \
99 IS_PY=$(which python3 || which python2) && \
95 if [ -n $ISPY ] ; then ln -s $ISPY /usr/bin/python ; fi
100 if [ -n "$IS_PY" ] ; then ln -s "$IS_PY" /usr/bin/python ; fi
96
101
97 RUN \
102 RUN \
98 echo "** Configure the locales **" && \
103 echo "** Configure the locales **" && \
99 sed -i "s/^# ${LOCALE_TYPE}/${LOCALE_TYPE}/g" /etc/locale.gen && \
104 sed -i "s/^# ${LOCALE_TYPE}/${LOCALE_TYPE}/g" /etc/locale.gen && \
100 locale-gen
105 locale-gen
101
106
102 # locale-archive is a fix for old nix glibc2.26 locales available
107 # locale-archive is a fix for old nix glibc2.26 locales available
103 ENV \
108 ENV \
104 LOCALE_ARCHIVE=/var/opt/locale-archive \
109 LOCALE_ARCHIVE=/var/opt/locale-archive \
105 LANG=${LOCALE_TYPE} \
110 LANG=${LOCALE_TYPE} \
106 LANGUAGE=${LOCALE_TYPE} \
111 LANGUAGE=${LOCALE_TYPE} \
107 LC_ALL=${LOCALE_TYPE}
112 LC_ALL=${LOCALE_TYPE}
108
113
109 # configure the system user
114 # configure the system user
110 # explicitly set uid/gid to guarantee that it won't change in the future
115 # explicitly set uid/gid to guarantee that it won't change in the future
111 # the values 999:999 are identical to the current user/group id assigned
116 # the values 999:999 are identical to the current user/group id assigned
112 RUN \
117 RUN \
113 echo "** Create system user $RC_USER **" && \
118 echo "** Create system user $RC_USER **" && \
114 groupadd --system --gid 999 $RC_USER && \
119 groupadd --system --gid 999 $RC_USER && \
115 useradd --system --gid $RC_USER --uid 999 --shell /bin/bash $RC_USER && \
120 useradd --system --gid $RC_USER --uid 999 --shell /bin/bash $RC_USER && \
116 usermod -G $RC_USER $RC_USER
121 usermod -G $RC_USER $RC_USER
117
122
123 RUN \
124 echo "** Create nix-build user $NIX_BLD_USER **" && \
125 groupadd --system --gid 1099 $NIX_BLD_USER && \
126 useradd --system --gid $NIX_BLD_USER --uid 1099 --shell /bin/bash $NIX_BLD_USER && \
127 usermod -G $NIX_BLD_USER $NIX_BLD_USER
128
129 RUN \
130 echo "** disable nix sandboxing **" && \
131 mkdir /etc/nix && echo 'sandbox = false' > /etc/nix/nix.conf
132
133
118 # set the default bash shell
134 # set the default bash shell
119 SHELL ["/bin/bash", "-c"]
135 SHELL ["/bin/bash", "-c"]
120
136
121 # Fix and set a timezone
137 # Fix and set a timezone
122 RUN \
138 RUN \
123 echo "** configure the timezone **" && \
139 echo "** configure the timezone **" && \
124 rm /etc/localtime && cp /usr/share/zoneinfo/$TZ /etc/localtime && \
140 rm /etc/localtime && cp /usr/share/zoneinfo/$TZ /etc/localtime && \
125 echo $TZ > /etc/timezone
141 echo $TZ > /etc/timezone
126
142
127
143
128 RUN \
144 RUN \
129 echo "** prepare rhodecode store and cache **" && \
145 echo "** prepare rhodecode store and cache **" && \
130 install -d -m 0700 -o $RC_USER -g $RC_USER /nix && \
146 install -d -m 0700 -o $RC_USER -g $RC_USER /nix && \
131 install -d -m 0755 -o $RC_USER -g $RC_USER /opt/rhodecode && \
147 install -d -m 0755 -o $RC_USER -g $RC_USER /opt/rhodecode && \
132 install -d -m 0755 -o $RC_USER -g $RC_USER /var/opt/rhodecode_bin && \
148 install -d -m 0755 -o $RC_USER -g $RC_USER /usr/local/bin/rhodecode_bin && \
133 install -d -m 0755 -o $RC_USER -g $RC_USER $RHODECODE_REPO_DIR && \
149 install -d -m 0755 -o $RC_USER -g $RC_USER $RHODECODE_REPO_DIR && \
134 install -d -m 0755 -o $RC_USER -g $RC_USER $RHODECODE_DATA_DIR && \
150 install -d -m 0755 -o $RC_USER -g $RC_USER $RHODECODE_DATA_DIR && \
135 install -d -m 0755 -o $RC_USER -g $RC_USER $BUILD_CONF && \
151 install -d -m 0755 -o $RC_USER -g $RC_USER $BUILD_CONF && \
152 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/rhodecode-vcsserver && \
153 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/rhodecode-enterprise-ce && \
154 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/rhodecode-enterprise-ee && \
136 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/ && \
155 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/ && \
137 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/.rccontrol && \
156 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/.rccontrol && \
138 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/.rccontrol/cache && \
157 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/.rccontrol/cache && \
139 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/.rccontrol/bootstrap && \
158 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/.rccontrol/bootstrap && \
140 install -d -m 0700 -o $RC_USER -g $RC_USER /home/$RC_USER/.ssh && \
159 install -d -m 0700 -o $RC_USER -g $RC_USER /home/$RC_USER/.ssh && \
141 install -d -m 0700 -o $RC_USER -g $RC_USER /home/$RC_USER/.rhoderc
160 install -d -m 0700 -o $RC_USER -g $RC_USER /home/$RC_USER/.rhoderc
142
161
143 # expose our custom sshd config
162 # expose our custom sshd config
144 COPY service/sshd/sshd_config $SSHD_CONF_FILE
163 COPY service/sshd/sshd_config $SSHD_CONF_FILE
145
164
146 # Apache SVN setup
165 # Apache SVN setup
147 RUN \
166 RUN \
148 echo "**** Apache config cleanup ****" && \
167 echo "**** Apache config cleanup ****" && \
149 rm -f /etc/apache2/conf.d/info.conf \
168 rm -f /etc/apache2/conf.d/info.conf \
150 /etc/apache2/conf.d/mpm.conf \
169 /etc/apache2/conf.d/mpm.conf \
151 /etc/apache2/conf.d/userdir.conf && \
170 /etc/apache2/conf.d/userdir.conf && \
152 rm -f /etc/apache2/sites-enabled/* && \
171 rm -f /etc/apache2/sites-enabled/* && \
153 rm -f /etc/apache2/sites-available/*
172 rm -f /etc/apache2/sites-available/*
154
173
155 # custom SVN virtualhost
174 # custom SVN virtualhost
156 COPY service/svn/virtualhost.conf /etc/apache2/sites-enabled/
175 COPY service/svn/virtualhost.conf /etc/apache2/sites-enabled/
157
176
158 RUN \
177 RUN \
159 echo "**** Apache config ****" && \
178 echo "**** Apache config ****" && \
160 echo $(strings /usr/lib/apache2/modules/mod_dav_svn.so | grep 'Powered by') > /var/opt/dav.version && \
179 echo $(strings /usr/lib/apache2/modules/mod_dav_svn.so | grep 'Powered by') > /var/opt/dav.version && \
161 mkdir -p /run/apache2 && \
180 mkdir -p /run/apache2 && \
162 mkdir -p /var/opt/www && \
181 mkdir -p /var/opt/www && \
163 echo "unset HOME" > /etc/apache2/envvars && \
182 echo "unset HOME" > /etc/apache2/envvars && \
164 echo "export APACHE_RUN_USER=${RC_USER}" >> /etc/apache2/envvars && \
183 echo "export APACHE_RUN_USER=${RC_USER}" >> /etc/apache2/envvars && \
165 echo "export APACHE_PID_FILE=/var/run/apache2/apache2.pid" >> /etc/apache2/envvars && \
184 echo "export APACHE_PID_FILE=/var/run/apache2/apache2.pid" >> /etc/apache2/envvars && \
166 echo "export APACHE_RUN_DIR=/var/run/apache2" >> /etc/apache2/envvars && \
185 echo "export APACHE_RUN_DIR=/var/run/apache2" >> /etc/apache2/envvars && \
167 echo "export APACHE_LOCK_DIR=/var/lock/apache2" >> /etc/apache2/envvars && \
186 echo "export APACHE_LOCK_DIR=/var/lock/apache2" >> /etc/apache2/envvars && \
168 echo "export APACHE_RUN_USER=${RC_USER}" >> /etc/apache2/envvars && \
187 echo "export APACHE_RUN_USER=${RC_USER}" >> /etc/apache2/envvars && \
169 echo "export APACHE_RUN_GROUP=${RC_USER}" >> /etc/apache2/envvars && \
188 echo "export APACHE_RUN_GROUP=${RC_USER}" >> /etc/apache2/envvars && \
170 sed -i "s/Listen 80/Listen ${MOD_DAV_SVN_PORT}/g" /etc/apache2/ports.conf
189 sed -i "s/Listen 80/Listen ${MOD_DAV_SVN_PORT}/g" /etc/apache2/ports.conf
171
190
172
191
173 # Copy artifacts
192 # Copy artifacts
174
193
175 COPY --chown=$RC_USER:$RC_USER .cache/* /home/$RC_USER/.rccontrol/cache/
194 COPY --chown=$RC_USER:$RC_USER .cache/* /home/$RC_USER/.rccontrol/cache/
176 COPY --chown=$RC_USER:$RC_USER config/compose/rhodecode_enterprise.license /home/$RC_USER/.rccontrol/bootstrap/
195 COPY --chown=$RC_USER:$RC_USER config/_shared/rhodecode_enterprise.license /home/$RC_USER/.rccontrol/bootstrap/
177 COPY --chown=$RC_USER:$RC_USER service/rhodecode/bootstrap/* /home/$RC_USER/.rccontrol/bootstrap/
196 COPY --chown=$RC_USER:$RC_USER service/rhodecode/bootstrap/* /home/$RC_USER/.rccontrol/bootstrap/
178
197
179 RUN \
198 RUN \
180 echo "**** locale-archive path ****" && \
199 echo "**** locale-archive path ****" && \
181 mv -v /home/$RC_USER/.rccontrol/cache/locale-archive /var/opt/locale-archive
200 mv -v /home/$RC_USER/.rccontrol/cache/locale-archive /var/opt/locale-archive
182
201
183 # change to non-root user for RUN commands
202 # change to non-root user for RUN commands
184 USER $RC_USER
203 USER $RC_USER
185 WORKDIR /home/$RC_USER
204 WORKDIR /home/$RC_USER
186
205
187 RUN \
206 RUN \
188 echo "** install rhodecode control **" && \
207 echo "** install rhodecode control **" && \
189 cd /home/$RC_USER/.rccontrol/cache && \
208 cd /home/$RC_USER/.rccontrol/cache && \
190 INSTALLER=$(ls -Art /home/$RC_USER/.rccontrol/cache/RhodeCode-installer-* | tail -n 1) && \
209 INSTALLER=$(ls -Art /home/$RC_USER/.rccontrol/cache/RhodeCode-installer-* | tail -n 1) && \
191 chmod +x ${INSTALLER} && \
210 chmod +x ${INSTALLER} && \
192 ${INSTALLER} --accept-license && \
211 ${INSTALLER} --accept-license && \
193 ${RCCONTROL} self-init && \
212 ${RCCONTROL} self-init && \
194 cp -v /home/$RC_USER/.rccontrol-profile/etc/ca-bundle.crt $BUILD_CONF/ && \
213 cp -v /home/$RC_USER/.rccontrol-profile/etc/ca-bundle.crt $BUILD_CONF/ && \
195 echo "Done"
214 echo "Done installing rhodecode control"
196
215
197 RUN \
216 RUN \
198 echo "** install vcsserver ${RHODECODE_VERSION} **" && \
217 echo "** install vcsserver ${RHODECODE_VERSION} **" && \
199 ${RCCONTROL} install VCSServer --version ${RHODECODE_VERSION} --start-at-boot=yes --accept-license --offline \
218 ${RCCONTROL} install VCSServer --version ${RHODECODE_VERSION} --start-at-boot=yes --accept-license --offline \
200 '{"host":"'"$RHODECODE_VCS_HOST"'", "port":"'"$RHODECODE_VCS_PORT"'"}' && \
219 '{"host":"'"$RHODECODE_VCS_HOST"'", "port":"'"$RHODECODE_VCS_PORT"'"}' && \
201 VCSSERVER_PATH=/home/$RC_USER/.rccontrol/vcsserver-1 && \
220 VCSSERVER_PATH=/home/$RC_USER/.rccontrol/vcsserver-1 && \
202 rm -rf $BUILD_BIN_DIR/vcs_bin && ln -s ${VCSSERVER_PATH}/profile/bin $BUILD_BIN_DIR/vcs_bin && \
221 rm -rf $BUILD_BIN_DIR/vcs_bin && \
203 cp -v ${VCSSERVER_PATH}/vcsserver.ini $BUILD_CONF/vcsserver.ini
222 ln -s ${VCSSERVER_PATH}/profile/bin $BUILD_BIN_DIR/vcs_bin && \
223 cp -v ${VCSSERVER_PATH}/vcsserver.ini $BUILD_CONF/vcsserver.ini && \
224 cp -v ${VCSSERVER_PATH}/gunicorn_conf.py $BUILD_CONF/gunicorn_conf_vcs.py && \
225 echo "Done installing vcsserver"
204
226
205 RUN \
227 RUN \
206 echo "** install rhodecode ${RHODECODE_TYPE} ${RHODECODE_VERSION} **" && \
228 echo "** install rhodecode ${RHODECODE_TYPE} ${RHODECODE_VERSION} **" && \
207 RHODECODE_DB_INIT=sqlite && \
229 RHODECODE_DB_INIT=sqlite && \
208 ${RCCONTROL} install ${RHODECODE_TYPE} --version ${RHODECODE_VERSION} --start-at-boot=yes --accept-license --offline \
230 ${RCCONTROL} install ${RHODECODE_TYPE} --version ${RHODECODE_VERSION} --start-at-boot=yes --accept-license --offline \
209 '{"host":"'"$RHODECODE_HOST"'", "port":"'"$RHODECODE_HTTP_PORT"'", "username":"'"$RHODECODE_USER_NAME"'", "password":"'"$RHODECODE_USER_PASS"'", "email":"'"$RHODECODE_USER_EMAIL"'", "repo_dir":"'"$RHODECODE_REPO_DIR"'", "database": "'"$RHODECODE_DB_INIT"'", "skip_existing_db": "1"}' && \
231 '{"host":"'"$RHODECODE_HOST"'", "port":"'"$RHODECODE_HTTP_PORT"'", "username":"'"$RHODECODE_USER_NAME"'", "password":"'"$RHODECODE_USER_PASS"'", "email":"'"$RHODECODE_USER_EMAIL"'", "repo_dir":"'"$RHODECODE_REPO_DIR"'", "database": "'"$RHODECODE_DB_INIT"'", "skip_existing_db": "1"}' && \
210 RHODECODE_PATH=/home/$RC_USER/.rccontrol/${RC_TYPE_ID} && \
232 RHODECODE_PATH=/home/$RC_USER/.rccontrol/${RC_TYPE_ID} && \
211 rm -rf $BUILD_BIN_DIR/bin && ln -s ${RHODECODE_PATH}/profile/bin $BUILD_BIN_DIR/ && \
233 rm -rf $BUILD_BIN_DIR/bin && ln -s ${RHODECODE_PATH}/profile/bin $BUILD_BIN_DIR/ && \
212 cp -v ${RHODECODE_PATH}/rhodecode.ini $BUILD_CONF/rhodecode.ini && \
234 cp -v ${RHODECODE_PATH}/rhodecode.ini $BUILD_CONF/rhodecode.ini && \
213 cp -v ${RHODECODE_PATH}/gunicorn_conf.py $BUILD_CONF/gunicorn_conf.py && \
235 cp -v ${RHODECODE_PATH}/gunicorn_conf.py $BUILD_CONF/gunicorn_conf.py && \
214 cp -v ${RHODECODE_PATH}/search_mapping.ini $BUILD_CONF/search_mapping.ini && \
236 cp -v ${RHODECODE_PATH}/search_mapping.ini $BUILD_CONF/search_mapping.ini && \
215 mkdir -p $RHODECODE_DATA_DIR/static && cp -r ${RHODECODE_PATH}/public/* $RHODECODE_DATA_DIR/static/ && \
237 mkdir -p $RHODECODE_DATA_DIR/static && cp -r ${RHODECODE_PATH}/public/* $RHODECODE_DATA_DIR/static/ && \
216 rm ${RHODECODE_PATH}/rhodecode.db
238 rm ${RHODECODE_PATH}/rhodecode.db && \
239 echo "Done installing rhodecode"
217
240
218
241
219 RUN \
242 RUN \
220 echo "** configure supervisord **" && \
243 echo "** configure supervisord **" && \
221 cp -v ${SUPERVISOR_CONF} $BUILD_CONF/ && \
244 cp -v ${SUPERVISOR_CONF} $BUILD_CONF/ && \
222 sed -i "s/self_managed_supervisor = False/self_managed_supervisor = True/g" /home/$RC_USER/.rccontrol.ini && \
245 sed -i "s/self_managed_supervisor = False/self_managed_supervisor = True/g" /home/$RC_USER/.rccontrol.ini && \
223 echo "done"
246 echo "Done installing supervisord"
224
247
225 USER root
248 USER root
226
249
227
228 RUN \
250 RUN \
229 echo "**** cleanup ****" && \
251 echo "**** cleanup ****" && \
230 apt-get remove -y $PYTHON_DEPS && \
252 apt-get remove -y $PYTHON_DEPS && \
231 apt-get autoclean -y && \
253 apt-get autoclean -y && \
232 rm -f /tmp/* && \
254 rm -f /tmp/* && \
255 rm -rf /var/lib/apt/lists/* && \
256 rm -rf /var/cache/apk/* && \
233 rm -f /home/$RC_USER/.rccontrol/cache/RhodeCode-installer-* && \
257 rm -f /home/$RC_USER/.rccontrol/cache/RhodeCode-installer-* && \
234 rm -f /home/$RC_USER/.rccontrol/cache/*.bz2 && \
258 rm -f /home/$RC_USER/.rccontrol/cache/*.bz2 && \
235 rm -rf /var/lib/apt/lists/* \
259 rm -f ${SUPERVISOR_CONF} && \
236 rm -rf /var/cache/apk/* \
260 echo "Done cleanup"
237 rm ${SUPERVISOR_CONF}
261
238
262
239 # copy entrypoints
263 # copy entrypoints
240 COPY entrypoints.d/entrypoint.sh /opt/entrypoints.d/entrypoint.sh
264 COPY entrypoints.d/entrypoint.sh /opt/entrypoints.d/entrypoint.sh
265
241 RUN chmod +x /opt/entrypoints.d/entrypoint.sh
266 RUN chmod +x /opt/entrypoints.d/entrypoint.sh
242
267
243 # config volume
268 # config volume
244 VOLUME /etc/rhodecode/conf
269 VOLUME /etc/rhodecode/conf
245
270
246 # repo store volume
271 # repo store volume
247 VOLUME /var/opt/rhodecode_repo_store
272 VOLUME /var/opt/rhodecode_repo_store
248
273
249 # data volume
274 # data volume
250 VOLUME /var/opt/rhodecode_data
275 VOLUME /var/opt/rhodecode_data
251
276
252 # logs volume
277 # logs volume
253 VOLUME /var/log/rhodecode
278 VOLUME /var/log/rhodecode
254
279
255 ENTRYPOINT ["/opt/entrypoints.d/entrypoint.sh"]
280 ENTRYPOINT ["/opt/entrypoints.d/entrypoint.sh"]
256
281
257 # compose can override this
282 # compose can override this
258 CMD ["supervisord", "--nodaemon", "-c", "/etc/rhodecode/conf/supervisord.ini"]
283 CMD [ "/usr/local/bin/rhodecode_bin/bin/gunicorn", "--error-logfile=-", "--paster=/etc/rhodecode/conf_build/run.ini", "--config=/etc/rhodecode/conf_build/gunicorn_conf.py" ]
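The ARG block at the top of this Dockerfile (version, database URL, initial admin account) can be overridden at build time. A hedged sketch of a manual build, where the image tag and the -f path are hypothetical and the --build-arg names come from the ARG lines above:

    docker build \
        --build-arg RHODECODE_VERSION=4.28.0 \
        --build-arg RHODECODE_DB=sqlite \
        --build-arg RHODECODE_USER_NAME=admin \
        --build-arg RHODECODE_USER_PASS=secret4 \
        --build-arg RHODECODE_USER_EMAIL=support@rhodecode.com \
        -t rhodecode-ee:4.28.0 \
        -f rhodecode.dockerfile .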
@@ -1,305 +1,324 b''
1 FROM ubuntu:22.04
1 FROM ubuntu:22.04
2 # Using 22.04 LTS Release
2 # Using 22.04 LTS Release
3 MAINTAINER RhodeCode Inc. <support@rhodecode.com>
3 MAINTAINER RhodeCode Inc. <support@rhodecode.com>
4
4
5 ARG TZ="UTC"
5 ARG TZ="UTC"
6 ARG LOCALE_TYPE=en_US.UTF-8
6 ARG LOCALE_TYPE=en_US.UTF-8
7 ARG RHODECODE_TYPE=Enterprise
7 ARG RHODECODE_TYPE=Enterprise
8 # source-install
8 # source-install
9 ARG RHODECODE_VERSION=4.26.0
9 ARG RHODECODE_VERSION=4.28.0
10
10
11 ARG RHODECODE_DB=sqlite
11 ARG RHODECODE_DB=sqlite
12 ARG RHODECODE_USER_NAME=admin
12 ARG RHODECODE_USER_NAME=admin
13 ARG RHODECODE_USER_PASS=secret4
13 ARG RHODECODE_USER_PASS=secret4
14 ARG RHODECODE_USER_EMAIL=support@rhodecode.com
14 ARG RHODECODE_USER_EMAIL=support@rhodecode.com
15
15
16 # nix ver/channels
16 # nix ver/channels
17 ARG DEV_NIX_VERSION=2.0.4
17 ARG DEV_NIX_VERSION=2.0.4
18 ARG DEV_NIX_CHANNEL=nixos-18.03
18 ARG DEV_NIX_CHANNEL=nixos-18.03
19
19
20 # env are runtime
20 # env are runtime
21 ENV \
21 ENV \
22 TZ=${TZ} \
22 TZ=${TZ} \
23 LOCALE_TYPE=${LOCALE_TYPE} \
23 LOCALE_TYPE=${LOCALE_TYPE} \
24 \
24 \
25 ## Define type we build, and the instance we'll create
25 ## Define type we build, and the instance we'll create
26 RHODECODE_TYPE=${RHODECODE_TYPE} \
26 RHODECODE_TYPE=${RHODECODE_TYPE} \
27 RC_TYPE_ID=enterprise-1 \
27 RC_TYPE_ID=enterprise-1 \
28 \
28 \
29 ## SETUP ARGS FOR INSTALLATION ##
29 ## SETUP ARGS FOR INSTALLATION ##
30 ## set version we build on, get from .env or set default ver
30 ## set version we build on, get from .env or set default ver
31 RHODECODE_VERSION=${RHODECODE_VERSION} \
31 RHODECODE_VERSION=${RHODECODE_VERSION} \
32 \
32 \
33 ## set DB, default sqlite
33 ## set DB, default sqlite
34 RHODECODE_DB=${RHODECODE_DB} \
34 RHODECODE_DB=${RHODECODE_DB} \
35 \
35 \
36 ## set app bootstrap required data
36 ## set app bootstrap required data
37 RHODECODE_USER_NAME=${RHODECODE_USER_NAME} \
37 RHODECODE_USER_NAME=${RHODECODE_USER_NAME} \
38 RHODECODE_USER_PASS=${RHODECODE_USER_PASS} \
38 RHODECODE_USER_PASS=${RHODECODE_USER_PASS} \
39 RHODECODE_USER_EMAIL=${RHODECODE_USER_EMAIL} \
39 RHODECODE_USER_EMAIL=${RHODECODE_USER_EMAIL} \
40 \
40 \
41 RC_USER=rhodecode \
41 RC_USER=rhodecode \
42 \
42 \
43 # SVN CONFIG
43 # SVN CONFIG
44 MOD_DAV_SVN_CONF_FILE=/etc/rhodecode/conf/svn/mod_dav_svn.conf \
44 MOD_DAV_SVN_CONF_FILE=/etc/rhodecode/conf/svn/mod_dav_svn.conf \
45 MOD_DAV_SVN_PORT=8090 \
45 MOD_DAV_SVN_PORT=8090 \
46 \
46 \
47 # SSHD CONFIG
47 # SSHD CONFIG
48 SSHD_CONF_FILE=/etc/rhodecode/sshd_config \
48 SSHD_CONF_FILE=/etc/rhodecode/sshd_config \
49 \
49 \
50 SHARED_CONF_DIR=/etc/rhodecode/conf \
50 BUILD_CONF=/etc/rhodecode/conf_build \
51 BUILD_CONF=/etc/rhodecode/conf_build \
51 BUILD_BIN_DIR=/var/opt/rhodecode_bin \
52 BUILD_BIN_DIR=/usr/local/bin/rhodecode_bin \
52 RHODECODE_DATA_DIR=/var/opt/rhodecode_data \
53 RHODECODE_DATA_DIR=/var/opt/rhodecode_data \
53 RHODECODE_REPO_DIR=/var/opt/rhodecode_repo_store \
54 RHODECODE_REPO_DIR=/var/opt/rhodecode_repo_store \
54 RHODECODE_HTTP_PORT=10020 \
55 RHODECODE_HTTP_PORT=10020 \
55 RHODECODE_VCS_PORT=10010 \
56 RHODECODE_VCS_PORT=10010 \
56 RHODECODE_HOST=0.0.0.0 \
57 RHODECODE_HOST=0.0.0.0 \
57 RHODECODE_VCS_HOST=127.0.0.1
58 RHODECODE_VCS_HOST=127.0.0.1
58
59
59 ENV \
60 ENV \
60 RCCONTROL=/home/$RC_USER/.rccontrol-profile/bin/rccontrol \
61 RCCONTROL=/home/$RC_USER/.rccontrol-profile/bin/rccontrol \
61 SUPERVISOR_CONF=/home/$RC_USER/.rccontrol/supervisor/supervisord.ini \
62 SUPERVISOR_CONF=/home/$RC_USER/.rccontrol/supervisor/supervisord.ini \
62 # make application scripts visible
63 # make application scripts visible
63 PATH=$PATH:/home/$RC_USER/.rccontrol-profile/bin
64 PATH=$PATH:/home/$RC_USER/.rccontrol-profile/bin
64
65
65 ENV SVN_LOCALE_DEPS apache2 apache2-utils libapache2-mod-svn
66 ENV SVN_LOCALE_DEPS apache2 apache2-utils libapache2-mod-svn
66 ENV SSH_LOCALE_DEPS openssh-server
67 ENV SSH_LOCALE_DEPS openssh-server
67 ENV PYTHON_DEPS python2
68 ENV PYTHON_DEPS python2
68 ENV EXTRA_DEPS vim
69 ENV EXTRA_DEPS ""
69
70
70 ENV \
71 ENV \
71 PATH=$PATH:/nix/var/nix/profiles/per-user/$RC_USER/profile/bin:/home/$RC_USER/rhodecode-enterprise-ee/profile/bin \
72 PATH=$PATH:/nix/var/nix/profiles/per-user/$RC_USER/profile/bin:/home/$RC_USER/rhodecode-enterprise-ee/profile/bin \
72 NIX_BLD_USER=nixbld \
73 NIX_BLD_USER=nixbld \
73 NIX_PATH=/nix/var/nix/profiles/per-user/$RC_USER/channels \
74 NIX_PATH=/nix/var/nix/profiles/per-user/$RC_USER/channels \
74 NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
75 NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
75
76
76
77
77 RUN \
78 RUN \
78 echo "** install base packages **" && \
79 echo "** install base packages **" && \
79 set -eux; \
80 set -eux; \
80 \
81 \
81 savedAptMark="$(apt-mark showmanual)"; \
82 savedAptMark="$(apt-mark showmanual)"; \
82 apt-get update; \
83 apt-get update; \
83 DEBIAN_FRONTEND="noninteractive" \
84 DEBIAN_FRONTEND="noninteractive" \
84 apt-get install -y --no-install-recommends \
85 apt-get install -y --no-install-recommends \
85 tini \
86 tini \
86 bash \
87 bash \
87 binutils \
88 binutils \
88 tzdata \
89 tzdata \
89 locales \
90 locales \
90 openssl \
91 openssl \
91 curl \
92 curl \
92 sudo \
93 sudo \
93 gosu \
94 gosu \
94 bzip2 \
95 bzip2 \
95 ca-certificates \
96 ca-certificates \
96 $PYTHON_DEPS \
97 $PYTHON_DEPS \
97 $SSH_LOCALE_DEPS \
98 $SSH_LOCALE_DEPS \
98 $SVN_LOCALE_DEPS \
99 $SVN_LOCALE_DEPS \
99 $EXTRA_DEPS \
100 $EXTRA_DEPS \
100 ; \
101 ; \
101 rm -rf /var/lib/apt/lists/*;
102 rm -rf /var/lib/apt/lists/*;
102
103
103 RUN \
104 RUN \
104 echo "** Configure the python executable for py2/3 compat **" && \
105 echo "** Configure the python executable for py2/3 compat **" && \
105 ISPY=$(which python3 || which python2) && \
106 IS_PY=$(which python3 || which python2) && \
106 if [ -n $ISPY ] ; then ln -s $ISPY /usr/bin/python ; fi
107 if [ -n "$IS_PY" ] ; then ln -s "$IS_PY" /usr/bin/python ; fi
107
108
108 RUN \
109 RUN \
109 echo "** Configure the locales **" && \
110 echo "** Configure the locales **" && \
110 sed -i "s/^# ${LOCALE_TYPE}/${LOCALE_TYPE}/g" /etc/locale.gen && \
111 sed -i "s/^# ${LOCALE_TYPE}/${LOCALE_TYPE}/g" /etc/locale.gen && \
111 locale-gen
112 locale-gen
112
113
113 # locale-archive is a fix for old nix glibc2.26 locales available
114 # locale-archive is a fix for old nix glibc2.26 locales available
114 ENV \
115 ENV \
115 LOCALE_ARCHIVE=/var/opt/locale-archive \
116 LOCALE_ARCHIVE=/var/opt/locale-archive \
116 LANG=${LOCALE_TYPE} \
117 LANG=${LOCALE_TYPE} \
117 LANGUAGE=${LOCALE_TYPE} \
118 LANGUAGE=${LOCALE_TYPE} \
118 LC_ALL=${LOCALE_TYPE}
119 LC_ALL=${LOCALE_TYPE}
119
120
120 # configure the system user
121 # configure the system user
121 # explicitly set uid/gid to guarantee that it won't change in the future
122 # explicitly set uid/gid to guarantee that it won't change in the future
122 # the values 999:999 are identical to the current user/group id assigned
123 # the values 999:999 are identical to the current user/group id assigned
123 RUN \
124 RUN \
124 echo "** Create system user $RC_USER **" && \
125 echo "** Create system user $RC_USER **" && \
125 groupadd --system --gid 999 $RC_USER && \
126 groupadd --system --gid 999 $RC_USER && \
126 useradd --system --gid $RC_USER --uid 999 --shell /bin/bash $RC_USER && \
127 useradd --system --gid $RC_USER --uid 999 --shell /bin/bash $RC_USER && \
127 usermod -G $RC_USER $RC_USER
128 usermod -G $RC_USER $RC_USER
128
129
129 RUN \
130 RUN \
130 echo "** Create nix-build user $NIX_BLD_USER **" && \
131 echo "** Create nix-build user $NIX_BLD_USER **" && \
131 groupadd --system --gid 1099 $NIX_BLD_USER && \
132 groupadd --system --gid 1099 $NIX_BLD_USER && \
132 useradd --system --gid $NIX_BLD_USER --uid 1099 --shell /bin/bash $NIX_BLD_USER && \
133 useradd --system --gid $NIX_BLD_USER --uid 1099 --shell /bin/bash $NIX_BLD_USER && \
133 usermod -G $NIX_BLD_USER $NIX_BLD_USER
134 usermod -G $NIX_BLD_USER $NIX_BLD_USER
134
135
135 RUN \
136 RUN \
136 echo "** disable nix sandboxing **" && \
137 echo "** disable nix sandboxing **" && \
137 mkdir /etc/nix && echo 'sandbox = false' > /etc/nix/nix.conf
138 mkdir /etc/nix && echo 'sandbox = false' > /etc/nix/nix.conf
138
139
139
140
140 # set the default bash shell
141 # set the default bash shell
141 SHELL ["/bin/bash", "-c"]
142 SHELL ["/bin/bash", "-c"]
142
143
143 # Fix and set a timezone
144 # Fix and set a timezone
144 RUN \
145 RUN \
145 echo "** configure the timezone **" && \
146 echo "** configure the timezone **" && \
146 rm /etc/localtime && cp /usr/share/zoneinfo/$TZ /etc/localtime && \
147 rm /etc/localtime && cp /usr/share/zoneinfo/$TZ /etc/localtime && \
147 echo $TZ > /etc/timezone
148 echo $TZ > /etc/timezone
148
149
149
150
150 RUN \
151 RUN \
151 echo "** prepare rhodecode store and cache **" && \
152 echo "** prepare rhodecode store and cache **" && \
152 install -d -m 0700 -o $RC_USER -g $RC_USER /nix && \
153 install -d -m 0700 -o $RC_USER -g $RC_USER /nix && \
153 install -d -m 0755 -o $RC_USER -g $RC_USER /opt/rhodecode && \
154 install -d -m 0755 -o $RC_USER -g $RC_USER /opt/rhodecode && \
154 install -d -m 0755 -o $RC_USER -g $RC_USER /var/opt/rhodecode_bin && \
155 install -d -m 0755 -o $RC_USER -g $RC_USER /usr/local/bin/rhodecode_bin && \
155 install -d -m 0755 -o $RC_USER -g $RC_USER $RHODECODE_REPO_DIR && \
156 install -d -m 0755 -o $RC_USER -g $RC_USER $RHODECODE_REPO_DIR && \
156 install -d -m 0755 -o $RC_USER -g $RC_USER $RHODECODE_DATA_DIR && \
157 install -d -m 0755 -o $RC_USER -g $RC_USER $RHODECODE_DATA_DIR && \
157 install -d -m 0755 -o $RC_USER -g $RC_USER $BUILD_CONF && \
158 install -d -m 0755 -o $RC_USER -g $RC_USER $BUILD_CONF && \
158 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/rhodecode-vcsserver && \
159 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/rhodecode-vcsserver && \
159 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/rhodecode-enterprise-ce && \
160 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/rhodecode-enterprise-ce && \
160 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/rhodecode-enterprise-ee && \
161 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/rhodecode-enterprise-ee && \
161 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/ && \
162 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/ && \
162 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/.rccontrol && \
163 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/.rccontrol && \
163 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/.rccontrol/cache && \
164 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/.rccontrol/cache && \
164 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/.rccontrol/bootstrap && \
165 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/.rccontrol/bootstrap && \
165 install -d -m 0700 -o $RC_USER -g $RC_USER /home/$RC_USER/.ssh
166 install -d -m 0700 -o $RC_USER -g $RC_USER /home/$RC_USER/.ssh && \
167 install -d -m 0700 -o $RC_USER -g $RC_USER /home/$RC_USER/.rhoderc
166
168
167 # expose our custom sshd config
169 # expose our custom sshd config
168 COPY service/sshd/sshd_config $SSHD_CONF_FILE
170 COPY service/sshd/sshd_config $SSHD_CONF_FILE
169
171
170 # Apache SVN setup
172 # Apache SVN setup
171 RUN \
173 RUN \
172 echo "**** Apache config cleanup ****" && \
174 echo "**** Apache config cleanup ****" && \
173 rm -f /etc/apache2/conf.d/info.conf \
175 rm -f /etc/apache2/conf.d/info.conf \
174 /etc/apache2/conf.d/mpm.conf \
176 /etc/apache2/conf.d/mpm.conf \
175 /etc/apache2/conf.d/userdir.conf && \
177 /etc/apache2/conf.d/userdir.conf && \
176 rm -f /etc/apache2/sites-enabled/* && \
178 rm -f /etc/apache2/sites-enabled/* && \
177 rm -f /etc/apache2/sites-available/*
179 rm -f /etc/apache2/sites-available/*
178
180
179 # custom SVN virtualhost
181 # custom SVN virtualhost
180 COPY service/svn/virtualhost.conf /etc/apache2/sites-enabled/
182 COPY service/svn/virtualhost.conf /etc/apache2/sites-enabled/
181
183
182 RUN \
184 RUN \
183 echo "**** Apache config ****" && \
185 echo "**** Apache config ****" && \
184 echo $(strings /usr/lib/apache2/modules/mod_dav_svn.so | grep 'Powered by') > /var/opt/dav.version && \
186 echo $(strings /usr/lib/apache2/modules/mod_dav_svn.so | grep 'Powered by') > /var/opt/dav.version && \
185 mkdir -p /run/apache2 && \
187 mkdir -p /run/apache2 && \
186 mkdir -p /var/opt/www && \
188 mkdir -p /var/opt/www && \
187 echo "unset HOME" > /etc/apache2/envvars && \
189 echo "unset HOME" > /etc/apache2/envvars && \
188 echo "export APACHE_RUN_USER=${RC_USER}" >> /etc/apache2/envvars && \
190 echo "export APACHE_RUN_USER=${RC_USER}" >> /etc/apache2/envvars && \
189 echo "export APACHE_PID_FILE=/var/run/apache2/apache2.pid" >> /etc/apache2/envvars && \
191 echo "export APACHE_PID_FILE=/var/run/apache2/apache2.pid" >> /etc/apache2/envvars && \
190 echo "export APACHE_RUN_DIR=/var/run/apache2" >> /etc/apache2/envvars && \
192 echo "export APACHE_RUN_DIR=/var/run/apache2" >> /etc/apache2/envvars && \
191 echo "export APACHE_LOCK_DIR=/var/lock/apache2" >> /etc/apache2/envvars && \
193 echo "export APACHE_LOCK_DIR=/var/lock/apache2" >> /etc/apache2/envvars && \
192 echo "export APACHE_RUN_USER=${RC_USER}" >> /etc/apache2/envvars && \
194 echo "export APACHE_RUN_USER=${RC_USER}" >> /etc/apache2/envvars && \
193 echo "export APACHE_RUN_GROUP=${RC_USER}" >> /etc/apache2/envvars && \
195 echo "export APACHE_RUN_GROUP=${RC_USER}" >> /etc/apache2/envvars && \
194 sed -i "s/Listen 80/Listen ${MOD_DAV_SVN_PORT}/g" /etc/apache2/ports.conf
196 sed -i "s/Listen 80/Listen ${MOD_DAV_SVN_PORT}/g" /etc/apache2/ports.conf
195
197
196
198
197 # Copy artifacts
199 # Copy artifacts
198 COPY --chown=$RC_USER:$RC_USER .source/ /home/$RC_USER/
200
199 COPY --chown=$RC_USER:$RC_USER .cache/* /home/$RC_USER/.rccontrol/cache/
201 COPY --chown=$RC_USER:$RC_USER .cache/* /home/$RC_USER/.rccontrol/cache/
200 COPY --chown=$RC_USER:$RC_USER config/compose/rhodecode_enterprise.license /home/$RC_USER/.rccontrol/bootstrap/
202 COPY --chown=$RC_USER:$RC_USER config/_shared/rhodecode_enterprise.license /home/$RC_USER/.rccontrol/bootstrap/
201 COPY --chown=$RC_USER:$RC_USER service/rhodecode/bootstrap/* /home/$RC_USER/.rccontrol/bootstrap/
203 COPY --chown=$RC_USER:$RC_USER service/rhodecode/bootstrap/* /home/$RC_USER/.rccontrol/bootstrap/
202
204
205 COPY --chown=$RC_USER:$RC_USER .source/ /home/$RC_USER/
206 RUN \
207 echo "** prepare rhodecode dirs **" && \
208 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/.rccontrol/vcsserver-1 && \
209 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/.rccontrol/community-1 && \
210 install -d -m 0755 -o $RC_USER -g $RC_USER /home/$RC_USER/.rccontrol/enterprise-1
211
203 RUN \
212 RUN \
204 echo "**** locale-archive path ****" && \
213 echo "**** locale-archive path ****" && \
205 mv -v /home/$RC_USER/.rccontrol/cache/locale-archive /var/opt/locale-archive
214 mv -v /home/$RC_USER/.rccontrol/cache/locale-archive /var/opt/locale-archive
206
215
207 # change to non-root user for RUN commands
216 # change to non-root user for RUN commands
208 USER $RC_USER
217 USER $RC_USER
209 WORKDIR /home/$RC_USER
218 WORKDIR /home/$RC_USER
210
219
211
220
212 RUN \
221 RUN \
213 echo "** download and install nix ** from $DEV_NIX_VERSION/install" && \
222 echo "** download and install nix ** from $DEV_NIX_VERSION/install" && \
214 curl -L https://releases.nixos.org/nix/nix-$DEV_NIX_VERSION/install | USER=$RC_USER /bin/bash
223 curl -L https://releases.nixos.org/nix/nix-$DEV_NIX_VERSION/install | USER=$RC_USER /bin/bash
215
224
216 RUN \
225 RUN \
217 echo "** update nix package database and set channel to $DEV_NIX_CHANNEL **" && \
226 echo "** update nix package database and set channel to $DEV_NIX_CHANNEL **" && \
218 . /home/rhodecode/.nix-profile/etc/profile.d/nix.sh && \
227 . /home/rhodecode/.nix-profile/etc/profile.d/nix.sh && \
219 nix-channel --add https://channels.nixos.org/$DEV_NIX_CHANNEL nixpkgs && \
228 nix-channel --add https://channels.nixos.org/$DEV_NIX_CHANNEL nixpkgs && \
220 nix-channel --update
229 nix-channel --update
221
230
222
231
223 RUN \
232 RUN \
224 echo "** save nix config **" && \
233 echo "** save nix config **" && \
225 touch /home/$RC_USER/.rhoderc && \
234 touch /home/$RC_USER/.rhoderc && \
226 mkdir -p /home/$RC_USER/.nixpkgs && touch /home/$RC_USER/.nixpkgs/config.nix && \
235 mkdir -p /home/$RC_USER/.nixpkgs && touch /home/$RC_USER/.nixpkgs/config.nix && \
227 printf '{\n rc = {\n sources = {\n rhodecode-vcsserver = "/home/'$RC_USER'/rhodecode-vcsserver";\n rhodecode-enterprise-ce = "/home/'$RC_USER'/rhodecode-enterprise-ce";\n rhodecode-enterprise-ee = "/home/'$RC_USER'/rhodecode-enterprise-ee";\n };\n };\n}\n' > /home/$RC_USER/.nixpkgs/config.nix
236 printf '{\n rc = {\n sources = {\n rhodecode-vcsserver = "/home/'$RC_USER'/rhodecode-vcsserver";\n rhodecode-enterprise-ce = "/home/'$RC_USER'/rhodecode-enterprise-ce";\n rhodecode-enterprise-ee = "/home/'$RC_USER'/rhodecode-enterprise-ee";\n };\n };\n}\n' > /home/$RC_USER/.nixpkgs/config.nix
228
237
229
238
230 RUN \
239 RUN \
231 echo "** install rhodecode control **" && \
240 echo "** install rhodecode control **" && \
232 # cd /home/$RC_USER/.rccontrol/cache && \
241 # cd /home/$RC_USER/.rccontrol/cache && \
233 # INSTALLER=$(ls -Art /home/$RC_USER/.rccontrol/cache/RhodeCode-installer-* | tail -n 1) && \
242 # INSTALLER=$(ls -Art /home/$RC_USER/.rccontrol/cache/RhodeCode-installer-* | tail -n 1) && \
234 # chmod +x ${INSTALLER} && \
243 # chmod +x ${INSTALLER} && \
235 # ${INSTALLER} --accept-license && \
244 # ${INSTALLER} --accept-license && \
236 # ${RCCONTROL} self-init && \
245 # ${RCCONTROL} self-init && \
237 # cp -v /home/$RC_USER/.rccontrol-profile/etc/ca-bundle.crt $BUILD_CONF/ && \
246 # cp -v /home/$RC_USER/.rccontrol-profile/etc/ca-bundle.crt $BUILD_CONF/ && \
238 echo "Done"
247 echo "Done installing rhodecode control"
239
248
240 RUN \
249 RUN \
241 echo "** install build vcsserver ${RHODECODE_VERSION} **" && \
250 echo "** install vcsserver ${RHODECODE_VERSION} **" && \
242 . /home/rhodecode/.nix-profile/etc/profile.d/nix.sh && \
251 . /home/rhodecode/.nix-profile/etc/profile.d/nix.sh && \
243 nix-build --show-trace --cores 0 --max-jobs 4 --no-build-output --out-link rhodecode-vcsserver/result rhodecode-vcsserver/default.nix && \
252 nix-build --show-trace --cores 0 --max-jobs 4 --no-build-output --out-link /home/$RC_USER/.rccontrol/vcsserver-1/profile rhodecode-vcsserver/default.nix && \
244 nix-shell --command 'echo ok' rhodecode-vcsserver/default.nix && \
253 nix-shell --command 'echo COMMAND FROM NIX-SHELL TEST' rhodecode-vcsserver/default.nix && \
245 VCSSERVER_PATH=/home/$RC_USER/rhodecode-vcsserver && \
254 VCSSERVER_PATH=/home/$RC_USER/rhodecode-vcsserver && \
246 rm -rf $BUILD_BIN_DIR/vcs_bin && ln -s ${VCSSERVER_PATH}/result/bin $BUILD_BIN_DIR/vcs_bin && \
255 rm -rf $BUILD_BIN_DIR/vcs_bin && \
247 cp -v ${VCSSERVER_PATH}/configs/production.ini $BUILD_CONF/vcsserver.ini
256 cp -rv --preserve=links /home/$RC_USER/.rccontrol/vcsserver-1/profile/bin $BUILD_BIN_DIR/vcs_bin && \
257 cp -v ${VCSSERVER_PATH}/configs/production.ini $BUILD_CONF/vcsserver.ini && \
258 cp -v ${VCSSERVER_PATH}/configs/gunicorn_config.py $BUILD_CONF/gunicorn_conf_vcs.py && \
259 echo "Done installing vcsserver"
248
260
249 RUN \
261 RUN \
250 echo "** install build Community ${RHODECODE_VERSION} **" && \
262 echo "** install build Community ${RHODECODE_VERSION} **" && \
251 . /home/rhodecode/.nix-profile/etc/profile.d/nix.sh && \
263 . /home/rhodecode/.nix-profile/etc/profile.d/nix.sh && \
252 echo "done"
264 echo "done"
253
265
254 RUN \
266 RUN \
255 echo "** install build Enterprise ${RHODECODE_VERSION} **" && \
267 echo "** install rhodecode ${RHODECODE_VERSION} **" && \
256 . /home/rhodecode/.nix-profile/etc/profile.d/nix.sh && \
268 . /home/rhodecode/.nix-profile/etc/profile.d/nix.sh && \
257 nix-build --show-trace --cores 0 --max-jobs 4 --no-build-output --out-link rhodecode-enterprise-ee/result rhodecode-enterprise-ee/default.nix && \
269 nix-build --show-trace --cores 0 --max-jobs 4 --no-build-output --out-link /home/$RC_USER/.rccontrol/enterprise-1/profile rhodecode-enterprise-ee/default.nix && \
258 nix-shell --command 'echo ok' rhodecode-enterprise-ee/default.nix && \
270 nix-shell --command 'echo ok' rhodecode-enterprise-ee/default.nix && \
259 RHODECODE_PATH=/home/$RC_USER/rhodecode-enterprise-ee && \
271 RHODECODE_PATH=/home/$RC_USER/rhodecode-enterprise-ee && \
260 rm -rf $BUILD_BIN_DIR/bin && ln -s ${RHODECODE_PATH}/result/bin $BUILD_BIN_DIR/ && \
272 rm -rf $BUILD_BIN_DIR/bin && \
273 cp -rv --preserve=links /home/$RC_USER/.rccontrol/enterprise-1/profile/bin $BUILD_BIN_DIR/ && \
261 cp -v ${RHODECODE_PATH}/configs/production.ini $BUILD_CONF/rhodecode.ini && \
274 cp -v ${RHODECODE_PATH}/configs/production.ini $BUILD_CONF/rhodecode.ini && \
262 cp -v ${RHODECODE_PATH}/configs/gunicorn_config.py $BUILD_CONF/gunicorn_conf.py && \
275 cp -v ${RHODECODE_PATH}/configs/gunicorn_config.py $BUILD_CONF/gunicorn_conf.py && \
263 mkdir -p $RHODECODE_DATA_DIR/static && cp -r ${RHODECODE_PATH}/result/etc/static/* $RHODECODE_DATA_DIR/static/
276 mkdir -p $RHODECODE_DATA_DIR/static && cp -r /home/$RC_USER/.rccontrol/enterprise-1/profile/etc/static/* $RHODECODE_DATA_DIR/static/ && \
277 echo "Done installing rhodecode"
264
278
265
279
266 RUN \
280 RUN \
267 echo "** configure supervisord **" && \
281 echo "** configure supervisord **" && \
268 #cp -v ${SUPERVISOR_CONF} $BUILD_CONF/ && \
282 #cp -v ${SUPERVISOR_CONF} $BUILD_CONF/ && \
269 #sed -i "s/self_managed_supervisor = False/self_managed_supervisor = True/g" /home/$RC_USER/.rccontrol.ini && \
283 #sed -i "s/self_managed_supervisor = False/self_managed_supervisor = True/g" /home/$RC_USER/.rccontrol.ini && \
270 echo "done"
284 echo "Done installing supervisord"
271
285
272 USER root
286 USER root
273
287
274
275 RUN \
288 RUN \
276 echo "**** cleanup ****" && \
289 echo "**** cleanup ****" && \
277 apt-get remove -y $PYTHON_DEPS && \
290 apt-get remove -y $PYTHON_DEPS && \
278 apt-get autoclean -y && \
291 apt-get autoclean -y && \
279 rm -f /tmp/* && \
292 rm -f /tmp/* && \
280 rm -f /home/$RC_USER/.rccontrol/cache/RhodeCode-installer-* && \
293 rm -f /home/$RC_USER/.rccontrol/cache/RhodeCode-installer-* && \
281 rm -f /home/$RC_USER/.rccontrol/cache/*.bz2 && \
294 rm -f /home/$RC_USER/.rccontrol/cache/*.bz2 && \
282 rm -rf /var/lib/apt/lists/* \
295 rm -rf /var/lib/apt/lists/* && \
283 rm -rf /var/cache/apk/* \
296 rm -rf /var/cache/apk/* && \
284 rm ${SUPERVISOR_CONF}
297 rm -f ${SUPERVISOR_CONF} && \
298 rm -rf /home/$RC_USER/rhodecode-vcsserver && \
299 rm -rf /home/$RC_USER/rhodecode-enterprise-ce && \
300 rm -rf /home/$RC_USER/rhodecode-enterprise-ee && \
301 echo "Done cleanup"
302
285
303
286 # copy entrypoints
304 # copy entrypoints
287 COPY entrypoints.d/entrypoint.sh /opt/entrypoints.d/entrypoint.sh
305 COPY entrypoints.d/entrypoint.sh /opt/entrypoints.d/entrypoint.sh
306
288 RUN chmod +x /opt/entrypoints.d/entrypoint.sh
307 RUN chmod +x /opt/entrypoints.d/entrypoint.sh
289
308
290 # config volume
309 # config volume
291 VOLUME /etc/rhodecode/conf
310 VOLUME /etc/rhodecode/conf
292
311
293 # repo store volume
312 # repo store volume
294 VOLUME /var/opt/rhodecode_repo_store
313 VOLUME /var/opt/rhodecode_repo_store
295
314
296 # data volume
315 # data volume
297 VOLUME /var/opt/rhodecode_data
316 VOLUME /var/opt/rhodecode_data
298
317
299 # logs volume
318 # logs volume
300 VOLUME /var/log/rhodecode
319 VOLUME /var/log/rhodecode
301
320
302 ENTRYPOINT ["/opt/entrypoints.d/entrypoint.sh"]
321 ENTRYPOINT ["/opt/entrypoints.d/entrypoint.sh"]
303
322
304 # compose can override this
323 # compose can override this
305 CMD ["supervisord", "--nodaemon", "-c", "/etc/rhodecode/conf/supervisord.ini"]
324 CMD [ "/usr/local/bin/rhodecode_bin/bin/gunicorn", "--error-logfile=-", "--paster=/etc/rhodecode/conf_build/run.ini", "--config=/etc/rhodecode/conf_build/gunicorn_conf.py" ]
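For reference, the escaped printf in the "** save nix config **" step expands (with RC_USER=rhodecode, whitespace as emitted) into the following /home/rhodecode/.nixpkgs/config.nix, pointing the nix builds at the three source checkouts copied in from .source/:

    {
     rc = {
     sources = {
     rhodecode-vcsserver = "/home/rhodecode/rhodecode-vcsserver";
     rhodecode-enterprise-ce = "/home/rhodecode/rhodecode-enterprise-ce";
     rhodecode-enterprise-ee = "/home/rhodecode/rhodecode-enterprise-ee";
     };
     };
    }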
@@ -1,76 +1,77 b''
1 FROM debian:buster
1 FROM ubuntu:22.04
2 # Using 22.04 LTS Release
2 MAINTAINER RhodeCode Inc. <support@rhodecode.com>
3 MAINTAINER RhodeCode Inc. <support@rhodecode.com>
3
4
4 # env are runtime/build
5 # env are runtime/build
5 ENV \
6 ENV \
6 TZ="UTC" \
7 TZ="UTC" \
7 RC_USER=rhodecode \
8 RC_USER=rhodecode \
8 RHODECODE_USER_NAME=rhodecode \
9 RHODECODE_USER_NAME=rhodecode \
9 SSHD_CONF_FILE=/etc/rhodecode/sshd_config
10 SSHD_CONF_FILE=/etc/rhodecode/sshd_config
10
11
11 RUN \
12 RUN \
12 echo "** install base packages **" && \
13 echo "** install base packages **" && \
13 set -eux; \
14 set -eux; \
14 \
15 \
15 savedAptMark="$(apt-mark showmanual)"; \
16 savedAptMark="$(apt-mark showmanual)"; \
16 apt-get update; \
17 apt-get update; \
17 apt-get install -y --no-install-recommends \
18 apt-get install -y --no-install-recommends \
18 bash \
19 bash \
19 tzdata \
20 tzdata \
20 vim \
21 vim \
21 openssl \
22 openssl \
22 openssh-server \
23 openssh-server \
23 ; \
24 ; \
24 rm -rf /var/lib/apt/lists/*;
25 rm -rf /var/lib/apt/lists/*;
25
26
26 # # reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
27 # # reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
27 # apt-mark auto '.*' > /dev/null; \
28 # apt-mark auto '.*' > /dev/null; \
28 # [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; \
29 # [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; \
29 # apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false
30 # apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false
30
31
31 # configure the system user
32 # configure the system user
32 # explicitly set uid/gid to guarantee that it won't change in the future
33 # explicitly set uid/gid to guarantee that it won't change in the future
33 # the values 999:999 are identical to the current user/group id assigned
34 # the values 999:999 are identical to the current user/group id assigned
34 RUN \
35 RUN \
35 echo "** Create system user $RC_USER **" && \
36 echo "** Create system user $RC_USER **" && \
36 groupadd --system --gid 999 $RC_USER && \
37 groupadd --system --gid 999 $RC_USER && \
37 useradd --system --gid $RC_USER --uid 999 --shell /bin/bash $RC_USER
38 useradd --system --gid $RC_USER --uid 999 --shell /bin/bash $RC_USER
38
39
39
40
40 RUN \
41 RUN \
41 echo "** prepare rhodecode store and cache **" && \
42 echo "** prepare rhodecode store and cache **" && \
42 install -d -m 0700 -o $RC_USER -g $RC_USER /home/$RC_USER/.ssh
43 install -d -m 0700 -o $RC_USER -g $RC_USER /home/$RC_USER/.ssh
43
44
44 # set the default bash shell
45 # set the default bash shell
45 SHELL ["/bin/bash", "-c"]
46 SHELL ["/bin/bash", "-c"]
46
47
47 # Fix and set a timezone
48 # Fix and set a timezone
48 RUN \
49 RUN \
49 echo "** configure the timezone **" && \
50 echo "** configure the timezone **" && \
50 echo $TZ > /etc/timezone
51 echo $TZ > /etc/timezone
51
52
52 # expose our custom sshd config
53 # expose our custom sshd config
53 COPY service/sshd/sshd_config $SSHD_CONF_FILE
54 COPY service/sshd/sshd_config $SSHD_CONF_FILE
54
55
55 USER root
56 USER root
56
57
57 RUN \
58 RUN \
58 echo "**** cleanup ****" && \
59 echo "**** cleanup ****" && \
59 rm -f /tmp/* && \
60 rm -f /tmp/* && \
60 rm -rf /var/lib/apt/lists/* \
61 rm -rf /var/lib/apt/lists/* \
61 rm -rf /var/cache/apk/*
62 rm -rf /var/cache/apk/*
62
63
63 # copy entrypoints
64 # copy entrypoints
64 COPY entrypoints.d/ssh-entrypoint.sh /opt/entrypoints.d/ssh-entrypoint.sh
65 COPY entrypoints.d/ssh-entrypoint.sh /opt/entrypoints.d/ssh-entrypoint.sh
65 RUN chmod +x /opt/entrypoints.d/ssh-entrypoint.sh
66 RUN chmod +x /opt/entrypoints.d/ssh-entrypoint.sh
66
67
67 # config volume
68 # config volume
68 VOLUME /etc/rhodecode/conf
69 VOLUME /etc/rhodecode/conf
69
70
70 # logs volume
71 # logs volume
71 VOLUME /var/log/rhodecode
72 VOLUME /var/log/rhodecode
72
73
73 ENTRYPOINT ["/opt/entrypoints.d/ssh-entrypoint.sh"]
74 ENTRYPOINT ["/opt/entrypoints.d/ssh-entrypoint.sh"]
74
75
75 # compose can override this
76 # compose can override this
76 CMD ["/usr/sbin/sshd", "-f", "/etc/rhodecode/sshd_config", "-D", "-e"]
77 CMD ["/usr/sbin/sshd", "-f", "/etc/rhodecode/sshd_config", "-D", "-e"]
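A hedged sketch of running the resulting SSH side-container on its own; the image name, volume names and published host port are hypothetical, the container paths match the VOLUME declarations above, and port 22 assumes the custom sshd_config keeps the default listen port:

    docker run -d \
        --name rhodecode-sshd \
        -v rhodecode_conf:/etc/rhodecode/conf \
        -v rhodecode_logs:/var/log/rhodecode \
        -p 9022:22 \
        rhodecode-sshd-image
    # the default CMD then runs: /usr/sbin/sshd -f /etc/rhodecode/sshd_config -D -e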
@@ -1,72 +1,99 b''
1 FROM ubuntu:22.04
1 FROM ubuntu:22.04
2 # Using 22.04 LTS Release
2 # Using 22.04 LTS Release
3 MAINTAINER RhodeCode Inc. <support@rhodecode.com>
3 MAINTAINER RhodeCode Inc. <support@rhodecode.com>
4
4
5
5
6 ENV \
6 ENV \
7 RC_USER=rhodecode \
7 RC_USER=rhodecode \
8 MOD_DAV_SVN_CONF_FILE=/etc/rhodecode/conf/svn/mod_dav_svn.conf
8 # SVN CONFIG
9 MOD_DAV_SVN_CONF_FILE=/etc/rhodecode/conf/svn/mod_dav_svn.conf \
10 MOD_DAV_SVN_PORT=8090
11
12 ENV SVN_LOCALE_DEPS apache2 apache2-utils libapache2-mod-svn subversion
13 #apache2-webdav
9
14
10 RUN \
15 RUN \
11 echo "** install base svn packages **" && \
16 echo "** install svn base packages **" && \
12 apt-get update && \
17 set -eux; \
13 apt-get install --no-cache \
18 \
14 tini \
19 savedAptMark="$(apt-mark showmanual)"; \
15 bash \
20 apt-get update; \
16 curl \
21 DEBIAN_FRONTEND="noninteractive" \
17 apache2 \
22 apt-get install -y --no-install-recommends \
18 apache2-utils \
23 tini \
19 apache2-webdav \
24 bash \
20 mod_dav_svn \
25 binutils \
21 subversion
26 tzdata \
27 locales \
28 openssl \
29 curl \
30 sudo \
31 gosu \
32 bzip2 \
33 ca-certificates \
34 $SVN_LOCALE_DEPS \
35 ; \
36 rm -rf /var/lib/apt/lists/*;
37
22
38
23 # configure the system user
39 # configure the system user
24 # explicitly set uid/gid to guarantee that it won't change in the future
40 # explicitly set uid/gid to guarantee that it won't change in the future
25 # the values 999:999 are identical to the current user/group id assigned
41 # the values 999:999 are identical to the current user/group id assigned
26 RUN \
42 RUN \
27 echo "** Create system user $RC_USER **" && \
43 echo "** Create system user $RC_USER **" && \
28 groupadd --system --gid 999 $RC_USER && \
44 groupadd --system --gid 999 $RC_USER && \
29 useradd --system --gid $RC_USER --uid 999 --shell /bin/bash $RC_USER
45 useradd --system --gid $RC_USER --uid 999 --shell /bin/bash $RC_USER && \
46 usermod -G $RC_USER $RC_USER
47
48 # Apache SVN setup
49 RUN \
50 echo "**** Apache config cleanup ****" && \
51 rm -f /etc/apache2/conf.d/info.conf \
52 /etc/apache2/conf.d/mpm.conf \
53 /etc/apache2/conf.d/userdir.conf && \
54 rm -f /etc/apache2/sites-enabled/* && \
55 rm -f /etc/apache2/sites-available/*
30
56
57 # custom SVN virtualhost
58 COPY service/svn/virtualhost.conf /etc/apache2/sites-enabled/
59
60 RUN \
61 echo "**** Apache config ****" && \
62 echo $(strings /usr/lib/apache2/modules/mod_dav_svn.so | grep 'Powered by') > /var/opt/dav.version && \
63 mkdir -p /run/apache2 && \
64 mkdir -p /var/opt/www && \
65 echo "unset HOME" > /etc/apache2/envvars && \
66 echo "export APACHE_RUN_USER=${RC_USER}" >> /etc/apache2/envvars && \
67 echo "export APACHE_PID_FILE=/var/run/apache2/apache2.pid" >> /etc/apache2/envvars && \
68 echo "export APACHE_RUN_DIR=/var/run/apache2" >> /etc/apache2/envvars && \
69 echo "export APACHE_LOCK_DIR=/var/lock/apache2" >> /etc/apache2/envvars && \
70 echo "export APACHE_RUN_USER=${RC_USER}" >> /etc/apache2/envvars && \
71 echo "export APACHE_RUN_GROUP=${RC_USER}" >> /etc/apache2/envvars && \
72 sed -i "s/Listen 80/Listen ${MOD_DAV_SVN_PORT}/g" /etc/apache2/ports.conf
73
74 USER root
31
75
32 RUN \
76 RUN \
33 echo "**** cleanup ****" && \
77 echo "**** cleanup ****" && \
34 apt-get purge tzdata python2 && \
78 apt-get autoclean -y && \
35 rm -f /tmp/* && \
79 rm -f /tmp/* && \
36 rm -rf /var/lib/apt/lists/* \
80 rm -rf /var/lib/apt/lists/* \
37 rm -rf /var/cache/apk/*
81 rm -rf /var/cache/apk/*
38
82
39 RUN \
40 echo "**** Apache config cleanup ****" && \
41 rm -f /etc/apache2/conf.d/info.conf \
42 /etc/apache2/conf.d/mpm.conf \
43 /etc/apache2/conf.d/userdir.conf
44
45
46 COPY svn/virtualhost.conf /etc/apache2/conf.d/
47
83
48 # copy entrypoints
84 # copy entrypoints
49 COPY entrypoints.d/svn-entrypoint.sh /opt/entrypoints.d/svn-entrypoint.sh
85 COPY entrypoints.d/svn-entrypoint.sh /opt/entrypoints.d/svn-entrypoint.sh
50 RUN chmod +x /opt/entrypoints.d/svn-entrypoint.sh
86 RUN chmod +x /opt/entrypoints.d/svn-entrypoint.sh
51
87
52 RUN \
88 # config volume
53 echo $(strings /usr/lib/apache2/mod_dav_svn.so | grep 'Powered by') > /var/opt/dav.version && \
89 VOLUME /etc/rhodecode/conf
54 mkdir -p /run/apache2 && \
55 mkdir -p /var/opt/www && \
56 echo "export APACHE_RUN_USER=${RC_USER}" > /etc/apache2/envvars && \
57 echo "export APACHE_RUN_GROUP=${RC_USER}" >> /etc/apache2/envvars && \
58 sed -i "s/User apache/User ${RC_USER}/g" /etc/apache2/httpd.conf && \
59 sed -i "s/Group apache/Group ${RC_USER}/g" /etc/apache2/httpd.conf
60
90
61 # repo store volume
91 # repo store volume
62 VOLUME /var/opt/rhodecode_repo_store
92 VOLUME /var/opt/rhodecode_repo_store
63
93
64 # config volume
65 VOLUME /etc/rhodecode/conf
66
67 # logs volume
94 # logs volume
68 VOLUME /var/log/rhodecode
95 VOLUME /var/log/rhodecode
69
96
70 ENTRYPOINT ["/opt/entrypoints.d/svn-entrypoint.sh"]
97 ENTRYPOINT ["/opt/entrypoints.d/svn-entrypoint.sh"]
71
98
72 CMD ["apachectl", "-D", "FOREGROUND"]
99 CMD ["apachectl", "-D", "FOREGROUND"] No newline at end of file
@@ -1,22 +1,33 b''
1 LoadModule headers_module /usr/lib/apache2/modules/mod_headers.so
1 LoadModule headers_module /usr/lib/apache2/modules/mod_headers.so
2 LoadModule authn_anon_module /usr/lib/apache2/modules/mod_authn_anon.so
2 LoadModule authn_anon_module /usr/lib/apache2/modules/mod_authn_anon.so
3 LoadModule dav_svn_module /usr/lib/apache2/modules/mod_dav_svn.so
3 LoadModule dav_svn_module /usr/lib/apache2/modules/mod_dav_svn.so
4
4
5 ErrorLogFormat "{ \"time\":\"%{%Y-%m-%d}tT%{%T}t.%{msec_frac}tZ\", \"function\" : \"[%-m:%l]\" , \"process\" : \"[pid %P:tid %T]\" , \"message\" : \"%M\" ,\ \"referer\"\ : \"%{Referer}i\" }"
6 LogFormat "{ \"time\":\"%{%Y-%m-%d}tT%{%T}t.%{msec_frac}tZ\", \"process\":\"%D\", \"filename\":\"%f\", \"remoteIP\":\"%a\", \"host\":\"%V\", \"request\":\"%U\", \"query\":\"%q\", \"method\":\"%m\", \"status\":\"%>s\", \"userAgent\":\"%{User-agent}i\", \"referer\":\"%{Referer}i\" }" combined
7
8 ErrorLog /dev/stderr
9 CustomLog /dev/stdout combined
10
11
5 <VirtualHost *:${MOD_DAV_SVN_PORT}>
12 <VirtualHost *:${MOD_DAV_SVN_PORT}>
6 ServerAdmin admin@localhost
13 ServerAdmin admin@localhost
7 DocumentRoot /var/opt/www
14 DocumentRoot /var/opt/www
8 ErrorLog ${APACHE_LOG_DIR}/svn_error.log
15
9 CustomLog ${APACHE_LOG_DIR}/svn_access.log combined
16 ErrorLogFormat "{ \"time\":\"%{%Y-%m-%d}tT%{%T}t.%{msec_frac}tZ\", \"function\" : \"[%-m:%l]\" , \"process\" : \"[pid %P:tid %T]\" , \"message\" : \"%M\" ,\ \"referer\"\ : \"%{Referer}i\" }"
17 LogFormat "{ \"time\":\"%{%Y-%m-%d}tT%{%T}t.%{msec_frac}tZ\", \"process\":\"%D\", \"filename\":\"%f\", \"remoteIP\":\"%a\", \"host\":\"%V\", \"request\":\"%U\", \"query\":\"%q\", \"method\":\"%m\", \"status\":\"%>s\", \"userAgent\":\"%{User-agent}i\", \"referer\":\"%{Referer}i\" }" combined
18
19 ErrorLog /dev/stderr
20 CustomLog /dev/stdout combined
10 LogLevel info
21 LogLevel info
11
22
12 <Location /_server_status>
23 <Location /_server_status>
13 Require ip 127
24 Require ip 127
14 SetHandler server-status
25 SetHandler server-status
15 Require all granted
26 Require all granted
16 </Location>
27 </Location>
17
28
18 # allows custom host names, prevents 400 errors on checkout
29 # allows custom host names, prevents 400 errors on checkout
19 HttpProtocolOptions Unsafe
30 HttpProtocolOptions Unsafe
20
31
21 Include ${MOD_DAV_SVN_CONF_FILE}
32 Include ${MOD_DAV_SVN_CONF_FILE}
22 </VirtualHost> No newline at end of file
33 </VirtualHost>
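The Include directive at the end pulls in ${MOD_DAV_SVN_CONF_FILE} (/etc/rhodecode/conf/svn/mod_dav_svn.conf in the Dockerfiles above), which lives on the shared config volume and is expected to be provided or generated at runtime rather than baked into the image. Purely as an illustration -- the location path below is hypothetical -- a minimal mod_dav_svn block serving the repo store could look like:

    <Location /_my_repos>
        DAV svn
        # parent path matching the repo store volume used elsewhere in this stack
        SVNParentPath /var/opt/rhodecode_repo_store
        SVNListParentPath On
        Require all granted
    </Location>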
1 NO CONTENT: file was removed
NO CONTENT: file was removed
1 NO CONTENT: file was removed
NO CONTENT: file was removed
1 NO CONTENT: file was removed
NO CONTENT: file was removed
1 NO CONTENT: file was removed
NO CONTENT: file was removed
This diff has been collapsed as it changes many lines (746 lines changed).
1 NO CONTENT: file was removed
NO CONTENT: file was removed
1 NO CONTENT: file was removed
NO CONTENT: file was removed
1 NO CONTENT: file was removed
NO CONTENT: file was removed
1 NO CONTENT: file was removed
NO CONTENT: file was removed
1 NO CONTENT: file was removed
NO CONTENT: file was removed
1 NO CONTENT: file was removed
NO CONTENT: file was removed
1 NO CONTENT: file was removed
NO CONTENT: file was removed