docs: added expire static files nginx directive for better caching
marcink - r4061:84a1ac96 default
Nginx Configuration Example
---------------------------

Use the following example to configure Nginx as your web server.


.. code-block:: nginx

    ## Rate limiter for certain pages to prevent brute force attacks
    limit_req_zone $binary_remote_addr zone=req_limit:10m rate=1r/s;

    ## cache zone
    proxy_cache_path /etc/nginx/nginx_cache levels=1:2 use_temp_path=off keys_zone=cache_zone:10m inactive=720h max_size=10g;

    ## Custom log format
    log_format log_custom '$remote_addr - $remote_user [$time_local] '
                          '"$request" $status $body_bytes_sent '
                          '"$http_referer" "$http_user_agent" '
                          '$request_time $upstream_response_time $pipe';

    ## Define one or more upstreams (local RhodeCode instance) to connect to
    upstream rc {
        # URL of a running RhodeCode instance.
        # This is shown as `- URL: <host>` in output from rccontrol status.
        server 127.0.0.1:10002;

        # add more instances for load balancing
        # server 127.0.0.1:10003;
        # server 127.0.0.1:10004;
    }

    ## HTTP to HTTPS rewrite
    server {
        listen 80;
        server_name rhodecode.myserver.com;

        if ($http_host = rhodecode.myserver.com) {
            rewrite (.*) https://rhodecode.myserver.com$1 permanent;
        }
    }
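
    ## Note: an equivalent, commonly preferred form of the above redirect uses
    ## a single `return` instead of `if` + `rewrite` (a sketch; use one or the other):
    # server {
    #     listen 80;
    #     server_name rhodecode.myserver.com;
    #     return 301 https://rhodecode.myserver.com$request_uri;
    # }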

    ## Optional gist alias server, for serving nicer GIST urls.
    server {
        listen 443;
        server_name gist.myserver.com;
        access_log /var/log/nginx/gist.access.log log_custom;
        error_log /var/log/nginx/gist.error.log;

        ssl on;
        ssl_certificate gist.rhodecode.myserver.com.crt;
        ssl_certificate_key gist.rhodecode.myserver.com.key;

        ssl_session_timeout 5m;

        ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
        ssl_prefer_server_ciphers on;
        ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA';

        ## Strict-Transport-Security prevents https -> http downgrade
        add_header Strict-Transport-Security "max-age=31536000; includeSubdomains;";

        ## Diffie-Hellman parameter for DHE ciphersuites, recommended 2048 bits
        #ssl_dhparam /etc/nginx/ssl/dhparam.pem;

        rewrite ^/(.+)$ https://rhodecode.myserver.com/_admin/gists/$1;
        rewrite (.*) https://rhodecode.myserver.com/_admin/gists;
    }


    ## MAIN SSL enabled server
    server {
        listen 443 ssl http2;
        server_name rhodecode.myserver.com;

        access_log /var/log/nginx/rhodecode.access.log log_custom;
        error_log /var/log/nginx/rhodecode.error.log;

        ssl_certificate rhodecode.myserver.com.crt;
        ssl_certificate_key rhodecode.myserver.com.key;

        # enable session resumption to improve https performance
        # http://vincent.bernat.im/en/blog/2011-ssl-session-reuse-rfc5077.html
        ssl_session_cache shared:SSL:50m;
        ssl_session_timeout 5m;

        ## Diffie-Hellman parameter for DHE ciphersuites, recommended 2048 bits
        #ssl_dhparam /etc/nginx/ssl/dhparam.pem;

        # enables server-side protection from BEAST attacks
        # http://blog.ivanristic.com/2013/09/is-beast-still-a-threat.html
        ssl_prefer_server_ciphers on;

        # disable SSLv3 (enabled by default since nginx 0.8.19) since it's less secure than TLS
        # http://en.wikipedia.org/wiki/Secure_Sockets_Layer#SSL_3.0
        ssl_protocols TLSv1 TLSv1.1 TLSv1.2;

        # ciphers chosen for forward secrecy and compatibility
        # http://blog.ivanristic.com/2013/08/configuring-apache-nginx-and-openssl-for-forward-secrecy.html
        ssl_ciphers "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES256-GCM-SHA384:AES128-GCM-SHA256:AES256-SHA256:AES128-SHA256:AES256-SHA:AES128-SHA:DES-CBC3-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4";

        client_body_buffer_size 128k;
        # maximum number and size of buffers for large headers to read from client request
        large_client_header_buffers 16 256k;

        ## uncomment to serve static files by Nginx, recommended for performance
        # location /_static/rhodecode {
        #     gzip on;
        #     gzip_min_length 500;
        #     gzip_proxied any;
        #     gzip_comp_level 4;
        #     gzip_types text/css text/javascript text/xml text/plain text/x-component application/javascript application/json application/xml application/rss+xml font/truetype font/opentype application/vnd.ms-fontobject image/svg+xml;
        #     gzip_vary on;
        #     gzip_disable "msie6";
        #     expires 60d;
        #     alias /path/to/.rccontrol/community-1/static;
        #     alias /path/to/.rccontrol/enterprise-1/static;
        # }

        ## channelstream location handler; if channelstream live chat and notifications
        ## are enabled, this will proxy the requests to the channelstream websocket server
        location /_channelstream {
            rewrite /_channelstream/(.*) /$1 break;
            gzip off;
            tcp_nodelay off;

            proxy_connect_timeout 10;
            proxy_send_timeout 10m;
            proxy_read_timeout 10m;

            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Url-Scheme $scheme;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "upgrade";

            proxy_pass http://127.0.0.1:9800;
        }

        ## rate limit this endpoint to prevent login page brute-force attacks
        location /_admin/login {
            limit_req zone=req_limit burst=10 nodelay;
            try_files $uri @rhodecode_http;
        }

        ## Special cache for the file store; make sure you enable this intentionally, as
        ## it could bypass permission checks on uploaded files
        # location /_file_store/download {
        #
        #     proxy_cache cache_zone;
        #     # ignore Set-Cookie
        #     proxy_ignore_headers Set-Cookie;
        #     proxy_ignore_headers Cookie;
        #
        #     proxy_cache_key $host$uri$is_args$args;
        #     proxy_cache_methods GET;
        #
        #     proxy_cache_bypass $http_cache_control;
        #     proxy_cache_valid 200 302 720h;
        #
        #     proxy_cache_use_stale error timeout http_500 http_502 http_503 http_504;
        #
        #     # returns cache status in headers
        #     add_header X-Proxy-Cache $upstream_cache_status;
        #     add_header Cache-Control "public";
        #
        #     proxy_cache_lock on;
        #     proxy_cache_lock_age 5m;
        #
        #     proxy_pass http://rc;
        #
        # }

        location / {
            try_files $uri @rhodecode_http;
        }

        location @rhodecode_http {
            # an example of proxy.conf can be found in our docs; a sketch follows below.
            include /etc/nginx/proxy.conf;
            proxy_pass http://rc;
        }

        ## Custom 502 error page.
        ## Will be displayed while RhodeCode server is turned off
        error_page 502 /502.html;
        location = /502.html {
            #root /path/to/.rccontrol/community-1/static;
            root /path/to/.rccontrol/enterprise-1/static;
        }
    }
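
An example of :file:`/etc/nginx/proxy.conf` can be found in our docs; the sketch
below shows the kind of directives such a file typically contains. It is an
illustrative minimal version, not the definitive file; tune sizes and timeouts
to your setup.

.. code-block:: nginx

    proxy_redirect              off;
    proxy_set_header            Host $http_host;
    proxy_set_header            X-Real-IP $remote_addr;
    proxy_set_header            X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header            X-Forwarded-Proto $scheme;

    ## allow large pushes over HTTP
    client_max_body_size        4g;
    proxy_buffering             off;
    proxy_connect_timeout       7200;
    proxy_send_timeout          7200;
    proxy_read_timeout          7200;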
.. _scale-horizontal-cluster:


Scale Horizontally / RhodeCode Cluster
--------------------------------------

|RCE| is built in a way that supports horizontal scaling across multiple machines.
There are three main prerequisites for that:

- Shared storage that each machine can access, using NFS or another shared storage system.
- A shared DB connection across machines, using `MySQL`/`PostgreSQL` that each node can access.
- |RCE| user sessions and caches need to use a shared storage (e.g. `Redis`_/`Memcached`)


Horizontal scaling means adding more machines or workers into your pool of
resources. Horizontally scaling |RCE| gives a huge performance increase,
especially under large traffic scenarios with a high number of requests.
This is very beneficial when |RCE| is serving many users simultaneously,
or if continuous integration servers are automatically pulling and pushing code.
It also adds high availability to your running system.


Cluster Overview
^^^^^^^^^^^^^^^^

Below we'll present a configuration example that will use two separate nodes to serve
|RCE| in a load-balanced environment. The 3rd node will act as a shared storage/cache
and handle load-balancing. In addition, the 3rd node will be used as a shared database instance.

This setup can be used either in a Docker-based configuration or with individual
physical/virtual machines. Using the 3rd node for Storage/Redis/PostgreSQL/Nginx is
optional. All those components can be installed on one of the two nodes used for |RCE|.
We'll use the following naming for our nodes:

- `rc-node-1` (NFS, DB, Cache node)
- `rc-node-2` (Worker node1)
- `rc-node-3` (Worker node2)

Our shared NFS storage in this example is located at `/home/rcdev/storage` and
is RW accessible on **each** node.

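For reference, here is a sketch of how such a share might be exported from
`rc-node-1` and mounted on the worker nodes. The export options are illustrative
assumptions; tune them to your environment::

    # on rc-node-1, /etc/exports
    /home/rcdev/storage  rc-node-2(rw,sync,no_subtree_check)  rc-node-3(rw,sync,no_subtree_check)

    # on rc-node-2 and rc-node-3
    mount -t nfs rc-node-1:/home/rcdev/storage /home/rcdev/storage
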
In this example we use certain recommended components, however many
of those can be replaced by others, in case your organization already uses them, for example:

- `MySQL`/`PostgreSQL`: aren't replaceable and are the only two supported databases.
- `Nginx`_ on `rc-node-1` can be replaced by: `Hardware Load Balancer (F5)`, `Apache`_, `HA-Proxy` etc.
- `Nginx`_ on rc-node-2/3 acts as a reverse proxy and can be replaced by another HTTP server
  acting as a reverse proxy, such as `Apache`_.
- `Redis`_ on `rc-node-1` can be replaced by: `Memcached`


Here's an overview of which components should be installed/set up on each server in our example:

- **rc-node-1**:

  - main storage acting as NFS host.
  - `nginx` acting as a load-balancer.
  - `postgresql-server` used for database and sessions.
  - `redis-server` used for storing shared caches.
  - optionally `rabbitmq-server` or `redis` for `Celery` if used.
  - optionally, if `Celery` is used, an Enterprise/Community instance + VCSServer.
  - optionally a mail server that can be shared by other instances.
  - optionally a channelstream server to handle live communication for all instances.


- **rc-node-2/3**:

  - `nginx` acting as a reverse proxy to handle requests to |RCE|.
  - 1x RhodeCode Enterprise/Community instance.
  - 1x VCSServer instance.
  - optionally for testing connections: postgresql-client, redis-client (redis-tools).


Before we start, here are a few assumptions that should be fulfilled:

- make sure each node can access the others.
- make sure `Redis`_/`MySQL`/`PostgreSQL`/`RabbitMQ`_ are running on `rc-node-1`
- make sure both `rc-node-2`/`3` can access the NFS storage with RW access
- make sure rc-node-2/3 can access the `Redis`_/`PostgreSQL`, `MySQL` database on `rc-node-1`.
- make sure `Redis`_/Database/`RabbitMQ`_ are password protected and accessible
  only from rc-node-2/3; a sketch of such restrictions follows below.

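As a sketch of such restrictions, on `rc-node-1` Redis can be bound to the internal
interface and password protected, and PostgreSQL can limit access to the worker nodes.
The addresses and password below are illustrative assumptions::

    # /etc/redis/redis.conf (sketch)
    bind 10.0.0.1
    requirepass qweqwe

    # pg_hba.conf (sketch): allow only the worker nodes
    host    rhodecode    all    10.0.0.2/32    md5
    host    rhodecode    all    10.0.0.3/32    md5

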
Setup rc-node-2/3
^^^^^^^^^^^^^^^^^

Initially, before `rc-node-1`, we'll configure both nodes 2 and 3 to operate as standalone
nodes with their own hostnames. Use default installation settings, and use
the default local addresses (127.0.0.1) to configure VCSServer and Community/Enterprise instances.
All external connectivity will be handled by the reverse proxy (`Nginx`_ in our example).

This way we can ensure each individual host works and
accepts connections, and we can perform some operations explicitly on a chosen node.

In addition, this allows us to explicitly direct certain traffic to a node, e.g.
a CI server can call `rc-node-3` directly. This should be done similarly to a normal
installation, so check out the `Nginx`_/`Apache`_ configuration examples to configure each host.
Each one should already connect to the shared database during installation.


1) Assuming our final URL will be http://rc-node-1, configure `instance_id` and `app.base_url`.

   a) On **rc-node-2** find the following settings and edit :file:`/home/{user}/.rccontrol/{instance-id}/rhodecode.ini`

   .. code-block:: ini

       ## required format is: *NAME-
       instance_id = *rc-node-2-
       app.base_url = http://rc-node-1


   b) On **rc-node-3** find the following settings and edit :file:`/home/{user}/.rccontrol/{instance-id}/rhodecode.ini`

   .. code-block:: ini

       ## required format is: *NAME-
       instance_id = *rc-node-3-
       app.base_url = http://rc-node-1


2) Configure `User Session` to use a shared database. Example config that should be
   changed on both **rc-node-2** and **rc-node-3**.
   Edit :file:`/home/{user}/.rccontrol/{instance-id}/rhodecode.ini`

   .. code-block:: ini

       ####################################
       ###       BEAKER SESSION        ####
       ####################################

       ## Disable the default `file` sessions
       #beaker.session.type = file
       #beaker.session.data_dir = %(here)s/data/sessions

       ## use a shared db-based session; fast, and allows easy management of logged-in users
       beaker.session.type = ext:database
       beaker.session.table_name = db_session
       # use our rc-node-1 here
       beaker.session.sa.url = postgresql://postgres:qweqwe@rc-node-1/rhodecode
       beaker.session.sa.pool_recycle = 3600
       beaker.session.sa.echo = false

In addition, make sure both instances use the same `session.secret` so users have
persistent sessions across nodes. Please generate a different one than in this example.

.. code-block:: ini

    # use a unique generated long string
    beaker.session.secret = 70e116cae2274656ba7265fd860aebbd

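One way to generate such a unique random value (any long random string works)::

    python3 -c "import secrets; print(secrets.token_hex(16))"
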
3) Configure the cache and archive cache to use our shared NFS storage on `rc-node-1`.

   .. code-block:: ini

       # note the `_` prefix that allows using a directory without
       # remap and rescan checking for vcs inside it.
       cache_dir = /home/rcdev/storage/_cache_dir/data
       # note the archive cache dir is disabled by default; however if you enable
       # it, it also needs to be shared
       #archive_cache_dir = /home/rcdev/storage/_tarball_cache_dir


4) Use a shared exception store. Example config that should be
   changed on both **rc-node-2** and **rc-node-3**, and also for VCSServer.
   Edit :file:`/home/{user}/.rccontrol/{instance-id}/rhodecode.ini` and
   :file:`/home/{user}/.rccontrol/{vcsserver-instance-id}/vcsserver.ini`
   and add/change the following setting.

   .. code-block:: ini

       exception_tracker.store_path = /home/rcdev/storage/_exception_store_data

5) Change cache backends to use `Redis`_ based caches. Below is a full example config
   that replaces the default file-based cache with a shared `Redis`_ cache using a distributed lock.


   .. code-block:: ini

       #####################################
       ###         DOGPILE CACHE        ####
       #####################################

       ## `cache_perms` cache settings for permission tree, auth TTL.
       #rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
       #rc_cache.cache_perms.expiration_time = 300

       ## alternative `cache_perms` redis backend with distributed lock
       rc_cache.cache_perms.backend = dogpile.cache.rc.redis
       rc_cache.cache_perms.expiration_time = 300
       ## redis_expiration_time needs to be greater than expiration_time
       rc_cache.cache_perms.arguments.redis_expiration_time = 7200
       rc_cache.cache_perms.arguments.socket_timeout = 30
       rc_cache.cache_perms.arguments.host = rc-node-1
       rc_cache.cache_perms.arguments.password = qweqwe
       rc_cache.cache_perms.arguments.port = 6379
       rc_cache.cache_perms.arguments.db = 0
       rc_cache.cache_perms.arguments.distributed_lock = true

       ## `cache_repo` cache settings for FileTree, Readme, RSS FEEDS
       #rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
       #rc_cache.cache_repo.expiration_time = 2592000

       ## alternative `cache_repo` redis backend with distributed lock
       rc_cache.cache_repo.backend = dogpile.cache.rc.redis
       rc_cache.cache_repo.expiration_time = 2592000
       ## redis_expiration_time needs to be greater than expiration_time
       rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
       rc_cache.cache_repo.arguments.socket_timeout = 30
       rc_cache.cache_repo.arguments.host = rc-node-1
       rc_cache.cache_repo.arguments.password = qweqwe
       rc_cache.cache_repo.arguments.port = 6379
       rc_cache.cache_repo.arguments.db = 1
       rc_cache.cache_repo.arguments.distributed_lock = true

       ## cache settings for SQL queries; this needs to use a memory type backend
       rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
       rc_cache.sql_cache_short.expiration_time = 30

       ## `cache_repo_longterm` cache for repo object instances; this needs to use a memory
       ## type backend as the objects kept are not pickle serializable
       rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
       ## by default we use 96H, this is using invalidation on push anyway
       rc_cache.cache_repo_longterm.expiration_time = 345600
       ## max items in LRU cache; reduce this number to save memory, and expire last used
       ## cached objects
       rc_cache.cache_repo_longterm.max_size = 10000

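   Before moving on, it may help to verify from `rc-node-2`/`3` that the shared
   services on `rc-node-1` are reachable, using the client tools mentioned
   earlier (a sketch; adjust credentials to your setup)::

       redis-cli -h rc-node-1 -a qweqwe ping
       psql -h rc-node-1 -U postgres -d rhodecode -c 'SELECT 1;'
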

6) Configure `Nginx`_ as a reverse proxy on `rc-node-2/3`.
   Minimal `Nginx`_ config used:


   .. code-block:: nginx

       ## rate limiter for certain pages to prevent brute force attacks
       limit_req_zone $binary_remote_addr zone=req_limit:10m rate=1r/s;

       ## custom log format
       log_format log_custom '$remote_addr - $remote_user [$time_local] '
                             '"$request" $status $body_bytes_sent '
                             '"$http_referer" "$http_user_agent" '
                             '$request_time $upstream_response_time $pipe';

       server {
           listen 80;
           server_name rc-node-2;
           #server_name rc-node-3;

           access_log /var/log/nginx/rhodecode.access.log log_custom;
           error_log /var/log/nginx/rhodecode.error.log;

           # an example of proxy.conf can be found in our docs.
           include /etc/nginx/proxy.conf;

           ## serve static files by Nginx, recommended for performance
           location /_static/rhodecode {
               gzip on;
               gzip_min_length 500;
               gzip_proxied any;
               gzip_comp_level 4;
               gzip_types text/css text/javascript text/xml text/plain text/x-component application/javascript application/json application/xml application/rss+xml font/truetype font/opentype application/vnd.ms-fontobject image/svg+xml;
               gzip_vary on;
               gzip_disable "msie6";
               expires 60d;
               #alias /home/rcdev/.rccontrol/community-1/static;
               alias /home/rcdev/.rccontrol/enterprise-1/static;
           }


           location /_admin/login {
               limit_req zone=req_limit burst=10 nodelay;
               try_files $uri @rhode;
           }

           location / {
               try_files $uri @rhode;
           }

           location @rhode {
               # URL of the running RhodeCode instance.
               # This is shown as `- URL: <host>` in output from rccontrol status.
               proxy_pass http://127.0.0.1:10020;
           }

           ## custom 502 error page. Will be displayed while RhodeCode server
           ## is turned off
           error_page 502 /502.html;
           location = /502.html {
               #root /home/rcdev/.rccontrol/community-1/static;
               root /home/rcdev/.rccontrol/enterprise-1/static;
           }
       }

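   At this point each worker node should serve |RCE| through its local `Nginx`_.
   A quick smoke test from any machine that can reach the nodes (a sketch)::

       curl -I http://rc-node-2/
       curl -I http://rc-node-3/
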

7) Optional: full text search. In case you use `Whoosh` full text search we also need a
   shared storage for the index. In our example our NFS is mounted at `/home/rcdev/storage`,
   which represents our storage, so we can use the following:

   .. code-block:: ini

       # note the `_` prefix that allows using a directory without
       # remap and rescan checking for vcs inside it.
       search.location = /home/rcdev/storage/_index_data/index


   .. note::

      If you use Elasticsearch it's shared by default, and simply running an ES node
      is cluster-compatible out of the box.


8) Optional: if you intend to use mailing, all instances need to use either a shared
   mail node, or each will use an individual local mail agent. Simply put, node-1/2/3
   need to use the same mailing configuration.



Setup rc-node-1
^^^^^^^^^^^^^^^


Configure `Nginx`_ as a load balancer for rc-node-2/3.
Minimal `Nginx`_ example below:

.. code-block:: nginx

    ## define rc-cluster which contains a pool of our instances to connect to
    upstream rc-cluster {
        # rc-node-2/3 are stored in /etc/hosts with correct IP addresses
        server rc-node-2:80;
        server rc-node-3:80;
    }

    server {
        listen 80;
        server_name rc-node-1;

        location / {
            proxy_pass http://rc-cluster;
        }
    }


.. note::

   You should configure your load balancing accordingly. We recommend writing
   load balancing rules that will separate regular user traffic from
   automated process traffic like continuous integration servers or build bots.
   Sticky sessions are not required; a sketch of such a rule follows below.


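A sketch of such a rule, routing known build-bot user agents to a dedicated
node (the agent names and the `rc-ci` pool below are illustrative assumptions):

.. code-block:: nginx

    ## choose the pool based on the client user agent
    map $http_user_agent $rc_pool {
        default                        rc-cluster;
        "~*(jenkins|bamboo|teamcity)"  rc-ci;
    }

    upstream rc-ci {
        server rc-node-3:80;
    }

    server {
        listen 80;
        server_name rc-node-1;

        location / {
            # nginx resolves $rc_pool against the upstreams defined above
            proxy_pass http://$rc_pool;
        }
    }
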
Show which instance handles a request
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

You can easily check if load-balancing is working as expected. Visit our main node
`rc-node-1` URL, which at that point should already handle incoming requests and balance
them across node-2/3.

Add a special GET param `?showrcid=1` to show the current instance handling your request.

For example: visiting the URL `http://rc-node-1/?showrcid=1` will show, at the bottom
of the screen, the cluster instance info,
e.g: `RhodeCode instance id: rc-node-3-rc-node-3-3246`,
which is generated from::

    <NODE_HOSTNAME>-<INSTANCE_ID>-<WORKER_PID>


Using Celery with cluster
^^^^^^^^^^^^^^^^^^^^^^^^^


If `Celery` is used we recommend also setting up an instance of Enterprise/Community+VCSServer
on the node that is running `RabbitMQ`_ or `Redis`_. Those instances will be used to
execute async tasks on `rc-node-1`. This is the most efficient setup.
`Celery` usually handles tasks such as sending emails, forking repositories, or importing
repositories from an external location. Using workers on the instance that has
direct access to the disks used by NFS, as well as to the email server, gives a
noticeable performance boost. Running workers local to the NFS storage results in
faster execution of forking large repositories or sending lots of emails.

Those instances need to be configured in the same way as the other nodes, as shown
in the sketch below.
The instance on rc-node-1 can be added to the cluster, but we don't recommend doing it.
For best results let it be isolated to only executing `Celery` tasks in the cluster setup.

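A sketch of the relevant :file:`rhodecode.ini` settings on those instances, assuming
the `Redis`_ broker is used (the exact setting names can differ between |RCE| versions,
so compare with your generated ini file):

.. code-block:: ini

    # enable celery and point the broker at rc-node-1 (illustrative values)
    use_celery = true
    celery.broker_url = redis://:qweqwe@rc-node-1:6379/8
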
.. _Gunicorn: http://gunicorn.org/
.. _Whoosh: https://pypi.python.org/pypi/Whoosh/
.. _Elasticsearch: https://www.elastic.co/
.. _RabbitMQ: http://www.rabbitmq.com/
.. _Nginx: http://nginx.org/
.. _Apache: http://httpd.apache.org/
.. _Redis: http://redis.io