docs: updated scaling/cluster docs
marcink -
r3052:3b733c3c default
@@ -1,14 +1,14 b''
1 .. _apache-ws-ref:
1 .. _apache-ws-ref:
2
2
3 Apache HTTP Server Configuration
3 Apache HTTP Server Configuration
4 --------------------------------
4 --------------------------------
5
5
6 To set up your Apache Web Server for optimal performance and security, use
6 To set up your Apache Web Server for optimal performance and security, use
7 the information in the following sections.
7 the information in the following sections.
8
8
9 .. toctree::
9 .. toctree::
10
10
11 apache-conf-example
11 apache-diffie-hellman
12 apache-diffie-hellman
12 apache-conf-example
13 apache-subdirectory
13 apache-subdirectory
14 apache-wsgi-coding
14 apache-wsgi-coding
@@ -1,143 +1,146 b''
1 Nginx Configuration Example
1 Nginx Configuration Example
2 ---------------------------
2 ---------------------------
3
3
4 Use the following example to configure Nginx as your web server.
4 Use the following example to configure Nginx as your web server.
5
5
6
6
7 .. code-block:: nginx
7 .. code-block:: nginx
8
8
9 ## rate limiter for certain pages to prevent brute force attacks
9 ## rate limiter for certain pages to prevent brute force attacks
10 limit_req_zone $binary_remote_addr zone=dl_limit:10m rate=1r/s;
10 limit_req_zone $binary_remote_addr zone=req_limit:10m rate=1r/s;
11
11
12 ## custom log format
12 ## custom log format
13 log_format log_custom '$remote_addr - $remote_user [$time_local] '
13 log_format log_custom '$remote_addr - $remote_user [$time_local] '
14 '"$request" $status $body_bytes_sent '
14 '"$request" $status $body_bytes_sent '
15 '"$http_referer" "$http_user_agent" '
15 '"$http_referer" "$http_user_agent" '
16 '$request_time $upstream_response_time $pipe';
16 '$request_time $upstream_response_time $pipe';
17
17
18 ## define upstream (local RhodeCode instance) to connect to
18 ## define upstream (local RhodeCode instance) to connect to
19 upstream rc {
19 upstream rc {
20 # Url to running RhodeCode instance.
20 # Url to running RhodeCode instance.
21 # This is shown as `- URL:` in output from rccontrol status.
21 # This is shown as `- URL: <host>` in output from rccontrol status.
22 server 127.0.0.1:10002;
22 server 127.0.0.1:10002;
23
23
24 # add more instances for load balancing
24 # add more instances for load balancing
25 # server 127.0.0.1:10003;
25 # server 127.0.0.1:10003;
26 # server 127.0.0.1:10004;
26 # server 127.0.0.1:10004;
27 }
27 }
28
28
29 ## HTTP to HTTPS rewrite
29 ## HTTP to HTTPS rewrite
30 server {
30 server {
31 listen 80;
31 listen 80;
32 server_name rhodecode.myserver.com;
32 server_name rhodecode.myserver.com;
33
33
34 if ($http_host = rhodecode.myserver.com) {
34 if ($http_host = rhodecode.myserver.com) {
35 rewrite (.*) https://rhodecode.myserver.com$1 permanent;
35 rewrite (.*) https://rhodecode.myserver.com$1 permanent;
36 }
36 }
37 }
37 }
38
38
39 ## Optional gist alias server, for serving nicer GIST urls.
39 ## Optional gist alias server, for serving nicer GIST urls.
40 server {
40 server {
41 listen 443;
41 listen 443;
42 server_name gist.myserver.com;
42 server_name gist.myserver.com;
43 access_log /var/log/nginx/gist.access.log log_custom;
43 access_log /var/log/nginx/gist.access.log log_custom;
44 error_log /var/log/nginx/gist.error.log;
44 error_log /var/log/nginx/gist.error.log;
45
45
46 ssl on;
46 ssl on;
47 ssl_certificate gist.rhodecode.myserver.com.crt;
47 ssl_certificate gist.rhodecode.myserver.com.crt;
48 ssl_certificate_key gist.rhodecode.myserver.com.key;
48 ssl_certificate_key gist.rhodecode.myserver.com.key;
49
49
50 ssl_session_timeout 5m;
50 ssl_session_timeout 5m;
51
51
52 ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
52 ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
53 ssl_prefer_server_ciphers on;
53 ssl_prefer_server_ciphers on;
54 ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA';
54 ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA';
55
55
56 # HSTS header prevents https -> http downgrade
56 # HSTS header prevents https -> http downgrade
57 add_header Strict-Transport-Security "max-age=31536000; includeSubdomains;";
57 add_header Strict-Transport-Security "max-age=31536000; includeSubdomains;";
58
58
59 # Diffie-Hellman parameter for DHE ciphersuites, recommended 2048 bits
59 # Diffie-Hellman parameter for DHE ciphersuites, recommended 2048 bits
60 #ssl_dhparam /etc/nginx/ssl/dhparam.pem;
60 #ssl_dhparam /etc/nginx/ssl/dhparam.pem;
61
61
62 rewrite ^/(.+)$ https://rhodecode.myserver.com/_admin/gists/$1;
62 rewrite ^/(.+)$ https://rhodecode.myserver.com/_admin/gists/$1;
63 rewrite (.*) https://rhodecode.myserver.com/_admin/gists;
63 rewrite (.*) https://rhodecode.myserver.com/_admin/gists;
64 }
64 }
65
65
66
66
67 ## MAIN SSL enabled server
67 ## MAIN SSL enabled server
68 server {
68 server {
69 listen 443 ssl;
69 listen 443 ssl;
70 server_name rhodecode.myserver.com;
70 server_name rhodecode.myserver.com;
71
71
72 access_log /var/log/nginx/rhodecode.access.log log_custom;
72 access_log /var/log/nginx/rhodecode.access.log log_custom;
73 error_log /var/log/nginx/rhodecode.error.log;
73 error_log /var/log/nginx/rhodecode.error.log;
74
74
75 ssl on;
75 ssl on;
76 ssl_certificate rhodecode.myserver.com.crt;
76 ssl_certificate rhodecode.myserver.com.crt;
77 ssl_certificate_key rhodecode.myserver.com.key;
77 ssl_certificate_key rhodecode.myserver.com.key;
78
78
79 ssl_session_timeout 5m;
79 ssl_session_timeout 5m;
80
80
81 ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
81 ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
82 ssl_prefer_server_ciphers on;
82 ssl_prefer_server_ciphers on;
83 ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA';
83 ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA';
84
84
85 # Diffie-Hellman parameter for DHE ciphersuites, recommended 2048 bits
85 # Diffie-Hellman parameter for DHE ciphersuites, recommended 2048 bits
86 #ssl_dhparam /etc/nginx/ssl/dhparam.pem;
86 #ssl_dhparam /etc/nginx/ssl/dhparam.pem;
87
87
88 # example of proxy.conf can be found in our docs.
88 include /etc/nginx/proxy.conf;
89 include /etc/nginx/proxy.conf;
89
90
90 ## serve static files by Nginx, recommended for performance
91 ## uncomment to serve static files by Nginx, recommended for performance
91 # location /_static/rhodecode {
92 # location /_static/rhodecode {
92 # gzip on;
93 # gzip on;
93 # gzip_min_length 500;
94 # gzip_min_length 500;
94 # gzip_proxied any;
95 # gzip_proxied any;
95 # gzip_comp_level 4;
96 # gzip_comp_level 4;
96 # gzip_types text/css text/javascript text/xml text/plain text/x-component application/javascript application/json application/xml application/rss+xml font/truetype font/opentype application/vnd.ms-fontobject image/svg+xml;
97 # gzip_types text/css text/javascript text/xml text/plain text/x-component application/javascript application/json application/xml application/rss+xml font/truetype font/opentype application/vnd.ms-fontobject image/svg+xml;
97 # gzip_vary on;
98 # gzip_vary on;
98 # gzip_disable "msie6";
99 # gzip_disable "msie6";
100 # alias /path/to/.rccontrol/community-1/static;
99 # alias /path/to/.rccontrol/enterprise-1/static;
101 # alias /path/to/.rccontrol/enterprise-1/static;
100 # }
102 # }
101
103
102 ## channelstream websocket handling
104 ## channelstream websocket handling
103 location /_channelstream {
105 location /_channelstream {
104 rewrite /_channelstream/(.*) /$1 break;
106 rewrite /_channelstream/(.*) /$1 break;
105
107
106 proxy_pass http://127.0.0.1:9800;
108 proxy_pass http://127.0.0.1:9800;
107
109
108 proxy_connect_timeout 10;
110 proxy_connect_timeout 10;
109 proxy_send_timeout 10m;
111 proxy_send_timeout 10m;
110 proxy_read_timeout 10m;
112 proxy_read_timeout 10m;
111 tcp_nodelay off;
113 tcp_nodelay off;
112 proxy_set_header Host $host;
114 proxy_set_header Host $host;
113 proxy_set_header X-Real-IP $remote_addr;
115 proxy_set_header X-Real-IP $remote_addr;
114 proxy_set_header X-Url-Scheme $scheme;
116 proxy_set_header X-Url-Scheme $scheme;
115 proxy_set_header X-Forwarded-Proto $scheme;
117 proxy_set_header X-Forwarded-Proto $scheme;
116 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
118 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
117 gzip off;
119 gzip off;
118 proxy_http_version 1.1;
120 proxy_http_version 1.1;
119 proxy_set_header Upgrade $http_upgrade;
121 proxy_set_header Upgrade $http_upgrade;
120 proxy_set_header Connection "upgrade";
122 proxy_set_header Connection "upgrade";
121 }
123 }
122
124
125 ## rate limit this endpoint to prevent login page brute-force attacks
123 location /_admin/login {
126 location /_admin/login {
124 ## rate limit this endpoint
127 limit_req zone=req_limit burst=10 nodelay;
125 limit_req zone=dl_limit burst=10 nodelay;
126 try_files $uri @rhode;
128 try_files $uri @rhode;
127 }
129 }
128
130
129 location / {
131 location / {
130 try_files $uri @rhode;
132 try_files $uri @rhode;
131 }
133 }
132
134
133 location @rhode {
135 location @rhode {
134 proxy_pass http://rc;
136 proxy_pass http://rc;
135 }
137 }
136
138
137 ## custom 502 error page. Will be displayed while RhodeCode server
139 ## custom 502 error page. Will be displayed while RhodeCode server
138 ## is turned off
140 ## is turned off
139 error_page 502 /502.html;
141 error_page 502 /502.html;
140 location = /502.html {
142 location = /502.html {
143 #root /path/to/.rccontrol/community-1/static;
141 root /path/to/.rccontrol/enterprise-1/static;
144 root /path/to/.rccontrol/enterprise-1/static;
142 }
145 }
143 } No newline at end of file
146 }
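Not part of the original example, but as a quick sanity check you can validate the
configuration and generate the Diffie-Hellman parameters referenced by the commented
``ssl_dhparam`` line above (paths assume a standard Nginx layout and a systemd-based system):

.. code-block:: bash

    # check the syntax of all Nginx config files, then reload without downtime
    $ sudo nginx -t && sudo systemctl reload nginx

    # generate the 2048 bit DH parameters referenced by ssl_dhparam
    $ sudo openssl dhparam -out /etc/nginx/ssl/dhparam.pem 2048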
@@ -1,14 +1,14 b''
1 .. _nginx-ws-ref:
1 .. _nginx-ws-ref:
2
2
3 Nginx HTTP Server Configuration
3 Nginx HTTP Server Configuration
4 -------------------------------
4 -------------------------------
5
5
6 To set up your Nginx Web Server for optimal performance and security, use
6 To set up your Nginx Web Server for optimal performance and security, use
7 the information in the following sections.
7 the information in the following sections.
8
8
9 .. toctree::
9 .. toctree::
10
10
11 nginx-config-example
11 nginx-diffie-hellman
12 nginx-diffie-hellman
12 nginx-config-example
13 nginx-proxy-conf
13 nginx-tuning
14 nginx-url-prefix
14 nginx-url-prefix
@@ -1,33 +1,34 b''
1 .. _nginx-tuning:
1 .. _nginx-proxy-conf:
2
2
3 Nginx Tuning
3 Nginx Proxy Config
4 ------------
4 ------------------
5
5
6
6 Set the following properties in your ``/etc/nginx/proxy.conf`` so it does not
7 Set the following properties in your ``/etc/nginx/proxy.conf`` so it does not
7 time out during large pushes.
8 time out during large pushes.
8
9
9 .. code-block:: nginx
10 .. code-block:: nginx
10
11
11 proxy_redirect off;
12 proxy_redirect off;
12 proxy_set_header Host $http_host;
13 proxy_set_header Host $http_host;
13
14
14 ## needed for container auth
15 ## needed for container auth
15 # proxy_set_header REMOTE_USER $remote_user;
16 # proxy_set_header REMOTE_USER $remote_user;
16 # proxy_set_header X-Forwarded-User $remote_user;
17 # proxy_set_header X-Forwarded-User $remote_user;
17
18
18 proxy_set_header X-Url-Scheme $scheme;
19 proxy_set_header X-Url-Scheme $scheme;
19 proxy_set_header X-Host $http_host;
20 proxy_set_header X-Host $http_host;
20 proxy_set_header X-Real-IP $remote_addr;
21 proxy_set_header X-Real-IP $remote_addr;
21 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
22 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
22 proxy_set_header Proxy-host $proxy_host;
23 proxy_set_header Proxy-host $proxy_host;
23 proxy_buffering off;
24 proxy_buffering off;
24 proxy_connect_timeout 7200;
25 proxy_connect_timeout 7200;
25 proxy_send_timeout 7200;
26 proxy_send_timeout 7200;
26 proxy_read_timeout 7200;
27 proxy_read_timeout 7200;
27 proxy_buffers 8 32k;
28 proxy_buffers 8 32k;
28 # Set this to a larger number if you experience timeouts
29 # Set this to a larger number if you experience timeouts
29 client_max_body_size 1024m;
30 client_max_body_size 1024m;
30 client_body_buffer_size 128k;
31 client_body_buffer_size 128k;
31 large_client_header_buffers 8 64k;
32 large_client_header_buffers 8 64k;
32 add_header X-Frame-Options SAMEORIGIN;
33 add_header X-Frame-Options SAMEORIGIN;
33 add_header Strict-Transport-Security "max-age=31536000; includeSubdomains;";
34 add_header Strict-Transport-Security "max-age=31536000; includeSubdomains;";
@@ -1,14 +1,14 b''
1 .. _hg-lrg-loc:
1 .. _hg-lrg-loc:
2
2
3 Change the |hg| Large Files Location
3 |hg| Large Files Location
4 ------------------------------------
4 -------------------------
5
5
6 |RCE| manages |hg| large files from the following default location
6 |RCE| manages |hg| large files from the following default location
7 :file:`/home/{user}/repos/.cache/largefiles`. If you wish to change this, use
7 :file:`/home/{user}/repos/.cache/largefiles`. If you wish to change this, use
8 the following steps:
8 the following steps:
9
9
10 1. Open :menuselection:`Admin --> Settings --> VCS` as super-admin.
10 1. Open :menuselection:`Admin --> Settings --> VCS` as super-admin.
11
11
12 In the section called `Mercurial Settings` you can change where the largefiles
12 In the section called `Mercurial Settings` you can change where the largefiles
13 objects should be stored.
13 objects should be stored.
14
14
@@ -1,14 +1,14 b''
1 .. _git-lfs-loc:
1 .. _git-lfs-loc:
2
2
3 Change the |git| LFS storage Location
3 |git| LFS storage Location
4 -------------------------------------
4 --------------------------
5
5
6 |RCE| manages |git| LFS files from the following default location
6 |RCE| manages |git| LFS files from the following default location
7 :file:`/home/{user}/repos/.cache/lfs_store`. If you wish to change this, use
7 :file:`/home/{user}/repos/.cache/lfs_store`. If you wish to change this, use
8 the following steps:
8 the following steps:
9
9
10 1. Open :menuselection:`Admin --> Settings --> VCS` as super-admin.
10 1. Open :menuselection:`Admin --> Settings --> VCS` as super-admin.
11
11
12 In the section called `Git Settings` you can change where the LFS
12 In the section called `Git Settings` you can change where the LFS
13 objects should be stored.
13 objects should be stored.
14
14
@@ -1,124 +1,126 b''
1 .. _increase-gunicorn:
1 .. _increase-gunicorn:
2
2
3 Increase Gunicorn Workers
3 Configure Gunicorn Workers
4 -------------------------
4 --------------------------
5
5
6
6
7 |RCE| comes with `Gunicorn`_ packaged in its Nix environment.
7 |RCE| comes with `Gunicorn`_ which is a Python WSGI HTTP Server for UNIX.
8 Gunicorn is a Python WSGI HTTP Server for UNIX.
9
8
10 To improve |RCE| performance you can increase the number of `Gunicorn`_ workers.
9 To improve |RCE| performance you can increase the number of `Gunicorn`_ workers.
11 This allows to handle more connections concurently, and provide better
10 This allows handling more connections concurrently, and provides better
12 responsiveness and performance.
11 responsiveness and performance.
13
12
14 By default during installation |RCC| tries to detect how many CPUs are
13 By default during installation |RCC| tries to detect how many CPUs are
15 available in the system, and sets the number of workers based on that information.
14 available in the system, and sets the number of workers based on that information.
16 However sometimes it's better to manually set the number of workers.
15 However sometimes it's better to manually set the number of workers.
17
16
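As a rough sanity check (not part of the official steps), you can compute the value
suggested by the default :math:`(2 * Cores) + 1` formula used below straight from the
CPU count reported by the system:

.. code-block:: bash

    # prints the suggested number of Gunicorn workers, e.g. 9 on a 4-core machine
    $ echo $(( 2 * $(nproc) + 1 ))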
18 To do this, use the following steps:
17 To do this, use the following steps:
19
18
20 1. Open the :file:`home/{user}/.rccontrol/{instance-id}/rhodecode.ini` file.
19 1. Open the :file:`home/{user}/.rccontrol/{instance-id}/rhodecode.ini` file.
21 2. In the ``[server:main]`` section, increase the number of Gunicorn
20 2. In the ``[server:main]`` section, change the number of Gunicorn
22 ``workers`` using the following formula :math:`(2 * Cores) + 1`.
21 ``workers`` using the following default formula :math:`(2 * Cores) + 1`.
22 We do not, however, recommend using more than 8-12 workers per server. It's better
23 to start using the :ref:`scale-horizontal-cluster` setup if performance
24 with 8-12 workers is not enough.
23
25
24 .. code-block:: ini
26 .. code-block:: ini
25
27
26 use = egg:gunicorn#main
28 use = egg:gunicorn#main
27 ## Sets the number of process workers. You must set `instance_id = *`
29 ## Sets the number of process workers. You must set `instance_id = *`
28 ## when this option is set to more than one worker, recommended
30 ## when this option is set to more than one worker, recommended
29 ## value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers
31 ## value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers
30 ## The `instance_id = *` must be set in the [app:main] section below
32 ## The `instance_id = *` must be set in the [app:main] section below
31 workers = 4
33 workers = 4
32 ## process name
34 ## process name
33 proc_name = rhodecode
35 proc_name = rhodecode
34 ## type of worker class, one of sync, gevent
36 ## type of worker class, one of sync, gevent
35 ## for bigger setups it is recommended to use a worker class other than sync
37 ## for bigger setups it is recommended to use a worker class other than sync
36 worker_class = sync
38 worker_class = sync
37 ## The maximum number of simultaneous clients. Valid only for Gevent
39 ## The maximum number of simultaneous clients. Valid only for Gevent
38 #worker_connections = 10
40 #worker_connections = 10
39 ## max number of requests that worker will handle before being gracefully
41 ## max number of requests that worker will handle before being gracefully
40 ## restarted, could prevent memory leaks
42 ## restarted, could prevent memory leaks
41 max_requests = 1000
43 max_requests = 1000
42 max_requests_jitter = 30
44 max_requests_jitter = 30
43 ## amount of time a worker can spend handling a request before it
45 ## amount of time a worker can spend handling a request before it
44 ## gets killed and restarted. Set to 6hrs
46 ## gets killed and restarted. Set to 6hrs
45 timeout = 21600
47 timeout = 21600
46
48
47 3. In the ``[app:main]`` section, set the ``instance_id`` property to ``*``.
49 3. In the ``[app:main]`` section, set the ``instance_id`` property to ``*``.
48
50
49 .. code-block:: ini
51 .. code-block:: ini
50
52
51 # In the [app:main] section
53 # In the [app:main] section
52 [app:main]
54 [app:main]
53 # You must set `instance_id = *`
55 # You must set `instance_id = *`
54 instance_id = *
56 instance_id = *
55
57
56 4. Change the VCSServer workers too. Open the
58 4. Change the VCSServer workers too. Open the
57 :file:`home/{user}/.rccontrol/{instance-id}/vcsserver.ini` file.
59 :file:`home/{user}/.rccontrol/{instance-id}/vcsserver.ini` file.
58
60
59 5. In the ``[server:main]`` section, increase the number of Gunicorn
61 5. In the ``[server:main]`` section, increase the number of Gunicorn
60 ``workers`` using the following formula :math:`(2 * Cores) + 1`.
62 ``workers`` using the following formula :math:`(2 * Cores) + 1`.
61
63
62 .. code-block:: ini
64 .. code-block:: ini
63
65
64 ## run with gunicorn --log-config vcsserver.ini --paste vcsserver.ini
66 ## run with gunicorn --log-config vcsserver.ini --paste vcsserver.ini
65 use = egg:gunicorn#main
67 use = egg:gunicorn#main
66 ## Sets the number of process workers. Recommended
68 ## Sets the number of process workers. Recommended
67 ## value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers
69 ## value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers
68 workers = 4
70 workers = 4
69 ## process name
71 ## process name
70 proc_name = rhodecode_vcsserver
72 proc_name = rhodecode_vcsserver
71 ## type of worker class, currently `sync` is the only option allowed.
73 ## type of worker class, currently `sync` is the only option allowed.
72 worker_class = sync
74 worker_class = sync
73 ## The maximum number of simultaneous clients. Valid only for Gevent
75 ## The maximum number of simultaneous clients. Valid only for Gevent
74 #worker_connections = 10
76 #worker_connections = 10
75 ## max number of requests that worker will handle before being gracefully
77 ## max number of requests that worker will handle before being gracefully
76 ## restarted, could prevent memory leaks
78 ## restarted, could prevent memory leaks
77 max_requests = 1000
79 max_requests = 1000
78 max_requests_jitter = 30
80 max_requests_jitter = 30
79 ## amount of time a worker can spend handling a request before it
81 ## amount of time a worker can spend handling a request before it
80 ## gets killed and restarted. Set to 6hrs
82 ## gets killed and restarted. Set to 6hrs
81 timeout = 21600
83 timeout = 21600
82
84
83 6. Save your changes.
85 6. Save your changes.
84 7. Restart your |RCE| instances, using the following command:
86 7. Restart your |RCE| instances, using the following command:
85
87
86 .. code-block:: bash
88 .. code-block:: bash
87
89
88 $ rccontrol restart '*'
90 $ rccontrol restart '*'
89
91
90
92
91 Gunicorn Gevent Backend
93 Gunicorn Gevent Backend
92 -----------------------
94 -----------------------
93
95
94 Gevent is an asynchronous worker type for Gunicorn. It allows accepting multiple
96 Gevent is an asynchronous worker type for Gunicorn. It allows accepting multiple
95 connections on a single `Gunicorn`_ worker. This means you can handle 100s
97 connections on a single `Gunicorn`_ worker. This means you can handle 100s
96 of concurrent clones or API calls using just a few workers. A setting called
98 of concurrent clones or API calls using just a few workers. A setting called
97 `worker_connections` defines how many connections each worker can
99 `worker_connections` defines how many connections each worker can
98 handle using `Gevent`.
100 handle using `Gevent`.
99
101
100
102
101 To enable `Gevent` on |RCE| do the following:
103 To enable `Gevent` on |RCE| do the following:
102
104
103
105
104 1. Open the :file:`home/{user}/.rccontrol/{instance-id}/rhodecode.ini` file.
106 1. Open the :file:`home/{user}/.rccontrol/{instance-id}/rhodecode.ini` file.
105 2. In the ``[server:main]`` section, change `worker_class` for Gunicorn.
107 2. In the ``[server:main]`` section, change `worker_class` for Gunicorn.
106
108
107
109
108 .. code-block:: ini
110 .. code-block:: ini
109
111
110 ## type of worker class, one of sync, gevent
112 ## type of worker class, one of sync, gevent
111 ## for bigger setups it is recommended to use a worker class other than sync
113 ## for bigger setups it is recommended to use a worker class other than sync
112 worker_class = gevent
114 worker_class = gevent
113 ## The maximum number of simultaneous clients. Valid only for Gevent
115 ## The maximum number of simultaneous clients. Valid only for Gevent
114 worker_connections = 30
116 worker_connections = 30
115
117
116
118
117 .. note::
119 .. note::
118
120
119 `Gevent` is currently only supported for Enterprise/Community instances.
121 `Gevent` is currently only supported for Enterprise/Community instances.
120 VCSServer doesn't yet support gevent.
122 VCSServer doesn't yet support gevent.
121
123
122
124
123
125
124 .. _Gunicorn: http://gunicorn.org/
126 .. _Gunicorn: http://gunicorn.org/
@@ -1,8 +1,8 b''
1 .. _db-session-ref:
1 .. _db-session-ref:
2
2
3 Increase Database Performance
3 Database Performance
4 -----------------------------
4 --------------------
5
5
6 For tuning PostgreSQL we recommend reading: http://www.revsys.com/writings/postgresql-performance.html
6 For tuning PostgreSQL we recommend reading: http://www.revsys.com/writings/postgresql-performance.html
7
7
8 For tuning MySQL we recommend reading: http://www.tecmint.com/mysql-mariadb-performance-tuning-and-optimization/ No newline at end of file
8 For tuning MySQL we recommend reading: http://www.tecmint.com/mysql-mariadb-performance-tuning-and-optimization/
@@ -1,22 +1,21 b''
1 .. _rhodecode-tuning-ref:
1 .. _rhodecode-tuning-ref:
2
2
3 Tuning |RCE|
3 Tuning |RCE|
4 ============
4 ============
5
5
6 To customize your |RCE| |version| installation for maximum performance you
6 To customize your |RCE| |version| installation for maximum performance you
7 may find some of the following methods useful.
7 may find some of the following methods useful.
8
8
9 .. toctree::
9 .. toctree::
10
10
11 tuning-gunicorn
11 tuning-gunicorn
12 tuning-vcs-memory-cache
12 tuning-vcs-memory-cache
13 tuning-user-sessions-performance
13 tuning-user-sessions-performance
14 tuning-increase-db-performance
14 tuning-increase-db-performance
15 tuning-scale-horizontally
15 tuning-scale-horizontally-cluster
16 tuning-increase-cache-size
17 tuning-mount-cache-memory
16 tuning-mount-cache-memory
18 tuning-change-encoding
17 tuning-change-encoding
19 tuning-change-large-file-dir
18 tuning-change-large-file-dir
20 tuning-change-lfs-dir
19 tuning-change-lfs-dir
21 tuning-hg-auth-loop
20 tuning-hg-auth-loop
22
21
@@ -1,58 +1,383 b''
1 .. _scale-horizontal:
1 .. _scale-horizontal-cluster:
2
2
3
3 Scale Horizontally
4 Scale Horizontally / RhodeCode Cluster
4 ------------------
5 --------------------------------------
5
6
6 |RCE| is built in a way that supports horizontal scaling across multiple machines.
7 |RCE| is built in a way that supports horizontal scaling across multiple machines.
7 There are two main pre-requisites for that:
8 There are three main pre-requisites for that:
8
9
9 - Shared storage that each machine can access.
10 - Shared storage that each machine can access, using NFS or another shared storage system.
10 - Shared DB connection across machines.
11 - A shared DB connection across machines, using a `MySQL`/`PostgreSQL` server that each node can access.
12 - |RCE| user sessions and caches need to use shared storage (e.g. `Redis`_/`Memcached`).
11
13
12
14
13 Horizontal scaling means adding more machines or workers into your pool of
15 Horizontal scaling means adding more machines or workers into your pool of
14 resources. Horizontally scaling |RCE| gives a huge performance increase,
16 resources. Horizontally scaling |RCE| gives a huge performance increase,
15 especially under large traffic scenarios with a high number of requests. This
17 especially under large traffic scenarios with a high number of requests.
16 is very beneficial when |RCE| is serving many users simultaneously,
18 This is very beneficial when |RCE| is serving many users simultaneously,
17 or if continuous integration servers are automatically pulling and pushing code.
19 or if continuous integration servers are automatically pulling and pushing code.
20 It also adds High-Availability to your running system.
21
22
23 Cluster Overview
24 ^^^^^^^^^^^^^^^^
25
26 Below we'll present a configuration example that will use two separate nodes to serve
27 |RCE| in a load-balanced environment. The 3rd node will act as a shared storage/cache
28 and handle load-balancing. In addition, the 3rd node will be used as a shared database instance.
29
30 This setup can be used either in a Docker-based configuration or with individual
31 physical/virtual machines. Using the 3rd node for Storage/Redis/PostgreSQL/Nginx is
32 optional. All those components can be installed on one of the two nodes used for |RCE|.
33 We'll use the following naming for our nodes:
34
35 - `rc-node-1` (NFS, DB, Cache node)
36 - `rc-node-2` (Worker node1)
37 - `rc-node-3` (Worker node2)
38
39 Our shared NFS storage in this example is located at `/home/rcdev/storage` and
40 is RW-accessible on **each** node.
41
42 In this example we use certain recommended components; however, many
43 of them can be replaced by alternatives your organization may already use, for example:
44
45 - `MySQL`/`PostgreSQL`: aren't replaceable and are the only two supported databases.
46 - `Nginx`_ on `rc-node-1` can be replaced by: `Hardware Load Balancer (F5)`, `Apache`_, `HA-Proxy` etc.
47 - `Nginx`_ on rc-node-2/3 acts as a reverse proxy and can be replaced by other HTTP server
48 acting as reverse proxy such as `Apache`_.
49 - `Redis`_ on `rc-node-1` can be replaced by: `Memcached`
50
51
52 Here's an overview of which components should be installed/set up on each server in our example:
53
54 - **rc-node-1**:
55
56 - main storage acting as NFS host.
57 - `nginx` acting as a load-balancer.
58 - `postgresql-server` used for database and sessions.
59 - `redis-server` used for storing shared caches.
60 - optionally `rabbitmq-server` for `Celery` if used.
61 - optionally, if `Celery` is used, an Enterprise/Community instance + VCSServer.
62 - optionally a mail server that can be shared by other instances.
63 - optionally a channelstream server to handle live communication for all instances.
64
65
66 - **rc-node-2/3**:
67
68 - `nginx` acting as a reverse proxy to handle requests to |RCE|.
69 - 1x RhodeCode Enterprise/Community instance.
70 - 1x VCSServer instance.
71 - optionally for testing connection: postgresql-client, redis-client (redis-tools).
72
73
74 Before we start, here are a few assumptions that should be fulfilled:
75
76 - make sure the nodes can access each other.
77 - make sure `Redis`_/`MySQL`/`PostgreSQL`/`RabbitMQ`_ are running on `rc-node-1`
78 - make sure both `rc-node-2`/`3` can access NFS storage with RW access
79 - make sure rc-node-2/3 can access the `Redis`_/`PostgreSQL`/`MySQL` services on `rc-node-1` (a quick connectivity check is sketched right after this list).
80 - make sure `Redis`_/Database/`RabbitMQ`_ are password protected and accessible only from rc-node-2/3.
81
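A quick way to verify these assumptions from `rc-node-2`/`3` (a sketch using the optional
postgresql-client and redis-tools packages mentioned above; host names and credentials are
the example ones used throughout this guide):

.. code-block:: bash

    # confirm the shared PostgreSQL database on rc-node-1 answers queries
    $ psql -h rc-node-1 -U postgres -d rhodecode -c 'SELECT 1;'

    # confirm the shared Redis instance on rc-node-1 is reachable (expects PONG)
    $ redis-cli -h rc-node-1 -a qweqwe ping

    # confirm the shared NFS storage is mounted read-write on this node
    $ touch /home/rcdev/storage/.rw-test && rm /home/rcdev/storage/.rw-test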
18
82
19
83
20 If you scale across different machines, each |RCM| instance
84 Setup rc-node-2/3
21 needs to store its data on a shared disk, preferably together with your
85 ^^^^^^^^^^^^^^^^^
22 |repos|. This data directory contains template caches, a full text search index,
86
23 and is used for task locking to ensure safety across multiple instances.
87 Initially before `rc-node-1` we'll configure both nodes 2 and 3 to operate as standalone
24 To do this, set the following properties in the :file:`rhodecode.ini` file to
88 nodes with their own hostnames. Use the default installation settings, and use
25 set the shared location across all |RCM| instances.
89 the default local addresses (127.0.0.1) to configure VCSServer and Community/Enterprise instances.
90 All external connectivity will be handled by the reverse proxy (`Nginx`_ in our example).
91
92 This way we can ensure each individual host works,
93 accepts connections, and lets us perform operations explicitly on a chosen node.
94
95 In addition this would allow us to explicitly direct certain traffic to a node, e.g. a
96 CI server will only call `rc-node-3` directly. This should be done similarly to a normal
97 installation, so check out the `Nginx`_/`Apache`_ configuration examples to configure each host.
98 Each one should already connect to the shared database during installation.
99
100
101 1) Assuming our final URL will be http://rc-node-1, configure `instance_id` and `app.base_url`
102
103 a) On **rc-node-2** find the following settings and edit :file:`/home/{user}/.rccontrol/{instance-id}/rhodecode.ini`
104
105 .. code-block:: ini
106
107 ## required format is: *NAME-
108 instance_id = *rc-node-2-
109 app.base_url = http://rc-node-1
110
111
112 b) On **rc-node-3** find the following settings and edit :file:`/home/{user}/.rccontrol/{instance-id}/rhodecode.ini`
113
114 .. code-block:: ini
115
116 ## required format is: *NAME-
117 instance_id = *rc-node-3-
118 app.base_url = http://rc-node-1
119
120
121
122 2) Configure `User Session` to use a shared database. Example config that should be
123 changed on both nodes 2 and 3. Edit :file:`/home/{user}/.rccontrol/{instance-id}/rhodecode.ini`
124
125 .. code-block:: ini
126
127 ####################################
128 ### BEAKER SESSION ####
129 ####################################
130
131 ## Disable the default `file` sessions
132 #beaker.session.type = file
133 #beaker.session.data_dir = %(here)s/data/sessions
134
135 ## use shared db based session, fast, and allows easy management over logged in users
136 beaker.session.type = ext:database
137 beaker.session.table_name = db_session
138 # use our rc-node-1 here
139 beaker.session.sa.url = postgresql://postgres:qweqwe@rc-node-1/rhodecode
140 beaker.session.sa.pool_recycle = 3600
141 beaker.session.sa.echo = false
142
143 In addition make sure both instances use the same `session.secret` so users have
144 persistent sessions across nodes. Please generate a different one than in this example; one way to do that is sketched below.
145
146 .. code-block:: ini
147
148 # use a unique generated long string
149 beaker.session.secret = 70e116cae2274656ba7265fd860aebbd
150
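One way to generate such a unique value (any sufficiently long random string works; this
command is just a convenient option):

.. code-block:: bash

    # print a random 32-character hex string to use as beaker.session.secret
    $ openssl rand -hex 16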
151 3) Configure the stored caches/archive cache to use our shared NFS on `rc-node-1`
26
152
27 .. code-block:: ini
153 .. code-block:: ini
28
154
29 cache_dir = /shared/path/caches # set to shared location
155 # note the `_` prefix that allows using a directory without
30 search.location = /shared/path/search_index # set to shared location
156 # remap and rescan checking for vcs inside it.
157 cache_dir = /home/rcdev/storage/_cache_dir/data
158 # note archive cache dir is disabled by default, however if you enable
159 # it also needs to be shared
160 #archive_cache_dir = /home/rcdev/storage/_tarball_cache_dir
161
162
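The example assumes the NFS export from `rc-node-1` is already mounted at
`/home/rcdev/storage` on each worker node. A hypothetical client-side mount, with a
placeholder export path you would adjust to your NFS setup:

.. code-block:: bash

    # mount the shared storage exported by rc-node-1 (example export path)
    $ sudo mount -t nfs rc-node-1:/srv/rhodecode-storage /home/rcdev/storage

    # or make it permanent via /etc/fstab
    $ echo "rc-node-1:/srv/rhodecode-storage /home/rcdev/storage nfs defaults 0 0" | sudo tee -a /etc/fstab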
163 4) Change cache backends to use `Redis`_ based caches. Below is a full example config
164 that replaces the default file-based cache with a shared `Redis`_ cache using a distributed lock.
165
166
167 .. code-block:: ini
168
169 #####################################
170 ### DOGPILE CACHE ####
171 #####################################
172
173 ## `cache_perms` cache settings for permission tree, auth TTL.
174 #rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
175 #rc_cache.cache_perms.expiration_time = 300
176
177 ## alternative `cache_perms` redis backend with distributed lock
178 rc_cache.cache_perms.backend = dogpile.cache.rc.redis
179 rc_cache.cache_perms.expiration_time = 300
180 ## redis_expiration_time needs to be greater than expiration_time
181 rc_cache.cache_perms.arguments.redis_expiration_time = 7200
182 rc_cache.cache_perms.arguments.socket_timeout = 30
183 rc_cache.cache_perms.arguments.host = rc-node-1
184 rc_cache.cache_perms.arguments.password = qweqwe
185 rc_cache.cache_perms.arguments.port = 6379
186 rc_cache.cache_perms.arguments.db = 0
187 rc_cache.cache_perms.arguments.distributed_lock = true
188
189 ## `cache_repo` cache settings for FileTree, Readme, RSS FEEDS
190 #rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
191 #rc_cache.cache_repo.expiration_time = 2592000
192
193 ## alternative `cache_repo` redis backend with distributed lock
194 rc_cache.cache_repo.backend = dogpile.cache.rc.redis
195 rc_cache.cache_repo.expiration_time = 2592000
196 ## redis_expiration_time needs to be greater than expiration_time
197 rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
198 rc_cache.cache_repo.arguments.socket_timeout = 30
199 rc_cache.cache_repo.arguments.host = rc-node-1
200 rc_cache.cache_repo.arguments.password = qweqwe
201 rc_cache.cache_repo.arguments.port = 6379
202 rc_cache.cache_repo.arguments.db = 1
203 rc_cache.cache_repo.arguments.distributed_lock = true
204
205 ## cache settings for SQL queries, this needs to use memory type backend
206 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
207 rc_cache.sql_cache_short.expiration_time = 30
208
209 ## `cache_repo_longterm` cache for repo object instances, this needs to use memory
210 ## type backend as the objects kept are not pickle serializable
211 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
212 ## by default we use 96H, this is using invalidation on push anyway
213 rc_cache.cache_repo_longterm.expiration_time = 345600
214 ## max items in LRU cache, reduce this number to save memory, and expire last used
215 ## cached objects
216 rc_cache.cache_repo_longterm.max_size = 10000
217
218
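After restarting the instances you can verify that cache keys are actually being written
to the shared `Redis`_ databases (a quick sketch using the example credentials above):

.. code-block:: bash

    # list a few keys from the cache_perms (db 0) and cache_repo (db 1) databases
    $ redis-cli -h rc-node-1 -a qweqwe -n 0 --scan | head
    $ redis-cli -h rc-node-1 -a qweqwe -n 1 --scan | head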
219 5) Configure `Nginx`_ as a reverse proxy on `rc-node-2/3`.
220 Minimal `Nginx`_ config used:
221
31
222
32 ####################################
223 .. code-block:: nginx
33 ### BEAKER CACHE ####
224
34 ####################################
225 ## rate limiter for certain pages to prevent brute force attacks
35 beaker.cache.data_dir = /shared/path/data # set to shared location
226 limit_req_zone $binary_remote_addr zone=req_limit:10m rate=1r/s;
36 beaker.cache.lock_dir = /shared/path/lock # set to shared location
227
228 ## custom log format
229 log_format log_custom '$remote_addr - $remote_user [$time_local] '
230 '"$request" $status $body_bytes_sent '
231 '"$http_referer" "$http_user_agent" '
232 '$request_time $upstream_response_time $pipe';
233
234 server {
235 listen 80;
236 server_name rc-node-2;
237 #server_name rc-node-3;
238
239 access_log /var/log/nginx/rhodecode.access.log log_custom;
240 error_log /var/log/nginx/rhodecode.error.log;
241
242 # example of proxy.conf can be found in our docs.
243 include /etc/nginx/proxy.conf;
244
245 ## serve static files by Nginx, recommended for performance
246 location /_static/rhodecode {
247 gzip on;
248 gzip_min_length 500;
249 gzip_proxied any;
250 gzip_comp_level 4;
251 gzip_types text/css text/javascript text/xml text/plain text/x-component application/javascript application/json application/xml application/rss+xml font/truetype font/opentype application/vnd.ms-fontobject image/svg+xml;
252 gzip_vary on;
253 gzip_disable "msie6";
254 #alias /home/rcdev/.rccontrol/community-1/static;
255 alias /home/rcdev/.rccontrol/enterprise-1/static;
256 }
257
258
259 location /_admin/login {
260 limit_req zone=req_limit burst=10 nodelay;
261 try_files $uri @rhode;
262 }
263
264 location / {
265 try_files $uri @rhode;
266 }
267
268 location @rhode {
269 # Url to running RhodeCode instance.
270 # This is shown as `- URL: <host>` in output from rccontrol status.
271 proxy_pass http://127.0.0.1:10020;
272 }
273
274 ## custom 502 error page. Will be displayed while RhodeCode server
275 ## is turned off
276 error_page 502 /502.html;
277 location = /502.html {
278 #root /home/rcdev/.rccontrol/community-1/static;
279 root /home/rcdev/.rccontrol/enterprise-1/static;
280 }
281 }
282
283
284 6) Optional: Full text search. In case you use `Whoosh` full text search we also need
285 shared storage for the index. In our example our NFS is mounted at `/home/rcdev/storage`
286 which represents our storage, so we can use the following:
287
288 .. code-block:: ini
289
290 # note the `_` prefix that allows using a directory without
291 # remap and rescan checking for vcs inside it.
292 search.location = /home/rcdev/storage/_index_data/index
37
293
38
294
39 .. note::
295 .. note::
40
296
41 If you use custom caches such as `beaker.cache.auth_plugins.` it's recommended
297 If you use Elasticsearch it is shared by default, and simply running an ES node is
42 to set it to the memcached/redis or database backend so it can be shared
298 cluster compatible by default.
43 across machines.
299
300
301 7) Optional: If you intend to use mailing, all instances need to use either a shared
302 mailing node, or each will use an individual local mail agent. Simply put, node-1/2/3 need
303 to use the same mailing configuration.
304
305
306
307 Setup rc-node-1
308 ^^^^^^^^^^^^^^^
44
309
45
310
46 It is recommended to create another dedicated |RCE| instance to handle
311 Configure `Nginx`_ as a load balancer for rc-node-2/3.
47 traffic from build farms or continuous integration servers.
312 Minimal `Nginx`_ example below:
313
314 .. code-block:: nginx
315
316 ## define rc-cluster which contains a pool of our instances to connect to
317 upstream rc-cluster {
318 # rc-node-2/3 are stored in /etc/hosts with correct IP addresses
319 server rc-node-2:80;
320 server rc-node-3:80;
321 }
322
323 server {
324 listen 80;
325 server_name rc-node-1;
326
327 location / {
328 proxy_pass http://rc-cluster;
329 }
330 }
331
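The upstream entries above resolve `rc-node-2`/`3` through `/etc/hosts`. A hypothetical
mapping (the IP addresses below are placeholders for your real node addresses):

.. code-block:: bash

    # append example entries on rc-node-1; replace the IPs with your real ones
    $ echo "10.0.0.2  rc-node-2" | sudo tee -a /etc/hosts
    $ echo "10.0.0.3  rc-node-3" | sudo tee -a /etc/hosts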
48
332
49 .. note::
333 .. note::
50
334
51 You should configure your load balancing accordingly. We recommend writing
335 You should configure your load balancing accordingly. We recommend writing
52 load balancing rules that will separate regular user traffic from
336 load balancing rules that will separate regular user traffic from
53 automated process traffic like continuous servers or build bots.
337 automated process traffic like continuous integration servers or build bots. Sticky sessions
338 are not required.
339
340
341 Show which instance handles a request
342 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
343
344 You can easily check if load-balancing is working as expected. Visit our main node
345 `rc-node-1` URL which at that point should already handle incoming requests and balance
346 them across node-2/3.
347
348 Add a special GET param `?showrcid=1` to show current instance handling your request.
349
350 For example: visiting the url `http://rc-node-1/?showrcid=1` will show, at the bottom
351 of the screen, the cluster instance info.
352 e.g: `RhodeCode instance id: rc-node-3-rc-node-3-3246`
353 which is generated from::
354
355 <NODE_HOSTNAME>-<INSTANCE_ID>-<WORKER_PID>
356
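A quick command-line check that repeated requests are balanced across nodes (a sketch,
assuming the instance id text shown above appears verbatim in the returned HTML):

.. code-block:: bash

    # issue a few requests and print which node reports handling each one
    $ for i in 1 2 3 4; do curl -s 'http://rc-node-1/?showrcid=1' | grep -o 'RhodeCode instance id: [^<"]*'; done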
357
358 Using Celery with cluster
359 ^^^^^^^^^^^^^^^^^^^^^^^^^
54
360
55 .. note::
361
362 If `Celery` is used we recommend also setting up an instance of Enterprise/Community + VCSServer
363 on the node that is running `RabbitMQ`_. Those instances will be used to execute async
364 tasks on `rc-node-1`. This is the most efficient setup. `Celery` usually
365 handles tasks such as sending emails, forking repositories, importing
366 repositories from external locations etc. Using workers on an instance that has
367 direct access to the disks used by NFS, as well as to the email server, gives a noticeable
368 performance boost. Running workers local to the NFS storage results in faster
369 execution of forking large repositories or sending lots of emails.
56
370
57 If Celery is used on each instance then you should run separate Celery
371 Those instances need to be configured in the same way as for other nodes.
58 instances, but the message broker should be the same for all of them.
372 The instance on rc-node-1 can be added to the cluster, but we don't recommend doing it.
373 For best results let it be isolated to only executing `Celery` tasks in the cluster setup.
374
375
376 .. _Gunicorn: http://gunicorn.org/
377 .. _Whoosh: https://pypi.python.org/pypi/Whoosh/
378 .. _Elasticsearch: https://www.elastic.co/
379 .. _RabbitMQ: http://www.rabbitmq.com/
380 .. _Nginx: http://nginx.org/
381 .. _Apache: http://httpd.apache.org/
382 .. _Redis: http://redis.io
383
@@ -1,66 +1,67 b''
1 .. _user-session-ref:
1 .. _user-session-ref:
2
2
3 Increase User Session Performance
3 User Session Performance
4 ---------------------------------
4 ------------------------
5
5
6 The default file-based sessions are only suitable for smaller setups, or
6 The default file-based sessions are only suitable for smaller setups, or
7 instances that don't have a lot of users or traffic.
7 instances that don't have a lot of users or traffic.
8 They are set as the default option because it's a setup-free solution.
8 They are set as the default option because it's a setup-free solution.
9
9
10 The most common issue with file-based sessions is file limit errors, which occur
10 The most common issue with file-based sessions is file limit errors, which occur
11 if there are lots of session files.
11 if there are lots of session files.
12
12
13 Therefore, in a large scale deployment, to give better performance,
13 Therefore, in a large scale deployment, to give better performance,
14 scalability, and maintainability we recommend switching from file-based
14 scalability, and maintainability we recommend switching from file-based
15 sessions to database-based user sessions or Redis based sessions.
15 sessions to database-based user sessions or Redis based sessions.
16
16
17 To switch to database-based user sessions uncomment the following section in
17 To switch to database-based user sessions uncomment the following section in
18 your :file:`/home/{user}/.rccontrol/{instance-id}/rhodecode.ini` file.
18 your :file:`/home/{user}/.rccontrol/{instance-id}/rhodecode.ini` file.
19
19
20
20
21 .. code-block:: ini
21 .. code-block:: ini
22
22
23 ## db based session, fast, and allows easy management over logged in users
23 ## db based session, fast, and allows easy management over logged in users
24 beaker.session.type = ext:database
24 beaker.session.type = ext:database
25 beaker.session.table_name = db_session
25 beaker.session.table_name = db_session
26
26
27 # use just one of the following accoring to the type of database
27 # use just one of the following according to the type of database
28 beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
28 beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
29 # or
29 beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
30 beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
30
31
31 beaker.session.sa.pool_recycle = 3600
32 beaker.session.sa.pool_recycle = 3600
32 beaker.session.sa.echo = false
33 beaker.session.sa.echo = false
33
34
34
35
35 and make sure you comment out the file based sessions.
36 and make sure you comment out the file based sessions.
36
37
37 .. code-block:: ini
38 .. code-block:: ini
38
39
39 ## types are file, ext:memcached, ext:database, and memory (default).
40 ## types are file, ext:memcached, ext:database, and memory (default).
40 #beaker.session.type = file
41 #beaker.session.type = file
41 #beaker.session.data_dir = %(here)s/data/sessions/data
42 #beaker.session.data_dir = %(here)s/data/sessions/data
42
43
43
44
44 The `table_name` table will be automatically created in the specified database if it doesn't exist yet.
45 The `table_name` table will be automatically created in the specified database if it doesn't exist yet.
45 The database specified in the `beaker.session.sa.url` can be the same one that RhodeCode
46 The database specified in the `beaker.session.sa.url` can be the same one that RhodeCode
46 uses, or if required it can be a different one. We recommend using the same database.
47 uses, or if required it can be a different one. We recommend using the same database.
47
48
48
49
49
50
50 To switch to redis-based user sessions uncomment the following section in
51 To switch to redis-based user sessions uncomment the following section in
51 your :file:`/home/{user}/.rccontrol/{instance-id}/rhodecode.ini` file.
52 your :file:`/home/{user}/.rccontrol/{instance-id}/rhodecode.ini` file.
52
53
53 .. code-block:: ini
54 .. code-block:: ini
54
55
55 ## redis sessions
56 ## redis sessions
56 beaker.session.type = ext:redis
57 beaker.session.type = ext:redis
57 beaker.session.url = localhost:6379
58 beaker.session.url = localhost:6379
58
59
59
60
60 and make sure you comment out the file based sessions.
61 and make sure you comment out the file based sessions.
61
62
62 .. code-block:: ini
63 .. code-block:: ini
63
64
64 ## types are file, ext:memcached, ext:database, and memory (default).
65 ## types are file, ext:memcached, ext:database, and memory (default).
65 #beaker.session.type = file
66 #beaker.session.type = file
66 #beaker.session.data_dir = %(here)s/data/sessions/data No newline at end of file
67 #beaker.session.data_dir = %(here)s/data/sessions/data
@@ -1,8 +1,8 b''
1 .. _adjust-vcs-mem-cache:
1 .. _adjust-vcs-mem-cache:
2
2
3 Adjusting VCS Memory Cache
3 VCSServer Memory Cache
4 --------------------------
4 ----------------------
5
5
6 The VCS Server memory cache can be adjusted to work best with the resources
6 The VCS Server memory cache can be adjusted to work best with the resources
7 available to your |RCE| instance. If you find that memory resources are under
7 available to your |RCE| instance. If you find that memory resources are under
8 pressure, see the :ref:`vcs-server-maintain` section for details.
8 pressure, see the :ref:`vcs-server-maintain` section for details.