# HG changeset patch
# User Marcin Kuzminski
# Date 2018-09-10 17:49:33
# Node ID 26521a96d815c17ac5e325ef8273df649e079074
# Parent b13c798a8af1e0dc381c4084a7a05ec7c6896b0c
docs: updated scaling/cluster docs

diff --git a/docs/admin/apache-config.rst b/docs/admin/apache-config.rst
--- a/docs/admin/apache-config.rst
+++ b/docs/admin/apache-config.rst
@@ -8,7 +8,7 @@ the information in the following section
 
 .. toctree::
 
+   apache-conf-example
    apache-diffie-hellman
-   apache-conf-example
    apache-subdirectory
    apache-wsgi-coding
diff --git a/docs/admin/nginx-config-example.rst b/docs/admin/nginx-config-example.rst
--- a/docs/admin/nginx-config-example.rst
+++ b/docs/admin/nginx-config-example.rst
@@ -7,7 +7,7 @@ Use the following example to configure N
 .. code-block:: nginx
 
     ## rate limiter for certain pages to prevent brute force attacks
-    limit_req_zone $binary_remote_addr zone=dl_limit:10m rate=1r/s;
+    limit_req_zone $binary_remote_addr zone=req_limit:10m rate=1r/s;
 
     ## custom log format
     log_format log_custom '$remote_addr - $remote_user [$time_local] '
@@ -18,7 +18,7 @@ Use the following example to configure N
     ## define upstream (local RhodeCode instance) to connect to
     upstream rc {
         # Url to running RhodeCode instance.
-        # This is shown as `- URL:` in output from rccontrol status.
+        # This is shown as `- URL: ` in output from rccontrol status.
         server 127.0.0.1:10002;
 
         # add more instances for load balancing
@@ -85,9 +85,10 @@ Use the following example to configure N
         # Diffie-Hellman parameter for DHE ciphersuites, recommended 2048 bits
         #ssl_dhparam /etc/nginx/ssl/dhparam.pem;
 
+        # example of proxy.conf can be found in our docs.
         include /etc/nginx/proxy.conf;
 
-        ## serve static files by Nginx, recommended for performance
+        ## uncomment to serve static files by Nginx, recommended for performance
         # location /_static/rhodecode {
         #    gzip on;
         #    gzip_min_length 500;
@@ -96,6 +97,7 @@ Use the following example to configure N
         #    gzip_types text/css text/javascript text/xml text/plain text/x-component application/javascript application/json application/xml application/rss+xml font/truetype font/opentype application/vnd.ms-fontobject image/svg+xml;
         #    gzip_vary on;
         #    gzip_disable "msie6";
+        #    alias /path/to/.rccontrol/community-1/static;
         #    alias /path/to/.rccontrol/enterprise-1/static;
         # }
 
@@ -120,9 +122,9 @@ Use the following example to configure N
             proxy_set_header Connection "upgrade";
         }
 
+        ## rate limit this endpoint to prevent login page brute-force attacks
         location /_admin/login {
-            ## rate limit this endpoint
-            limit_req zone=dl_limit burst=10 nodelay;
+            limit_req zone=req_limit burst=10 nodelay;
             try_files $uri @rhode;
         }
 
@@ -138,6 +140,7 @@ Use the following example to configure N
         ## is turned off
         error_page 502 /502.html;
         location = /502.html {
+            #root /path/to/.rccontrol/community-1/static;
             root /path/to/.rccontrol/enterprise-1/static;
         }
     }
\ No newline at end of file
diff --git a/docs/admin/nginx-config.rst b/docs/admin/nginx-config.rst
--- a/docs/admin/nginx-config.rst
+++ b/docs/admin/nginx-config.rst
@@ -8,7 +8,7 @@ the information in the following section
 
 .. toctree::
 
+   nginx-config-example
    nginx-diffie-hellman
-   nginx-config-example
-   nginx-tuning
+   nginx-proxy-conf
    nginx-url-prefix
diff --git a/docs/admin/nginx-tuning.rst b/docs/admin/nginx-proxy-conf.rst
rename from docs/admin/nginx-tuning.rst
rename to docs/admin/nginx-proxy-conf.rst
--- a/docs/admin/nginx-tuning.rst
+++ b/docs/admin/nginx-proxy-conf.rst
@@ -1,7 +1,8 @@
-.. _nginx-tuning:
+.. _nginx-proxy-conf:
 
-Nginx Tuning
-------------
+Nginx Proxy Config
+------------------
+
 Set the following properties in your ``/etc/nginx/proxy.conf`` so it does not
 timeout during large pushes.
diff --git a/docs/admin/tuning-change-large-file-dir.rst b/docs/admin/tuning-change-large-file-dir.rst
--- a/docs/admin/tuning-change-large-file-dir.rst
+++ b/docs/admin/tuning-change-large-file-dir.rst
@@ -1,7 +1,7 @@
 .. _hg-lrg-loc:
 
-Change the |hg| Large Files Location
-------------------------------------
+|hg| Large Files Location
+-------------------------
 
-|RCE| manages |hg| larges files from the following default location
+|RCE| manages |hg| large files from the following default location
 :file:`/home/{user}/repos/.cache/largefiles`. If you wish to change this, use
diff --git a/docs/admin/tuning-change-lfs-dir.rst b/docs/admin/tuning-change-lfs-dir.rst
--- a/docs/admin/tuning-change-lfs-dir.rst
+++ b/docs/admin/tuning-change-lfs-dir.rst
@@ -1,7 +1,7 @@
 .. _git-lfs-loc:
 
-Change the |git| LFS storage Location
--------------------------------------
+|git| LFS storage Location
+--------------------------
 
 |RCE| manages |git| LFS files from the following default location
 :file:`/home/{user}/repos/.cache/lfs_store`. If you wish to change this, use
diff --git a/docs/admin/tuning-gunicorn.rst b/docs/admin/tuning-gunicorn.rst
--- a/docs/admin/tuning-gunicorn.rst
+++ b/docs/admin/tuning-gunicorn.rst
@@ -1,14 +1,13 @@
 .. _increase-gunicorn:
 
-Increase Gunicorn Workers
--------------------------
+Configure Gunicorn Workers
+--------------------------
 
-|RCE| comes with `Gunicorn`_ packaged in its Nix environment.
-Gunicorn is a Python WSGI HTTP Server for UNIX.
+|RCE| comes with `Gunicorn`_, which is a Python WSGI HTTP Server for UNIX.
 
 To improve |RCE| performance you can increase the number of `Gunicorn`_ workers.
-This allows to handle more connections concurently, and provide better
+This allows handling more connections concurrently, and provides better
 responsiveness and performance.
 
 By default during installation |RCC| tries to detect how many CPUs are
@@ -18,8 +17,11 @@ However sometimes it's better to manuall
 To do this, use the following steps:
 
 1. Open the :file:`home/{user}/.rccontrol/{instance-id}/rhodecode.ini` file.
-2. In the ``[server:main]`` section, increase the number of Gunicorn
-   ``workers`` using the following formula :math:`(2 * Cores) + 1`.
+2. In the ``[server:main]`` section, change the number of Gunicorn
+   ``workers`` using the following default formula :math:`(2 * Cores) + 1`.
+   However, we do not recommend using more than 8-12 workers per server. It's
+   better to start using the :ref:`scale-horizontal-cluster` in case
+   performance with 8-12 workers is not enough.
 
 .. code-block:: ini
diff --git a/docs/admin/tuning-increase-cache-size.rst b/docs/admin/tuning-increase-cache-size.rst
deleted file mode 100644
--- a/docs/admin/tuning-increase-cache-size.rst
+++ /dev/null
@@ -1,17 +0,0 @@
-.. _cache-size:
-
-Increase Cache Size
--------------------
-
-When managing hundreds of |repos| from the main |RCE| interface the system
-can become slow when the cache expires. Increasing the cache expiration
-option improves the response times of the main user interface.
-To increase your cache size, change the following default value in the
-:file:`/home/{user}/.rccontrol/{instance-id}/rhodecode.ini` file. The value
-is specified in seconds.
-
-.. code-block:: ini
-
-    beaker.cache.long_term.expire=3600 # day (86400) week (604800)
-
-.. note:: The |RCE| cache automatically expires for changed |repos|.
diff --git a/docs/admin/tuning-increase-db-performance.rst b/docs/admin/tuning-increase-db-performance.rst
--- a/docs/admin/tuning-increase-db-performance.rst
+++ b/docs/admin/tuning-increase-db-performance.rst
@@ -1,7 +1,7 @@
 .. _db-session-ref:
 
-Increase Database Performance
------------------------------
+Database Performance
+--------------------
 
 For tuning PostgreSQL we recommend reading:
 http://www.revsys.com/writings/postgresql-performance.html
diff --git a/docs/admin/tuning-rhodecode.rst b/docs/admin/tuning-rhodecode.rst
--- a/docs/admin/tuning-rhodecode.rst
+++ b/docs/admin/tuning-rhodecode.rst
@@ -12,8 +12,7 @@ may find some of the following methods u
    tuning-vcs-memory-cache
    tuning-user-sessions-performance
    tuning-increase-db-performance
-   tuning-scale-horizontally
-   tuning-increase-cache-size
+   tuning-scale-horizontally-cluster
    tuning-mount-cache-memory
    tuning-change-encoding
    tuning-change-large-file-dir
diff --git a/docs/admin/tuning-scale-horizontally.rst b/docs/admin/tuning-scale-horizontally-cluster.rst
rename from docs/admin/tuning-scale-horizontally.rst
rename to docs/admin/tuning-scale-horizontally-cluster.rst
--- a/docs/admin/tuning-scale-horizontally.rst
+++ b/docs/admin/tuning-scale-horizontally-cluster.rst
@@ -1,58 +1,383 @@
-.. _scale-horizontal:
+.. _scale-horizontal-cluster:
+
 
-Scale Horizontally
-------------------
+Scale Horizontally / RhodeCode Cluster
+--------------------------------------
 
-|RCE| is built in a way it support horizontal scaling across multiple machines.
-There are two main pre-requisites for that:
+|RCE| is built in a way that supports horizontal scaling across multiple machines.
+There are three main pre-requisites for that:
 
-- Shared storage that each machine can access.
-- Shared DB connection across machines.
+- Shared storage that each machine can access, using NFS or another shared storage system.
+- A shared DB connection across machines, using `MySQL`/`PostgreSQL` that each node can access.
+- |RCE| user sessions and caches need to use shared storage (e.g. `Redis`_/`Memcached`).
 
 Horizontal scaling means adding more machines or workers into your pool of
 resources. Horizontally scaling |RCE| gives a huge performance increase,
-especially under large traffic scenarios with a high number of requests. This
-is very beneficial when |RCE| is serving many users simultaneously,
+especially under large traffic scenarios with a high number of requests.
+This is very beneficial when |RCE| is serving many users simultaneously,
 or if continuous integration servers are automatically pulling and pushing code.
+It also adds High-Availability to your running system.
+
+
+Cluster Overview
+^^^^^^^^^^^^^^^^
+
+Below we'll present a configuration example that will use two separate nodes to serve
+|RCE| in a load-balanced environment. The 3rd node will act as shared storage/cache
+and handle load-balancing. In addition, the 3rd node will be used as a shared database
+instance.
+
+This setup can be used both in a Docker based configuration or with individual
+physical/virtual machines. Using the 3rd node for Storage/Redis/PostgreSQL/Nginx is
+optional. All those components can be installed on one of the two nodes used for |RCE|.
+We'll use the following naming for our nodes:
+
+ - `rc-node-1` (NFS, DB, Cache node)
+ - `rc-node-2` (Worker node1)
+ - `rc-node-3` (Worker node2)
+
+Our shared NFS storage in this example is located at `/home/rcdev/storage` and
+it's RW accessible on **each** node.
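+
+As a sanity check it's worth verifying the shared storage from every node before
+any |RCE| configuration is changed. The snippet below is only an illustration,
+assuming a standard NFSv4 client and that `rc-node-1` exports `/home/rcdev/storage`;
+adjust the export path and mount options to your environment.
+
+.. code-block:: bash
+
+    # on rc-node-2 and rc-node-3: mount the storage exported by rc-node-1
+    # (assumes an NFSv4 export; adjust to your setup)
+    sudo mount -t nfs4 rc-node-1:/home/rcdev/storage /home/rcdev/storage
+
+    # verify RW access by writing a file and listing it back
+    touch /home/rcdev/storage/rw-test-$(hostname)
+    ls -l /home/rcdev/storage/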
+
+In this example we use certain recommended components, however many
+of them can be replaced by others, in case your organization already uses them, for example:
+
+- `MySQL`/`PostgreSQL`: aren't replaceable and are the only two supported databases.
+- `Nginx`_ on `rc-node-1` can be replaced by: `Hardware Load Balancer (F5)`, `Apache`_, `HA-Proxy` etc.
+- `Nginx`_ on rc-node-2/3 acts as a reverse proxy and can be replaced by another HTTP server
+  acting as a reverse proxy, such as `Apache`_.
+- `Redis`_ on `rc-node-1` can be replaced by: `Memcached`.
+
+
+Here's an overview of which components should be installed/set up on each server in our example:
+
+- **rc-node-1**:
+
+  - main storage acting as NFS host.
+  - `nginx` acting as a load-balancer.
+  - `postgresql-server` used for database and sessions.
+  - `redis-server` used for storing shared caches.
+  - optionally `rabbitmq-server` for `Celery` if used.
+  - optionally, if `Celery` is used, an Enterprise/Community instance + VCSServer.
+  - optionally a mailserver that can be shared by other instances.
+  - optionally a channelstream server to handle live communication for all instances.
+
+
+- **rc-node-2/3**:
+
+  - `nginx` acting as a reverse proxy to handle requests to |RCE|.
+  - 1x RhodeCode Enterprise/Community instance.
+  - 1x VCSServer instance.
+  - optionally for testing connection: postgresql-client, redis-client (redis-tools).
+
+
+Before we start, here are a few assumptions that should be fulfilled:
+
+- make sure each node can access the others.
+- make sure `Redis`_/`MySQL`/`PostgreSQL`/`RabbitMQ`_ are running on `rc-node-1`.
+- make sure both `rc-node-2`/`3` can access the NFS storage with RW access.
+- make sure rc-node-2/3 can access the `Redis`_/`PostgreSQL`/`MySQL` database on `rc-node-1`.
+- make sure `Redis`_/Database/`RabbitMQ`_ are password protected and accessible only from rc-node-2/3.
+
 
-If you scale across different machines, each |RCM| instance
-needs to store its data on a shared disk, preferably together with your
-|repos|. This data directory contains template caches, a full text search index,
-and is used for task locking to ensure safety across multiple instances.
-To do this, set the following properties in the :file:`rhodecode.ini` file to
-set the shared location across all |RCM| instances.
+Setup rc-node-2/3
+^^^^^^^^^^^^^^^^^
+
+Before `rc-node-1`, we'll initially configure both nodes 2 and 3 to operate as standalone
+nodes with their own hostnames. Use default installation settings, and use
+the default local addresses (127.0.0.1) to configure VCSServer and Community/Enterprise instances.
+All external connectivity will be handled by the reverse proxy (`Nginx`_ in our example).
+
+This way we can ensure each individual host works, accepts connections,
+and we can do some operations explicitly on a chosen node.
+
+In addition this allows us to explicitly direct certain traffic to a node, e.g.
+a CI server will only call `rc-node-3` directly. This should be done similar to a normal
+installation, so check out the `Nginx`_/`Apache`_ configuration examples to configure each host.
+Each one should already connect to the shared database during installation.
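+
+Before changing any configuration it's also a good idea to confirm that both worker
+nodes can reach the shared services on `rc-node-1`. A minimal check using the
+optional client tools listed above (the password `qweqwe` is only the example
+value used throughout this chapter, use your own):
+
+.. code-block:: bash
+
+    # from rc-node-2 and rc-node-3: check PostgreSQL connectivity
+    psql -h rc-node-1 -U postgres -c 'SELECT 1;'
+
+    # check Redis connectivity (requires redis-tools)
+    redis-cli -h rc-node-1 -a qweqwe ping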
+
+
+1) Assuming our final url will be http://rc-node-1, configure `instance_id` and `app.base_url`.
+
+a) On **rc-node-2** find the following settings and edit :file:`/home/{user}/.rccontrol/{instance-id}/rhodecode.ini`
+
+.. code-block:: ini
+
+    ## required format is: *NAME-
+    instance_id = *rc-node-2-
+    app.base_url = http://rc-node-1
+
+
+b) On **rc-node-3** find the following settings and edit :file:`/home/{user}/.rccontrol/{instance-id}/rhodecode.ini`
+
+.. code-block:: ini
+
+    ## required format is: *NAME-
+    instance_id = *rc-node-3-
+    app.base_url = http://rc-node-1
+
+
+2) Configure the `User Session` to use a shared database. Example config that should be
+   changed on both nodes 2 and 3. Edit :file:`/home/{user}/.rccontrol/{instance-id}/rhodecode.ini`
+
+.. code-block:: ini
+
+    ####################################
+    ###       BEAKER SESSION        ####
+    ####################################
+
+    ## Disable the default `file` sessions
+    #beaker.session.type = file
+    #beaker.session.data_dir = %(here)s/data/sessions
+
+    ## use shared db based session, fast, and allows easy management over logged in users
+    beaker.session.type = ext:database
+    beaker.session.table_name = db_session
+    # use our rc-node-1 here
+    beaker.session.sa.url = postgresql://postgres:qweqwe@rc-node-1/rhodecode
+    beaker.session.sa.pool_recycle = 3600
+    beaker.session.sa.echo = false
+
+In addition make sure both instances use the same `session.secret` so users have
+persistent sessions across nodes. Please generate a different one than the one in
+this example.
+
+.. code-block:: ini
+
+    # use a unique generated long string
+    beaker.session.secret = 70e116cae2274656ba7265fd860aebbd
+
+3) Configure the cache storage and archive cache to use our shared NFS on `rc-node-1`.
 
 .. code-block:: ini
 
-    cache_dir = /shared/path/caches              # set to shared location
-    search.location = /shared/path/search_index  # set to shared location
+    # note the `_` prefix that allows using a directory without
+    # remap and rescan checking for vcs inside it.
+    cache_dir = /home/rcdev/storage/_cache_dir/data
+    # note: the archive cache dir is disabled by default, however if you
+    # enable it, it also needs to be shared
+    #archive_cache_dir = /home/rcdev/storage/_tarball_cache_dir
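+
+The `_`-prefixed directories referenced above live on the shared storage, so it's
+worth creating them up front on the NFS mount. A small sketch, assuming the
+example paths used in this chapter:
+
+.. code-block:: bash
+
+    # on any node with RW access to the shared storage
+    mkdir -p /home/rcdev/storage/_cache_dir/data
+    mkdir -p /home/rcdev/storage/_tarball_cache_dir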
+
+
+4) Change cache backends to use `Redis`_ based caches. Below is a full example config
+   that replaces the default file-based cache with shared `Redis`_ using a distributed lock.
+
+.. code-block:: ini
+
+    #####################################
+    ###        DOGPILE CACHE         ####
+    #####################################
+
+    ## `cache_perms` cache settings for permission tree, auth TTL.
+    #rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
+    #rc_cache.cache_perms.expiration_time = 300
+
+    ## alternative `cache_perms` redis backend with distributed lock
+    rc_cache.cache_perms.backend = dogpile.cache.rc.redis
+    rc_cache.cache_perms.expiration_time = 300
+    ## redis_expiration_time needs to be greater than expiration_time
+    rc_cache.cache_perms.arguments.redis_expiration_time = 7200
+    rc_cache.cache_perms.arguments.socket_timeout = 30
+    rc_cache.cache_perms.arguments.host = rc-node-1
+    rc_cache.cache_perms.arguments.password = qweqwe
+    rc_cache.cache_perms.arguments.port = 6379
+    rc_cache.cache_perms.arguments.db = 0
+    rc_cache.cache_perms.arguments.distributed_lock = true
+
+    ## `cache_repo` cache settings for FileTree, Readme, RSS FEEDS
+    #rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
+    #rc_cache.cache_repo.expiration_time = 2592000
+
+    ## alternative `cache_repo` redis backend with distributed lock
+    rc_cache.cache_repo.backend = dogpile.cache.rc.redis
+    rc_cache.cache_repo.expiration_time = 2592000
+    ## redis_expiration_time needs to be greater than expiration_time
+    rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
+    rc_cache.cache_repo.arguments.socket_timeout = 30
+    rc_cache.cache_repo.arguments.host = rc-node-1
+    rc_cache.cache_repo.arguments.password = qweqwe
+    rc_cache.cache_repo.arguments.port = 6379
+    rc_cache.cache_repo.arguments.db = 1
+    rc_cache.cache_repo.arguments.distributed_lock = true
+
+    ## cache settings for SQL queries, this needs to use memory type backend
+    rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
+    rc_cache.sql_cache_short.expiration_time = 30
+
+    ## `cache_repo_longterm` cache for repo object instances, this needs to use memory
+    ## type backend as the objects kept are not pickle serializable
+    rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
+    ## by default we use 96H, this is using invalidation on push anyway
+    rc_cache.cache_repo_longterm.expiration_time = 345600
+    ## max items in LRU cache, reduce this number to save memory, and expire last used
+    ## cached objects
+    rc_cache.cache_repo_longterm.max_size = 10000
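+
+After restarting the instances you can verify that cache keys really end up in
+`Redis`_ rather than in local files. A quick check, assuming the databases from
+the example above (db 0 for permission caches, db 1 for repo caches):
+
+.. code-block:: bash
+
+    # from rc-node-2/3: list cache keys written by the instances
+    redis-cli -h rc-node-1 -a qweqwe -n 0 keys '*'
+    redis-cli -h rc-node-1 -a qweqwe -n 1 keys '*'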
+
+
+5) Configure `Nginx`_ as a reverse proxy on `rc-node-2/3`.
+   A minimal `Nginx`_ config used:
+
-    ####################################
-    ###        BEAKER CACHE         ####
-    ####################################
-    beaker.cache.data_dir = /shared/path/data  # set to shared location
-    beaker.cache.lock_dir = /shared/path/lock  # set to shared location
+.. code-block:: nginx
+
+    ## rate limiter for certain pages to prevent brute force attacks
+    limit_req_zone $binary_remote_addr zone=req_limit:10m rate=1r/s;
+
+    ## custom log format
+    log_format log_custom '$remote_addr - $remote_user [$time_local] '
+                          '"$request" $status $body_bytes_sent '
+                          '"$http_referer" "$http_user_agent" '
+                          '$request_time $upstream_response_time $pipe';
+
+    server {
+        listen 80;
+        server_name rc-node-2;
+        #server_name rc-node-3;
+
+        access_log /var/log/nginx/rhodecode.access.log log_custom;
+        error_log /var/log/nginx/rhodecode.error.log;
+
+        # example of proxy.conf can be found in our docs.
+        include /etc/nginx/proxy.conf;
+
+        ## serve static files by Nginx, recommended for performance
+        location /_static/rhodecode {
+            gzip on;
+            gzip_min_length 500;
+            gzip_proxied any;
+            gzip_comp_level 4;
+            gzip_types text/css text/javascript text/xml text/plain text/x-component application/javascript application/json application/xml application/rss+xml font/truetype font/opentype application/vnd.ms-fontobject image/svg+xml;
+            gzip_vary on;
+            gzip_disable "msie6";
+            #alias /home/rcdev/.rccontrol/community-1/static;
+            alias /home/rcdev/.rccontrol/enterprise-1/static;
+        }
+
+        ## rate limit this endpoint to prevent login page brute-force attacks
+        location /_admin/login {
+            limit_req zone=req_limit burst=10 nodelay;
+            try_files $uri @rhode;
+        }
+
+        location / {
+            try_files $uri @rhode;
+        }
+
+        location @rhode {
+            # Url to running RhodeCode instance.
+            # This is shown as `- URL: ` in output from rccontrol status.
+            proxy_pass http://127.0.0.1:10020;
+        }
+
+        ## custom 502 error page. Will be displayed while RhodeCode server
+        ## is turned off
+        error_page 502 /502.html;
+        location = /502.html {
+            #root /home/rcdev/.rccontrol/community-1/static;
+            root /home/rcdev/.rccontrol/enterprise-1/static;
+        }
+    }
+
+
+6) Optional: Full text search. In case you use `Whoosh` full text search we also need
+   shared storage for the index. In our example our NFS is mounted at `/home/rcdev/storage`,
+   which represents our storage, so we can use the following:
 
 .. note::
 
-   If you use custom caches such as `beaker.cache.auth_plugins.` it's recommended
-   to set it to the memcached/redis or database backend so it can be shared
-   across machines.
+   If you use ElasticSearch it's shared by default, and simply running an ES node
+   is cluster compatible by default.
+
+
+7) Optional: If you intend to use mailing, all instances need to use either a shared
+   mailing node, or each will use an individual local mail agent. Simply put, node-1/2/3
+   need to use the same mailing configuration.
+
+
+
+Setup rc-node-1
+^^^^^^^^^^^^^^^
+
-It is recommended to create another dedicated |RCE| instance to handle
-traffic from build farms or continuous integration servers.
+Configure `Nginx`_ as a Load Balancer to rc-node-2/3.
+A minimal `Nginx`_ example below:
+
+.. code-block:: nginx
+
+    ## define rc-cluster which contains a pool of our instances to connect to
+    upstream rc-cluster {
+        # rc-node-2/3 are stored in /etc/hosts with correct IP addresses
+        server rc-node-2:80;
+        server rc-node-3:80;
+    }
+
+    server {
+        listen 80;
+        server_name rc-node-1;
+
+        location / {
+            proxy_pass http://rc-cluster;
+        }
+    }
 
 .. note::
 
    You should configure your load balancing accordingly. We recommend writing
    load balancing rules that will separate regular user traffic from
-   automated process traffic like continuous servers or build bots.
+   automated process traffic like continuous integration servers or build
+   bots. Sticky sessions are not required.
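+
+Once both backends are configured you can verify the whole chain with plain HTTP
+requests, first against each worker node directly and then through the balancer.
+A minimal sketch using the hostnames from this example:
+
+.. code-block:: bash
+
+    # each worker node should answer on its own
+    curl -I http://rc-node-2/_admin/login
+    curl -I http://rc-node-3/_admin/login
+
+    # the load balancer on rc-node-1 should round-robin across both nodes
+    curl -I http://rc-node-1/_admin/login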
+
+
+Show which instance handles a request
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You can easily check if load-balancing is working as expected. Visit our main node
+`rc-node-1` URL, which at that point should already handle incoming requests and
+balance them across node-2/3.
+
+Add a special GET param `?showrcid=1` to show the current instance handling your request.
+
+For example: visiting the url `http://rc-node-1/?showrcid=1` will show, at the bottom
+of the screen, the cluster instance info,
+e.g: `RhodeCode instance id: rc-node-3-rc-node-3-3246`,
+which is generated from::
+
+    <instance_id>-<hostname>-<pid>
+
+
+Using Celery with cluster
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If `Celery` is used we recommend also setting up an instance of Enterprise/Community
++ VCSServer on the node that is running `RabbitMQ`_. Those instances will be used to
+execute async tasks on `rc-node-1`. This is the most efficient setup. `Celery` usually
+handles tasks such as sending emails, forking repositories, importing
+repositories from an external location etc. Using workers on the instance that has
+direct access to the disks used by NFS, as well as to the email server, gives a
+noticeable performance boost. Running workers local to the NFS storage results in
+faster execution when forking large repositories or sending lots of emails.
+
+Those instances need to be configured in the same way as the other nodes.
+The instance on rc-node-1 can be added to the cluster, but we don't recommend doing it.
+For best results let it be isolated to only executing `Celery` tasks in the cluster setup.
+
+
+.. _Gunicorn: http://gunicorn.org/
+.. _Whoosh: https://pypi.python.org/pypi/Whoosh/
+.. _Elasticsearch: https://www.elastic.co/
+.. _RabbitMQ: http://www.rabbitmq.com/
+.. _Nginx: http://nginx.org
+.. _Apache: http://httpd.apache.org/
+.. _Redis: http://redis.io
+
diff --git a/docs/admin/tuning-user-sessions-performance.rst b/docs/admin/tuning-user-sessions-performance.rst
--- a/docs/admin/tuning-user-sessions-performance.rst
+++ b/docs/admin/tuning-user-sessions-performance.rst
@@ -1,7 +1,7 @@
 .. _user-session-ref:
 
-Increase User Session Performance
----------------------------------
+User Session Performance
+------------------------
 
 The default file-based sessions are only suitable for smaller setups, or
 instances that doesn't have a lot of users or traffic.
@@ -24,8 +24,9 @@ your :file:`/home/{user}/.rccontrol/{ins
     beaker.session.type = ext:database
     beaker.session.table_name = db_session
 
-    # use just one of the following accoring to the type of database
+    # use just one of the following according to the type of database
     beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
+    # or beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
 
     beaker.session.sa.pool_recycle = 3600
diff --git a/docs/admin/tuning-vcs-memory-cache.rst b/docs/admin/tuning-vcs-memory-cache.rst
--- a/docs/admin/tuning-vcs-memory-cache.rst
+++ b/docs/admin/tuning-vcs-memory-cache.rst
@@ -1,7 +1,7 @@
 .. _adjust-vcs-mem-cache:
 
-Adjusting VCS Memory Cache
---------------------------
+VCSServer Memory Cache
+----------------------
 
-The VCS Server mamory cache can be adjusted to work best with the resources
+The VCS Server memory cache can be adjusted to work best with the resources
 available to your |RCE| instance. If you find that memory resources are under