docs: updated performance/tuning docs according to latest code changes
dan -
r4172:0f4eef6e default
@@ -0,0 +1,26 b''
1 .. _adjust-rhodecode-mem:
2
3 RhodeCode Memory Usage
4 ----------------------
5
6 Starting from version 4.18.X, RhodeCode has a built-in memory monitor for gunicorn workers.
7 Enabling it limits the maximum amount of memory each worker can use. Each worker
8 for RhodeCode is monitored independently.
9 To enable memory management, make sure the following settings are present in the ``[app:main]`` section of the
10 :file:`/home/{user}/.rccontrol/{instance-id}/rhodecode.ini` file.
11
12
13
14 ; Maximum memory usage that each worker can use before it will receive a
15 ; graceful restart signal. 0 = memory monitoring is disabled
16 ; Examples: 268435456 (256MB), 536870912 (512MB)
17 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
18 memory_max_usage = 1073741824
19
20 ; How often in seconds to check for memory usage for each gunicorn worker
21 memory_usage_check_interval = 60
22
23 ; Threshold value for which we don't recycle a worker if GarbageCollection
24 ; frees up enough resources. Before each restart we try to run GC on the worker;
25 ; if we get enough free memory after that, the restart will not happen.
26 memory_usage_recovery_threshold = 0.8
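For reference, the example byte values above are plain megabyte multiples. A minimal Python sketch (purely illustrative, not part of RhodeCode) for computing a ``memory_max_usage`` value from a size in megabytes:

.. code-block:: python

    # Convert a human-readable size in megabytes to the byte value
    # expected by memory_max_usage (illustrative helper).
    def to_bytes(megabytes):
        return megabytes * 1024 * 1024

    print(to_bytes(256))   # 268435456  (256MB)
    print(to_bytes(1024))  # 1073741824 (1GB)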
@@ -9,7 +9,8 b' may find some of the following methods u'
9 9 .. toctree::
10 10
11 11 tuning/tuning-gunicorn
12 tuning/tuning-vcs-memory-cache
12 tuning/tuning-vcs-server-memory-usage
13 tuning/tuning-rhodecode-memory-usage
13 14 tuning/tuning-user-sessions-performance
14 15 tuning/tuning-increase-db-performance
15 16 tuning/tuning-scale-horizontally-cluster
@@ -25,26 +25,22 b' 2. In the ``[server:main]`` section, cha'
25 25
26 26 .. code-block:: ini
27 27
28 use = egg:gunicorn#main
29 ## Sets the number of process workers. You must set `instance_id = *`
30 ## when this option is set to more than one worker, recommended
31 ## value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers
32 ## The `instance_id = *` must be set in the [app:main] section below
33 workers = 4
34 ## process name
35 proc_name = rhodecode
36 ## type of worker class, one of sync, gevent
37 ## recommended for bigger setup is using of of other than sync one
38 worker_class = sync
39 ## The maximum number of simultaneous clients. Valid only for Gevent
40 #worker_connections = 10
41 ## max number of requests that worker will handle before being gracefully
42 ## restarted, could prevent memory leaks
43 max_requests = 1000
44 max_requests_jitter = 30
45 ## amount of time a worker can spend with handling a request before it
46 ## gets killed and restarted. Set to 6hrs
47 timeout = 21600
28 ; Sets the number of process workers. More workers means more concurrent connections
29 ; RhodeCode can handle at the same time. Each additional worker also increases
30 ; memory usage, as each has its own set of caches.
31 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
32 ; than 8-10, except for really big deployments, e.g. 700-1000 users.
33 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
34 ; when using more than 1 worker.
35 workers = 6
36
37 ; Type of worker class, one of `sync`, `gevent`
38 ; Use `gevent` for rhodecode
39 worker_class = gevent
40
41 ; The maximum number of simultaneous clients per worker. Valid only for gevent
42 worker_connections = 10
43
48 44
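The (2 * NUMBER_OF_CPUS + 1) rule of thumb above, capped at roughly 8-10 workers, can be sketched in a few lines of Python (illustrative only; adjust the cap to your deployment size):

.. code-block:: python

    import multiprocessing

    # Rule-of-thumb worker count: (2 * CPUs) + 1, capped at 10 as the docs
    # suggest for anything short of very large deployments.
    cpus = multiprocessing.cpu_count()
    workers = min(2 * cpus + 1, 10)
    print(workers)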
49 45 3. In the ``[app:main]`` section, set the ``instance_id`` property to ``*``.
50 46
@@ -63,24 +59,19 b' 5. In the ``[server:main]`` section, inc'
63 59
64 60 .. code-block:: ini
65 61
66 ## run with gunicorn --log-config vcsserver.ini --paste vcsserver.ini
67 use = egg:gunicorn#main
68 ## Sets the number of process workers. Recommended
69 ## value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers
70 workers = 4
71 ## process name
72 proc_name = rhodecode_vcsserver
73 ## type of worker class, currently `sync` is the only option allowed.
62 ; Sets the number of process workers. More workers means more concurrent connections
63 ; RhodeCode can handle at the same time. Each additional worker also increases
64 ; memory usage, as each has its own set of caches.
65 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
66 ; than 8-10, except for really big deployments, e.g. 700-1000 users.
67 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
68 ; when using more than 1 worker.
69 workers = 8
70
71 ; Type of worker class, one of `sync`, `gevent`
72 ; Use `sync` for vcsserver
74 73 worker_class = sync
75 ## The maximum number of simultaneous clients. Valid only for Gevent
76 #worker_connections = 10
77 ## max number of requests that worker will handle before being gracefully
78 ## restarted, could prevent memory leaks
79 max_requests = 1000
80 max_requests_jitter = 30
81 ## amount of time a worker can spend with handling a request before it
82 ## gets killed and restarted. Set to 6hrs
83 timeout = 21600
74
84 75
85 76 6. Save your changes.
86 77 7. Restart your |RCE| instances, using the following command:
@@ -109,17 +100,18 b' 2. In the ``[server:main]`` section, cha'
109 100
110 101 .. code-block:: ini
111 102
112 ## type of worker class, one of sync, gevent
113 ## recommended for bigger setup is using of of other than sync one
103 ; Type of worker class, one of `sync`, `gevent`
104 ; Use `gevent` for rhodecode
114 105 worker_class = gevent
115 ## The maximum number of simultaneous clients. Valid only for Gevent
106
107 ; The maximum number of simultaneous clients per worker. Valid only for gevent
116 108 worker_connections = 30
117 109
118 110
119 111 .. note::
120 112
121 113 `Gevent` is currently only supported for Enterprise/Community instances.
122 VCSServer doesn't yet support gevent.
114 VCSServer doesn't support gevent.
123 115
124 116
125 117
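With the `gevent` worker class, the rough upper bound on simultaneous clients is the number of workers multiplied by ``worker_connections``. A quick back-of-the-envelope check in Python (illustrative, assuming the example values of 6 workers and 30 connections each):

.. code-block:: python

    # Approximate concurrent-client capacity for a gevent setup
    # (assumed example values, not a measured figure).
    workers = 6
    worker_connections = 30
    print(workers * worker_connections)  # ~180 simultaneous clients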
@@ -1,8 +1,26 b''
1 .. _adjust-vcs-mem-cache:
1 .. _adjust-vcs-server-mem:
2 2
3 VCSServer Memory Cache
3 VCSServer Memory Usage
4 4 ----------------------
5 5
6 The VCS Server mamory cache can be adjusted to work best with the resources
7 available to your |RCE| instance. If you find that memory resources are under
8 pressure, see the :ref:`vcs-server-maintain` section for details.
6 Starting from version 4.18.X, RhodeCode has a built-in memory monitor for gunicorn workers.
7 Enabling it limits the maximum amount of memory each worker can use. Each worker
8 for VCS Server is monitored independently.
9 To enable memory management, make sure the following settings are present in the ``[app:main]`` section of the
10 :file:`/home/{user}/.rccontrol/{instance-id}/vcsserver.ini` file.
11
12
13
14 ; Maximum memory usage that each worker can use before it will receive a
15 ; graceful restart signal. 0 = memory monitoring is disabled
16 ; Examples: 268435456 (256MB), 536870912 (512MB)
17 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
18 memory_max_usage = 1073741824
19
20 ; How often in seconds to check for memory usage for each gunicorn worker
21 memory_usage_check_interval = 60
22
23 ; Threshold value for which we don't recycle a worker if GarbageCollection
24 ; frees up enough resources. Before each restart we try to run GC on the worker;
25 ; if we get enough free memory after that, the restart will not happen.
26 memory_usage_recovery_threshold = 0.8
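The recycle decision described by these comments can be pictured with a short Python sketch. This is purely illustrative, not RhodeCode's actual implementation, and ``get_worker_memory_usage()`` is a hypothetical helper:

.. code-block:: python

    import gc

    def should_restart(current_usage, memory_max_usage, recovery_threshold=0.8):
        # Below the limit: nothing to do.
        if current_usage <= memory_max_usage:
            return False
        # Over the limit: try a GC pass first, then re-check usage.
        gc.collect()
        usage_after_gc = get_worker_memory_usage()  # hypothetical helper
        return usage_after_gc > memory_max_usage * recovery_threshold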
@@ -110,35 +110,39 b' match, for example:'
110 110
111 111 .. _vcs-server-maintain:
112 112
113 VCS Server Memory Optimization
114 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
113 VCS Server Cache Optimization
114 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
115 115
116 To optimize the VCS server to manage the cache and memory usage efficiently, you need to
117 configure the following options in the
118 :file:`/home/{user}/.rccontrol/{vcsserver-id}/vcsserver.ini` file. Once
119 configured, restart the VCS Server. By default we use an optimal settings, but in certain
120 conditions tunning expiration_time and max_size can affect memory usage and performance
116 To optimize how the VCS Server manages cache and memory usage, it is recommended to
117 configure the Redis backend for the VCSServer caches.
118 Once configured, restart the VCS Server.
119
120 Make sure Redis is installed and running.
121 Open :file:`/home/{user}/.rccontrol/{vcsserver-id}/vcsserver.ini`
122 file and ensure the settings below for the `repo_object` cache are set:
121 123
122 124 .. code-block:: ini
123 125
124 ## cache region for storing repo_objects cache
125 rc_cache.repo_object.backend = dogpile.cache.rc.memory_lru
126 ; ensure the default file based cache is *commented out*
127 ##rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
128 ##rc_cache.repo_object.expiration_time = 2592000
126 129
127 ## cache auto-expires after N seconds, setting this to 0 disabled cache
128 rc_cache.repo_object.expiration_time = 300
130 ; `repo_object` cache settings for vcs methods for repositories
131 rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
129 132
130 ## max size of LRU, old values will be discarded if the size of cache reaches max_size
131 ## Sets the maximum number of items stored in the cache, before the cache
132 ## starts to be cleared.
133 ; cache auto-expires after N seconds
134 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
135 rc_cache.repo_object.expiration_time = 2592000
136
137 ; redis_expiration_time needs to be greater than expiration_time
138 rc_cache.repo_object.arguments.redis_expiration_time = 3592000
133 139
134 ## As a general rule of thumb, running this value at 120 resulted in a
135 ## 5GB cache. Running it at 240 resulted in a 9GB cache. Your results
136 ## will differ based on usage patterns and |repo| sizes.
137
138 ## Tweaking this value to run at a fairly constant memory load on your
139 ## server will help performance.
140
141 rc_cache.repo_object.max_size = 120
140 rc_cache.repo_object.arguments.host = localhost
141 rc_cache.repo_object.arguments.port = 6379
142 rc_cache.repo_object.arguments.db = 5
143 rc_cache.repo_object.arguments.socket_timeout = 30
144 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
145 rc_cache.repo_object.arguments.distributed_lock = true
142 146
143 147
144 148 To clear the cache completely, you can restart the VCS Server.
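Before pointing the cache at Redis, it can be worth confirming that the server is reachable on the configured host, port, and database. A minimal check using the ``redis`` Python client (an assumption; any Redis client will do):

.. code-block:: python

    import redis

    # Ping the Redis instance referenced by the rc_cache settings above.
    client = redis.Redis(host='localhost', port=6379, db=5, socket_timeout=30)
    print(client.ping())  # True if Redis is up and reachable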
@@ -190,25 +194,6 b' For a more detailed explanation of the l'
190 194 \port <int>
191 195 Set the port number on which the VCS Server will be available.
192 196
193 \locale <locale_utf>
194 Set the locale the VCS Server expects.
195
196 \workers <int>
197 Set the number of process workers.Recommended
198 value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers
199
200 \max_requests <int>
201 The maximum number of requests a worker will process before restarting.
202 Any value greater than zero will limit the number of requests a work
203 will process before automatically restarting. This is a simple method
204 to help limit the damage of memory leaks.
205
206 \max_requests_jitter <int>
207 The maximum jitter to add to the max_requests setting.
208 The jitter causes the restart per worker to be randomized by
209 randint(0, max_requests_jitter). This is intended to stagger worker
210 restarts to avoid all workers restarting at the same time.
211
212 197
213 198 .. note::
214 199
@@ -216,63 +201,139 b' For a more detailed explanation of the l'
216 201
217 202 .. code-block:: ini
218 203
219 ################################################################################
220 # RhodeCode VCSServer with HTTP Backend - configuration #
221 # #
222 ################################################################################
223
204 ; #################################
205 ; RHODECODE VCSSERVER CONFIGURATION
206 ; #################################
224 207
225 208 [server:main]
226 ## COMMON ##
209 ; COMMON HOST/IP CONFIG
227 210 host = 127.0.0.1
228 211 port = 10002
229 212
230 ##########################
231 ## GUNICORN WSGI SERVER ##
232 ##########################
233 ## run with gunicorn --log-config vcsserver.ini --paste vcsserver.ini
213 ; ###########################
214 ; GUNICORN APPLICATION SERVER
215 ; ###########################
216
217 ; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
218
219 ; Module to use, this setting shouldn't be changed
234 220 use = egg:gunicorn#main
235 ## Sets the number of process workers. Recommended
236 ## value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers
237 workers = 3
238 ## process name
221
222 ; Sets the number of process workers. More workers means more concurrent connections
223 ; RhodeCode can handle at the same time. Each additional worker also increases
224 ; memory usage, as each has its own set of caches.
225 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
226 ; than 8-10, except for really big deployments, e.g. 700-1000 users.
227 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
228 ; when using more than 1 worker.
229 workers = 6
230
231 ; Gunicorn access log level
232 loglevel = info
233
234 ; Process name visible in process list
239 235 proc_name = rhodecode_vcsserver
240 ## type of worker class, one of sync, gevent
241 ## recommended for bigger setup is using of of other than sync one
236
237 ; Type of worker class, one of sync, gevent
238 ; currently `sync` is the only option allowed.
242 239 worker_class = sync
243 ## The maximum number of simultaneous clients. Valid only for Gevent
244 #worker_connections = 10
245 ## max number of requests that worker will handle before being gracefully
246 ## restarted, could prevent memory leaks
240
241 ; The maximum number of simultaneous clients. Valid only for gevent
242 worker_connections = 10
243
244 ; Max number of requests that a worker will handle before being gracefully restarted.
245 ; Helps prevent memory leaks; jitter adds variability so that not all workers restart at once.
247 246 max_requests = 1000
248 247 max_requests_jitter = 30
249 ## amount of time a worker can spend with handling a request before it
250 ## gets killed and restarted. Set to 6hrs
248
249 ; Amount of time a worker can spend handling a request before it
250 ; gets killed and restarted. By default set to 21600 (6hrs)
251 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
251 252 timeout = 21600
252 253
254 ; The maximum size of HTTP request line in bytes.
255 ; 0 for unlimited
256 limit_request_line = 0
257
258 ; Limit the number of HTTP header fields in a request.
259 ; By default this value is 100 and can't be larger than 32768.
260 limit_request_fields = 32768
261
262 ; Limit the allowed size of an HTTP request header field.
263 ; Value is a positive number or 0.
264 ; Setting it to 0 will allow unlimited header field sizes.
265 limit_request_field_size = 0
266
267 ; Timeout for graceful workers restart.
268 ; After receiving a restart signal, workers have this much time to finish
269 ; serving requests. Workers still alive after the timeout (starting from the
270 ; receipt of the restart signal) are force killed.
271 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
272 graceful_timeout = 3600
273
274 ; The number of seconds to wait for requests on a Keep-Alive connection.
275 ; Generally set in the 1-5 seconds range.
276 keepalive = 2
277
278 ; Maximum memory usage that each worker can use before it will receive a
279 ; graceful restart signal. 0 = memory monitoring is disabled
280 ; Examples: 268435456 (256MB), 536870912 (512MB)
281 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
282 memory_max_usage = 1073741824
283
284 ; How often in seconds to check for memory usage for each gunicorn worker
285 memory_usage_check_interval = 60
286
287 ; Threshold value for which we don't recycle a worker if GarbageCollection
288 ; frees up enough resources. Before each restart we try to run GC on the worker;
289 ; if we get enough free memory after that, the restart will not happen.
290 memory_usage_recovery_threshold = 0.8
291
292
253 293 [app:main]
254 294 use = egg:rhodecode-vcsserver
255 295
256 296 pyramid.default_locale_name = en
257 297 pyramid.includes =
258 298
259 ## default locale used by VCS systems
299 ; default locale used by VCS systems
260 300 locale = en_US.UTF-8
261 301
262 # cache regions, please don't change
263 beaker.cache.regions = repo_object
264 beaker.cache.repo_object.type = memorylru
265 beaker.cache.repo_object.max_items = 100
266 # cache auto-expires after N seconds
267 beaker.cache.repo_object.expire = 300
268 beaker.cache.repo_object.enabled = true
302 ; #############
303 ; DOGPILE CACHE
304 ; #############
305
306 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
307 ; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space
308 cache_dir = %(here)s/data
309
310 ; **********************************************************
311 ; `repo_object` cache with redis backend
312 ; recommended for larger instances, or for better performance
313 ; **********************************************************
314
315 ; `repo_object` cache settings for vcs methods for repositories
316 rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
269 317
318 ; cache auto-expires after N seconds
319 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
320 rc_cache.repo_object.expiration_time = 2592000
270 321
271 ################################
272 ### LOGGING CONFIGURATION ####
273 ################################
322 ; redis_expiration_time needs to be greater than expiration_time
323 rc_cache.repo_object.arguments.redis_expiration_time = 3592000
324
325 rc_cache.repo_object.arguments.host = localhost
326 rc_cache.repo_object.arguments.port = 6379
327 rc_cache.repo_object.arguments.db = 5
328 rc_cache.repo_object.arguments.socket_timeout = 30
329 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
330 rc_cache.repo_object.arguments.distributed_lock = true
331
332 ; #####################
333 ; LOGGING CONFIGURATION
334 ; #####################
274 335 [loggers]
275 keys = root, vcsserver, beaker
336 keys = root, vcsserver
276 337
277 338 [handlers]
278 339 keys = console
@@ -280,9 +341,9 b' For a more detailed explanation of the l'
280 341 [formatters]
281 342 keys = generic
282 343
283 #############
284 ## LOGGERS ##
285 #############
344 ; #######
345 ; LOGGERS
346 ; #######
286 347 [logger_root]
287 348 level = NOTSET
288 349 handlers = console
@@ -293,29 +354,23 b' For a more detailed explanation of the l'
293 354 qualname = vcsserver
294 355 propagate = 1
295 356
296 [logger_beaker]
297 level = DEBUG
298 handlers =
299 qualname = beaker
300 propagate = 1
301 357
302
303 ##############
304 ## HANDLERS ##
305 ##############
358 ; ########
359 ; HANDLERS
360 ; ########
306 361
307 362 [handler_console]
308 363 class = StreamHandler
309 args = (sys.stderr,)
310 level = DEBUG
364 args = (sys.stderr, )
365 level = INFO
311 366 formatter = generic
312 367
313 ################
314 ## FORMATTERS ##
315 ################
368 ; ##########
369 ; FORMATTERS
370 ; ##########
316 371
317 372 [formatter_generic]
318 format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s
373 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
319 374 datefmt = %Y-%m-%d %H:%M:%S
320 375
321 376
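For a quick sanity check of the ``generic`` formatter above, here is a minimal Python snippet that reproduces the same log line layout (illustrative only):

.. code-block:: python

    import logging

    # Mirror the `generic` formatter from the vcsserver.ini example above.
    fmt = '%(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s'
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(fmt, datefmt='%Y-%m-%d %H:%M:%S'))

    log = logging.getLogger('vcsserver')
    log.addHandler(handler)
    log.setLevel(logging.INFO)
    log.info('sample message')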