@@ -1,4 +1,4 @@
-## -*- coding: utf-8 -*-
+
 
 ; #########################################
 ; RHODECODE COMMUNITY EDITION CONFIGURATION
@@ -77,7 +77,7 @@ asyncore_use_poll = true
 ; Recommended type is `gevent`
 #worker_class = gevent
 
-; The maximum number of simultaneous clients. Valid only for gevent
+; The maximum number of simultaneous clients per worker. Valid only for gevent
 #worker_connections = 10
 
 ; The maximum number of pending connections worker will queue to handle
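A quick worked illustration of the reworded comment (the worker count below is hypothetical and not part of this changeset): with gevent, the total number of simultaneous clients an instance can serve is roughly the worker count multiplied by worker_connections.

    ; illustrative sizing only - not values from this changeset
    #workers = 4
    #worker_connections = 10
    ; ~4 * 10 = 40 simultaneous clients for the whole instance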
@@ -222,12 +222,6 @@ lang = en
 ; Settings this to true could lead to very long startup time.
 startup.import_repos = false
 
-; Uncomment and set this path to use archive download cache.
-; Once enabled, generated archives will be cached at this location
-; and served from the cache during subsequent requests for the same archive of
-; the repository.
-#archive_cache_dir = /tmp/tarballcache
-
 ; URL at which the application is running. This is used for Bootstrapping
 ; requests in context when no web request is available. Used in ishell, or
 ; SSH calls. Set this for events to receive proper url for SSH calls.
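Note that the option removed here is superseded by the archive_cache.* keys added later in this changeset; a minimal migration sketch (the old path is the previous example value, the new one is the default introduced below):

    ; before
    ;#archive_cache_dir = /tmp/tarballcache
    ; after
    ;archive_cache.store_dir = %(here)s/data/archive_cache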
@@ -381,6 +375,20 @@ file_store.backend = local
 ; path to store the uploaded binaries
 file_store.storage_path = %(here)s/data/file_store
 
+; Uncomment and set this path to control settings for archive download cache.
+; Generated repo archives will be cached at this location
+; and served from the cache during subsequent requests for the same archive of
+; the repository. This path is important to be shared across filesystems and with
+; RhodeCode and vcsserver
+
+; Default is $cache_dir/archive_cache if not set
+archive_cache.store_dir = %(here)s/data/archive_cache
+
+; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
+archive_cache.cache_size_gb = 10
+
+; By default cache uses sharding technique, this specifies how many shards are there
+archive_cache.cache_shards = 10
 
 ; #############
 ; CELERY CONFIG
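For reference, the new archive_cache keys form one small tunable block; a hedged sketch of how an operator might override them (the path and sizes below are illustrative examples, only the key names and their semantics come from this changeset):

    ; illustrative overrides - example values, not recommendations
    ; keep the store on storage reachable by both RhodeCode and vcsserver
    archive_cache.store_dir = /mnt/shared/archive_cache
    ; recycle least recently used archives once the store exceeds this size
    archive_cache.cache_size_gb = 40
    ; cached archives are spread across this many shards
    archive_cache.cache_shards = 10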
@@ -440,7 +448,7 @@ rc_cache.cache_repo_longterm.max_size =
 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_general.expiration_time = 43200
 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
-#rc_cache.cache_general.arguments.filename = /tmp/cache_general
+#rc_cache.cache_general.arguments.filename = /tmp/cache_general_db
 
 ; alternative `cache_general` redis backend with distributed lock
 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
@@ -467,7 +475,7 @@ rc_cache.cache_general.expiration_time =
 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_perms.expiration_time = 3600
 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
-#rc_cache.cache_perms.arguments.filename = /tmp/cache_perms
+#rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db
 
 ; alternative `cache_perms` redis backend with distributed lock
 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
@@ -494,7 +502,7 @@ rc_cache.cache_perms.expiration_time = 3
 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_repo.expiration_time = 2592000
 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
-#rc_cache.cache_repo.arguments.filename = /tmp/cache_repo
+#rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db
 
 ; alternative `cache_repo` redis backend with distributed lock
 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
@@ -805,7 +813,7 @@ ssh.enable_ui_key_generator = true
 #appenlight.log_namespace_blacklist =
 
 ; Statsd client config, this is used to send metrics to statsd
-; We recommend setting statsd_exported and scrape them using Prometh
+; We recommend setting statsd_exported and scrape them using Prometheus
 #statsd.enabled = false
 #statsd.statsd_host = 0.0.0.0
 #statsd.statsd_port = 8125
@@ -1,4 +1,4 b'' | |||
|
1 | ## -*- coding: utf-8 -*- | |
|
1 | ||
|
2 | 2 | |
|
3 | 3 | ; ######################################### |
|
4 | 4 | ; RHODECODE COMMUNITY EDITION CONFIGURATION |
@@ -173,12 +173,6 @@ lang = en
 ; Settings this to true could lead to very long startup time.
 startup.import_repos = false
 
-; Uncomment and set this path to use archive download cache.
-; Once enabled, generated archives will be cached at this location
-; and served from the cache during subsequent requests for the same archive of
-; the repository.
-#archive_cache_dir = /tmp/tarballcache
-
 ; URL at which the application is running. This is used for Bootstrapping
 ; requests in context when no web request is available. Used in ishell, or
 ; SSH calls. Set this for events to receive proper url for SSH calls.
@@ -332,6 +326,20 @@ file_store.backend = local
 ; path to store the uploaded binaries
 file_store.storage_path = %(here)s/data/file_store
 
+; Uncomment and set this path to control settings for archive download cache.
+; Generated repo archives will be cached at this location
+; and served from the cache during subsequent requests for the same archive of
+; the repository. This path is important to be shared across filesystems and with
+; RhodeCode and vcsserver
+
+; Default is $cache_dir/archive_cache if not set
+archive_cache.store_dir = %(here)s/data/archive_cache
+
+; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
+archive_cache.cache_size_gb = 10
+
+; By default cache uses sharding technique, this specifies how many shards are there
+archive_cache.cache_shards = 10
 
 ; #############
 ; CELERY CONFIG
@@ -391,7 +399,7 @@ rc_cache.cache_repo_longterm.max_size =
 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_general.expiration_time = 43200
 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
-#rc_cache.cache_general.arguments.filename = /tmp/cache_general
+#rc_cache.cache_general.arguments.filename = /tmp/cache_general_db
 
 ; alternative `cache_general` redis backend with distributed lock
 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
@@ -418,7 +426,7 @@ rc_cache.cache_general.expiration_time =
 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_perms.expiration_time = 3600
 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
-#rc_cache.cache_perms.arguments.filename = /tmp/cache_perms
+#rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db
 
 ; alternative `cache_perms` redis backend with distributed lock
 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
@@ -445,7 +453,7 @@ rc_cache.cache_perms.expiration_time = 3
 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_repo.expiration_time = 2592000
 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
-#rc_cache.cache_repo.arguments.filename = /tmp/cache_repo
+#rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db
 
 ; alternative `cache_repo` redis backend with distributed lock
 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
@@ -568,6 +576,9 @@ sqlalchemy.db1.pool_recycle = 3600
 
 ; the number of connections to keep open inside the connection pool.
 ; 0 indicates no limit
+; the general calculus with gevent is:
+; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
+; then increase pool size + max overflow so that they add up to 500.
 #sqlalchemy.db1.pool_size = 5
 
 ; The number of connections to allow in connection pool "overflow", that is
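A worked example of that calculus (assuming the overflow option is exposed as sqlalchemy.db1.max_overflow, which the trailing comment suggests but this hunk does not show): for roughly 500 concurrent greenlets doing database work, pick a pool size and an overflow that add up to 500.

    ; illustrative sizing for ~500 concurrent greenlets - not defaults from this changeset
    #sqlalchemy.db1.pool_size = 100
    #sqlalchemy.db1.max_overflow = 400
    ; 100 + 400 = 500 connections available at peak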