@@ -1,4 +1,4 @@
-## -*- coding: utf-8 -*-
+
 
 ; #########################################
 ; RHODECODE COMMUNITY EDITION CONFIGURATION
@@ -77,7 +77,7 @@ asyncore_use_poll = true
 ; Recommended type is `gevent`
 #worker_class = gevent
 
-; The maximum number of simultaneous clients. Valid only for gevent
+; The maximum number of simultaneous clients per worker. Valid only for gevent
 #worker_connections = 10
 
 ; The maximum number of pending connections worker will queue to handle
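Note on the reworded comment above: with the gevent worker class, worker_connections caps simultaneous clients per worker, so total concurrency is roughly workers multiplied by worker_connections. A minimal sizing sketch, assuming the standard Gunicorn `workers` option from the same section of the ini; the numbers are purely illustrative:

; illustrative gevent sizing: 2 workers x 50 connections ~= 100 concurrent clients
#workers = 2
#worker_class = gevent
#worker_connections = 50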
@@ -222,12 +222,6 @@ lang = en
 ; Settings this to true could lead to very long startup time.
 startup.import_repos = false
 
-; Uncomment and set this path to use archive download cache.
-; Once enabled, generated archives will be cached at this location
-; and served from the cache during subsequent requests for the same archive of
-; the repository.
-#archive_cache_dir = /tmp/tarballcache
-
 ; URL at which the application is running. This is used for Bootstrapping
 ; requests in context when no web request is available. Used in ishell, or
 ; SSH calls. Set this for events to receive proper url for SSH calls.
@@ -381,6 +375,20 @@ file_store.backend = local
 ; path to store the uploaded binaries
 file_store.storage_path = %(here)s/data/file_store
 
+; Uncomment and set this path to control settings for archive download cache.
+; Generated repo archives will be cached at this location
+; and served from the cache during subsequent requests for the same archive of
+; the repository. This path is important to be shared across filesystems and with
+; RhodeCode and vcsserver
+
+; Default is $cache_dir/archive_cache if not set
+archive_cache.store_dir = %(here)s/data/archive_cache
+
+; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
+archive_cache.cache_size_gb = 10
+
+; By default cache uses sharding technique, this specifies how many shards are there
+archive_cache.cache_shards = 10
 
 ; #############
 ; CELERY CONFIG
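The new archive_cache block above replaces the removed `archive_cache_dir` option. A minimal sketch of pointing it at storage visible to both RhodeCode and vcsserver; the mount point is a hypothetical example and the size/shard values are the shipped defaults:

; archive cache on a path shared between RhodeCode and vcsserver
; /mnt/shared/archive_cache is a hypothetical mount point
archive_cache.store_dir = /mnt/shared/archive_cache
archive_cache.cache_size_gb = 10
archive_cache.cache_shards = 10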
@@ -440,7 +448,7 @@ rc_cache.cache_repo_longterm.max_size =
 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_general.expiration_time = 43200
 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
-#rc_cache.cache_general.arguments.filename = /tmp/cache_general
+#rc_cache.cache_general.arguments.filename = /tmp/cache_general_db
 
 ; alternative `cache_general` redis backend with distributed lock
 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
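For the alternative Redis backend mentioned above, only the backend line appears in this diff; the connection arguments below are an assumption based on the standard dogpile.cache Redis backend and the `arguments.*` naming used elsewhere in this file, so treat them as a sketch rather than the exact shipped keys:

; sketch: cache_general on Redis instead of file_namespace (argument names assumed)
rc_cache.cache_general.backend = dogpile.cache.rc.redis
rc_cache.cache_general.arguments.host = localhost
rc_cache.cache_general.arguments.port = 6379
rc_cache.cache_general.arguments.db = 0
rc_cache.cache_general.arguments.distributed_lock = true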
@@ -467,7 +475,7 @@ rc_cache.cache_general.expiration_time =
 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_perms.expiration_time = 3600
 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
-#rc_cache.cache_perms.arguments.filename = /tmp/cache_perms
+#rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db
 
 ; alternative `cache_perms` redis backend with distributed lock
 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
@@ -494,7 +502,7 @@ rc_cache.cache_perms.expiration_time = 3
 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_repo.expiration_time = 2592000
 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
-#rc_cache.cache_repo.arguments.filename = /tmp/cache_repo
+#rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db
 
 ; alternative `cache_repo` redis backend with distributed lock
 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
@@ -805,7 +813,7 @@ ssh.enable_ui_key_generator = true
 #appenlight.log_namespace_blacklist =
 
 ; Statsd client config, this is used to send metrics to statsd
-; We recommend setting statsd_exported and scrape them using Prometh
+; We recommend setting statsd_exported and scrape them using Prometheus
 #statsd.enabled = false
 #statsd.statsd_host = 0.0.0.0
 #statsd.statsd_port = 8125
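The corrected comment above refers to the statsd block that follows it; a minimal sketch of enabling it, where the host value is illustrative and the port is the default shown in the file:

; ship metrics to a local statsd / statsd_exporter (host value illustrative)
statsd.enabled = true
statsd.statsd_host = 127.0.0.1
statsd.statsd_port = 8125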
@@ -1,4 +1,4 @@
-## -*- coding: utf-8 -*-
+
 
 ; #########################################
 ; RHODECODE COMMUNITY EDITION CONFIGURATION
@@ -173,12 +173,6 @@ lang = en
 ; Settings this to true could lead to very long startup time.
 startup.import_repos = false
 
-; Uncomment and set this path to use archive download cache.
-; Once enabled, generated archives will be cached at this location
-; and served from the cache during subsequent requests for the same archive of
-; the repository.
-#archive_cache_dir = /tmp/tarballcache
-
 ; URL at which the application is running. This is used for Bootstrapping
 ; requests in context when no web request is available. Used in ishell, or
 ; SSH calls. Set this for events to receive proper url for SSH calls.
@@ -332,6 +326,20 @@ file_store.backend = local
 ; path to store the uploaded binaries
 file_store.storage_path = %(here)s/data/file_store
 
+; Uncomment and set this path to control settings for archive download cache.
+; Generated repo archives will be cached at this location
+; and served from the cache during subsequent requests for the same archive of
+; the repository. This path is important to be shared across filesystems and with
+; RhodeCode and vcsserver
+
+; Default is $cache_dir/archive_cache if not set
+archive_cache.store_dir = %(here)s/data/archive_cache
+
+; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
+archive_cache.cache_size_gb = 10
+
+; By default cache uses sharding technique, this specifies how many shards are there
+archive_cache.cache_shards = 10
 
 ; #############
 ; CELERY CONFIG
@@ -391,7 +399,7 @@ rc_cache.cache_repo_longterm.max_size =
 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_general.expiration_time = 43200
 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
-#rc_cache.cache_general.arguments.filename = /tmp/cache_general
+#rc_cache.cache_general.arguments.filename = /tmp/cache_general_db
 
 ; alternative `cache_general` redis backend with distributed lock
 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
@@ -418,7 +426,7 @@ rc_cache.cache_general.expiration_time =
 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_perms.expiration_time = 3600
 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
-#rc_cache.cache_perms.arguments.filename = /tmp/cache_perms
+#rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db
 
 ; alternative `cache_perms` redis backend with distributed lock
 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
@@ -445,7 +453,7 @@ rc_cache.cache_perms.expiration_time = 3
 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
 rc_cache.cache_repo.expiration_time = 2592000
 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
-#rc_cache.cache_repo.arguments.filename = /tmp/cache_repo
+#rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db
 
 ; alternative `cache_repo` redis backend with distributed lock
 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
@@ -568,6 +576,9 @@ sqlalchemy.db1.pool_recycle = 3600
 
 ; the number of connections to keep open inside the connection pool.
 ; 0 indicates no limit
+; the general calculus with gevent is:
+; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
+; then increase pool size + max overflow so that they add up to 500.
 #sqlalchemy.db1.pool_size = 5
 
 ; The number of connections to allow in connection pool "overflow", that is
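To make the arithmetic in the new pool-sizing comment concrete: if gevent allows roughly 500 concurrent greenlets that all perform database access, pool_size plus overflow should add up to about 500. A sketch, assuming the companion `sqlalchemy.db1.max_overflow` key that the overflow comment in this hunk describes; the split between the two values is illustrative:

; ~500 greenlets doing DB work: 100 pooled connections + 400 overflow
#sqlalchemy.db1.pool_size = 100
#sqlalchemy.db1.max_overflow = 400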