@@ -1,4 +1,3 @@
-
 
 ; #########################################
 ; RHODECODE COMMUNITY EDITION CONFIGURATION
@@ -329,7 +328,10 @@ use_celery = false
 #celerybeat-schedule.path =
 
 ; connection url to the message broker (default redis)
-celery.broker_url = redis://
+celery.broker_url = redis://redis:6379/8
+
+; results backend to get results for (default redis)
+celery.result_backend = redis://redis:6379/8
 
 ; rabbitmq example
 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
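Side note on the value above: redis://redis:6379/8 is a standard Celery broker/result-backend URL (host "redis", port 6379, database 8). A minimal, illustrative sketch of a Celery app wired to the same two URLs, assuming only the celery package (the app and task names here are made-up placeholders, not anything from this change):

    # Illustrative sketch only: a Celery app pointed at the same Redis
    # broker and result backend as the config change above.
    from celery import Celery

    app = Celery(
        'rhodecode_tasks',               # placeholder app name
        broker='redis://redis:6379/8',   # same value as celery.broker_url
        backend='redis://redis:6379/8',  # same value as celery.result_backend
    )

    @app.task
    def ping():
        # trivial placeholder task, only here to exercise broker and backend
        return 'pong'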
@@ -48,16 +48,6 @@ tmp_upload_dir = None
 access_log_format = (
 'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
 
-
-# Sets the number of process workers. More workers means more concurrent connections
-# RhodeCode can handle at the same time. Each additional worker also it increases
-# memory usage as each has its own set of caches.
-# The Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
-# than 8-10 unless for huge deployments .e.g 700-1000 users.
-# `instance_id = *` must be set in the [app:main] section below (which is the default)
-# when using more than 1 worker.
-workers = 4
-
 # self adjust workers based on CPU count, to use maximum of CPU and not overquota the resources
 # workers = get_workers()
 
@@ -72,10 +62,22 @@ proc_name = 'rhodecode_enterprise'
 # gevent:
 # In this case, the maximum number of concurrent requests is (N workers * X worker_connections)
 # e.g. workers =3 worker_connections=10 = 3*10, 30 concurrent requests can be handled
-# gt
+# gthread:
 # In this case, the maximum number of concurrent requests is (N workers * X threads)
 # e.g. workers = 3 threads=3 = 3*3, 9 concurrent requests can be handled
-worker_class = 'g
+worker_class = 'gthread'
+
+# Sets the number of process workers. More workers means more concurrent connections
+# RhodeCode can handle at the same time. Each additional worker also it increases
+# memory usage as each has its own set of caches.
+# The Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
+# than 8-10 unless for huge deployments .e.g 700-1000 users.
+# `instance_id = *` must be set in the [app:main] section below (which is the default)
+# when using more than 1 worker.
+workers = 2
+
+# Threads numbers for worker class gthread
+threads = 1
 
 # The maximum number of simultaneous clients. Valid only for gevent
 # In this case, the maximum number of concurrent requests is (N workers * X worker_connections)
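Spelling out the arithmetic from the comments above: with worker_class = 'gthread' the concurrency ceiling is workers * threads, so this change (workers = 2, threads = 1) allows 2 concurrent requests, and the recommended worker count is (2 * NUMBER_OF_CPUS + 1) capped at roughly 8-10. The commented-out `# workers = get_workers()` line suggests a CPU-based helper; a hypothetical sketch of such a helper, not necessarily how RhodeCode implements it, could be:

    # Hypothetical sketch of a CPU-based worker count following the
    # (2 * NUMBER_OF_CPUS + 1) rule from the comments above, capped at 10.
    import multiprocessing

    def get_workers(cap=10):
        suggested = multiprocessing.cpu_count() * 2 + 1
        return min(suggested, cap)

    # e.g. on a 2-CPU host: min(2 * 2 + 1, 10) == 5 workers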
@@ -279,7 +279,10 @@ use_celery = false
 #celerybeat-schedule.path =
 
 ; connection url to the message broker (default redis)
-celery.broker_url = redis://
+celery.broker_url = redis://redis:6379/8
+
+; results backend to get results for (default redis)
+celery.result_backend = redis://redis:6379/8
 
 ; rabbitmq example
 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost