@@ -1,172 +1,172 @@
1 | # required for pushd to work..
2 | SHELL = /bin/bash
3 |
4 |
5 | # set by: PATH_TO_OUTDATED_PACKAGES=/some/path/outdated_packages.py
6 | OUTDATED_PACKAGES = ${PATH_TO_OUTDATED_PACKAGES}
7 |
8 | NODE_PATH=./node_modules
9 | WEBPACK=./node_binaries/webpack
10 | GRUNT=./node_binaries/grunt
11 |
12 | .PHONY: clean
13 | ## Cleanup compiled and cache py files
14 | clean:
15 | make test-clean
16 | find . -type f \( -iname '*.c' -o -iname '*.pyc' -o -iname '*.so' -o -iname '*.orig' \) -exec rm '{}' ';'
17 | find . -type d -name "build" -prune -exec rm -rf '{}' ';'
18 |
19 | .PHONY: test
20 | ## run test-clean and tests
21 | test:
22 | make test-clean
23 | make test-only
24 |
25 |
26 | .PHONY: test-clean
27 | ## run test-clean and tests
28 | test-clean:
29 | rm -rf coverage.xml htmlcov junit.xml pylint.log result
30 | find . -type d -name "__pycache__" -prune -exec rm -rf '{}' ';'
31 | find . -type f \( -iname '.coverage.*' \) -exec rm '{}' ';'
32 |
33 |
34 | .PHONY: test-only
35 | ## Run tests only without cleanup
36 | test-only:
37 | PYTHONHASHSEED=random \
38 | py.test -x -vv -r xw -p no:sugar \
39 | --cov-report=term-missing --cov-report=html \
40 | --cov=rhodecode rhodecode
41 |
42 |
43 | .PHONY: test-only-mysql
44 | ## run tests against mysql
45 | test-only-mysql:
46 | PYTHONHASHSEED=random \
47 | py.test -x -vv -r xw -p no:sugar \
48 | --cov-report=term-missing --cov-report=html \
49 | --ini-config-override='{"app:main": {"sqlalchemy.db1.url": "mysql://root:qweqwe@localhost/rhodecode_test?charset=utf8"}}' \
50 | --cov=rhodecode rhodecode
51 |
52 |
53 | .PHONY: test-only-postgres
54 | ## run tests against postgres
55 | test-only-postgres:
56 | PYTHONHASHSEED=random \
57 | py.test -x -vv -r xw -p no:sugar \
58 | --cov-report=term-missing --cov-report=html \
59 | --ini-config-override='{"app:main": {"sqlalchemy.db1.url": "postgresql://postgres:qweqwe@localhost/rhodecode_test"}}' \
60 | --cov=rhodecode rhodecode
61 |
62 | .PHONY: docs
63 | ## build docs
64 | docs:
65 | (cd docs; nix-build default.nix -o result; make clean html)
66 |
67 |
68 | .PHONY: docs-clean
69 | ## Cleanup docs
70 | docs-clean:
71 | (cd docs; make clean)
72 |
73 |
74 | .PHONY: docs-cleanup
75 | ## Cleanup docs
76 | docs-cleanup:
77 | (cd docs; make cleanup)
78 |
79 |
80 | .PHONY: web-build
81 | ## Build JS packages static/js
82 | # https://hub.docker.com/r/huli/grunt
83 | web-build:
84 | NODE_PATH=$(NODE_PATH) $(GRUNT)
85 |
86 | # check required files
87 | STATIC_CHECK="/robots.txt /502.html \
88 | /js/scripts.min.js /js/rhodecode-components.js \
89 | /css/style.css /css/style-polymer.css /css/style-ipython.css"
90 |
91 | for file in $STATIC_CHECK;
92 | do
93 | if [ ! -f rhodecode/public/$file ]; then
94 | echo "Missing $file expected after web-build"
95 | exit 1
96 | fi
97 | done
98 |
99 | .PHONY: pip-packages
100 | ## Show outdated packages
101 | pip-packages:
102 | python ${OUTDATED_PACKAGES}
103 |
104 |
105 | .PHONY: build
106 | ## Build sdist/egg
107 | build:
108 | python -m build
109 |
110 |
111 | .PHONY: dev-env
112 | ## make dev-env based on the requirements files and install develop of packages
113 | dev-env:
114 | pip install build virtualenv
115 | pushd ../rhodecode-vcsserver/ && make dev-env && popd
116 | pip wheel --wheel-dir=/home/rhodecode/.cache/pip/wheels -r requirements.txt -r requirements_rc_tools.txt -r requirements_test.txt -r requirements_debug.txt
117 | pip install --no-index --find-links=/home/rhodecode/.cache/pip/wheels -r requirements.txt -r requirements_rc_tools.txt -r requirements_test.txt -r requirements_debug.txt
118 | pip install -e .
119 |
120 |
121 | .PHONY: dev-srv
122 | ## run develop server instance, docker exec -it $(docker ps -q --filter 'name=dev-enterprise-ce') /bin/bash
123 | dev-srv:
124 | pserve --reload .dev/dev.ini
125 |
126 | .PHONY: dev-srv-g
127 | ## run gunicorn multi process workers
128 | dev-srv-g:
129 | gunicorn --paste .dev/dev.ini --bind=0.0.0.0:10020 --config=.dev/gunicorn_config.py
130 |
131 |
132 | # Default command on calling make
133 | .DEFAULT_GOAL := show-help
134 |
135 | .PHONY: show-help
136 | show-help:
137 | @echo "$$(tput bold)Available rules:$$(tput sgr0)"
138 | @echo
139 | @sed -n -e "/^## / { \
140 | h; \
141 | s/.*//; \
142 | :doc" \
143 | -e "H; \
144 | n; \
145 | s/^## //; \
146 | t doc" \
147 | -e "s/:.*//; \
148 | G; \
149 | s/\\n## /---/; \
150 | s/\\n/ /g; \
151 | p; \
152 | }" ${MAKEFILE_LIST} \
153 | | LC_ALL='C' sort --ignore-case \
154 | | awk -F '---' \
155 | -v ncol=$$(tput cols) \
156 | -v indent=19 \
157 | -v col_on="$$(tput setaf 6)" \
158 | -v col_off="$$(tput sgr0)" \
159 | '{ \
160 | printf "%s%*s%s ", col_on, -indent, $$1, col_off; \
161 | n = split($$2, words, " "); \
162 | line_length = ncol - indent; \
163 | for (i = 1; i <= n; i++) { \
164 | line_length -= length(words[i]) + 1; \
165 | if (line_length <= 0) { \
166 | line_length = ncol - indent - length(words[i]) - 1; \
167 | printf "\n%*s ", -indent, " "; \
168 | } \
169 | printf "%s ", words[i]; \
170 | } \
171 | printf "\n"; \
172 | }'
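
The show-help target above relies on a simple convention: every public rule is preceded by a "## ..." doc comment, and the sed/awk pipeline pairs each doc comment with the rule name that follows it, sorts the pairs, and prints them as the two-column listing a bare `make` shows. The sketch below is a rough Python equivalent of that pipeline, included only to illustrate the convention; it is not part of the Makefile.

# illustration only: approximate what the sed/awk pipeline in `show-help` does
import sys


def collect_help(makefile_path="Makefile"):
    entries, doc = [], []
    with open(makefile_path) as makefile:
        for raw_line in makefile:
            line = raw_line.rstrip("\n")
            if line.startswith("## "):
                # accumulate doc comment lines for the rule that follows
                doc.append(line[3:])
            elif doc:
                # first non-"##" line after a doc block names the rule
                target = line.split(":", 1)[0].strip()
                entries.append((target, " ".join(doc)))
                doc = []
    return sorted(entries, key=lambda entry: entry[0].lower())


if __name__ == "__main__":
    path = sys.argv[1] if len(sys.argv) > 1 else "Makefile"
    for target, doc in collect_help(path):
        print(f"{target:<19}{doc}")

Saved to a scratch file and run against the Makefile, it prints the same target/description pairs; the 19-character column width matches the indent=19 used by the awk formatter.
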
@@ -1,506 +1,510 @@
1 | """
2 | Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
3 | Gunicorn configuration should be managed by .ini files entries of RhodeCode or VCSServer
4 | """
5 |
6 | import gc
7 | import os
8 | import sys
9 | import math
10 | import time
11 | import threading
12 | import traceback
13 | import random
14 | import socket
15 | import dataclasses
16 | from gunicorn.glogging import Logger
17 |
18 |
19 | def get_workers():
20 |     import multiprocessing
21 |     return multiprocessing.cpu_count() * 2 + 1
22 |
23 |
24 | bind = "127.0.0.1:10020"
25 |
26 |
27 | # Error logging output for gunicorn (-) is stdout
28 | errorlog = '-'
29 |
30 | # Access logging output for gunicorn (-) is stdout
31 | accesslog = '-'
32 |
33 |
34 | # SERVER MECHANICS
35 | # None == system temp dir
36 | # worker_tmp_dir is recommended to be set to some tmpfs
37 | worker_tmp_dir = None
38 | tmp_upload_dir = None
39 |
40 | # use re-use port logic
41 | #reuse_port = True
42 |
43 | # Custom log format
44 | #access_log_format = (
45 | # '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
46 |
47 | # loki format for easier parsing in grafana
48 | access_log_format = (
49 |     'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
50 |
51 |
52 | # Sets the number of process workers. More workers means more concurrent connections
53 | # RhodeCode can handle at the same time. Each additional worker also it increases
54 | # memory usage as each has it's own set of caches.
55 | # Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
56 | # than 8-10 unless for huge deployments .e.g 700-1000 users.
57 | # `instance_id = *` must be set in the [app:main] section below (which is the default)
58 | # when using more than 1 worker.
59 | workers = 4
60 |
61 | # self adjust workers based on CPU count, to use maximum of CPU and not overquota the resources
62 | # workers = get_workers()
63 |
64 | # Gunicorn access log level
65 | loglevel = 'info'
66 |
67 | # Process name visible in process list
68 | proc_name = 'rhodecode_enterprise'
69 |
70 | # Type of worker class, one of `sync`, `gevent`
71 | # currently `sync` is the only option allowed.
72 | worker_class = 'gevent'
73 |
74 | # The maximum number of simultaneous clients. Valid only for gevent
75 | worker_connections = 10
76 |
77 | # Max number of requests that worker will handle before being gracefully restarted.
78 | # Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
79 | max_requests = 2000
80 | max_requests_jitter = 30
81 |
82 | # The maximum number of pending connections.
83 | # Exceeding this number results in the client getting an error when attempting to connect.
84 | backlog = 64
85 |
86 | # Amount of time a worker can spend with handling a request before it
87 | # gets killed and restarted. By default set to 21600 (6hrs)
88 | # Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
89 | timeout = 21600
90 |
91 | # The maximum size of HTTP request line in bytes.
92 | # 0 for unlimited
93 | limit_request_line = 0
94 |
95 | # Limit the number of HTTP headers fields in a request.
96 | # By default this value is 100 and can't be larger than 32768.
97 | limit_request_fields = 32768
98 |
99 | # Limit the allowed size of an HTTP request header field.
100 | # Value is a positive number or 0.
101 | # Setting it to 0 will allow unlimited header field sizes.
102 | limit_request_field_size = 0
103 |
104 | # Timeout for graceful workers restart.
105 | # After receiving a restart signal, workers have this much time to finish
106 | # serving requests. Workers still alive after the timeout (starting from the
107 | # receipt of the restart signal) are force killed.
108 | # Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
109 | graceful_timeout = 21600
110 |
111 | # The number of seconds to wait for requests on a Keep-Alive connection.
112 | # Generally set in the 1-5 seconds range.
113 | keepalive = 2
114 |
115 | # Maximum memory usage that each worker can use before it will receive a
116 | # graceful restart signal 0 = memory monitoring is disabled
117 | # Examples: 268435456 (256MB), 536870912 (512MB)
118 | # 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
119 | # Dynamic formula 1024 * 1024 * 256 == 256MBs
120 | memory_max_usage = 0
121 |
122 | # How often in seconds to check for memory usage for each gunicorn worker
123 | memory_usage_check_interval = 60
124 |
125 | # Threshold value for which we don't recycle worker if GarbageCollection
126 | # frees up enough resources. Before each restart, we try to run GC on worker
127 | # in case we get enough free memory after that, restart will not happen.
128 | memory_usage_recovery_threshold = 0.8
129 |
130 |
131 | @dataclasses.dataclass
132 | class MemoryCheckConfig:
133 |     max_usage: int
134 |     check_interval: int
135 |     recovery_threshold: float
136 |
137 |
138 | def _get_process_rss(pid=None):
139 |     try:
140 |         import psutil
141 |         if pid:
142 |             proc = psutil.Process(pid)
143 |         else:
144 |             proc = psutil.Process()
145 |         return proc.memory_info().rss
146 |     except Exception:
147 |         return None
148 |
149 |
150 | def _get_config(ini_path):
151 |     import configparser
152 |
153 |     try:
154 |         config = configparser.RawConfigParser()
155 |         config.read(ini_path)
156 |         return config
157 |     except Exception:
158 |         return None
159 |
160 |
161 | def get_memory_usage_params(config=None):
162 |     # memory spec defaults
163 |     _memory_max_usage = memory_max_usage
164 |     _memory_usage_check_interval = memory_usage_check_interval
165 |     _memory_usage_recovery_threshold = memory_usage_recovery_threshold
166 |
167 |     if config:
168 |         ini_path = os.path.abspath(config)
169 |         conf = _get_config(ini_path)
170 |
171 |         section = 'server:main'
172 |         if conf and conf.has_section(section):
173 |
174 |             if conf.has_option(section, 'memory_max_usage'):
175 |                 _memory_max_usage = conf.getint(section, 'memory_max_usage')
176 |
177 |             if conf.has_option(section, 'memory_usage_check_interval'):
178 |                 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
179 |
180 |             if conf.has_option(section, 'memory_usage_recovery_threshold'):
181 |                 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
182 |
183 |     _memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
184 |                             or _memory_max_usage)
185 |     _memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
186 |                                        or _memory_usage_check_interval)
187 |     _memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
188 |                                              or _memory_usage_recovery_threshold)
189 |
190 |     return MemoryCheckConfig(_memory_max_usage, _memory_usage_check_interval, _memory_usage_recovery_threshold)
191 |
192 |
193 | def _time_with_offset(check_interval):
194 |     return time.time() - random.randint(0, check_interval/2.0)
195 |
196 |
197 | def pre_fork(server, worker):
198 |     pass
199 |
200 |
201 | def post_fork(server, worker):
202 |
203 |     memory_conf = get_memory_usage_params()
204 |     _memory_max_usage = memory_conf.max_usage
205 |     _memory_usage_check_interval = memory_conf.check_interval
206 |     _memory_usage_recovery_threshold = memory_conf.recovery_threshold
207 |
208 |     worker._memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
209 |                                    or _memory_max_usage)
210 |     worker._memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
211 |                                               or _memory_usage_check_interval)
212 |     worker._memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
213 |                                                     or _memory_usage_recovery_threshold)
214 |
215 |     # register memory last check time, with some random offset so we don't recycle all
216 |     # at once
217 |     worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
218 |
219 |     if _memory_max_usage:
220 |         server.log.info("pid=[%-10s] WORKER spawned with max memory set at %s", worker.pid,
221 |                         _format_data_size(_memory_max_usage))
222 |     else:
223 |         server.log.info("pid=[%-10s] WORKER spawned", worker.pid)
224 |
225 |
226 | def pre_exec(server):
227 |     server.log.info("Forked child, re-executing.")
228 |
229 |
230 | def on_starting(server):
231 |     server_lbl = '{} {}'.format(server.proc_name, server.address)
232 |     server.log.info("Server %s is starting.", server_lbl)
233 |     server.log.info('Config:')
234 |     server.log.info(f"\n{server.cfg}")
235 |     server.log.info(get_memory_usage_params())
236 |
237 |
238 | def when_ready(server):
239 |     server.log.info("Server %s is ready. Spawning workers", server)
240 |
241 |
242 | def on_reload(server):
243 |     pass
244 |
245 |
246 | def _format_data_size(size, unit="B", precision=1, binary=True):
247 |     """Format a number using SI units (kilo, mega, etc.).
248 |
249 |     ``size``: The number as a float or int.
250 |
251 |     ``unit``: The unit name in plural form. Examples: "bytes", "B".
252 |
253 |     ``precision``: How many digits to the right of the decimal point. Default
254 |     is 1. 0 suppresses the decimal point.
255 |
256 |     ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
257 |     If true, use base-2 binary prefixes (kibi = Ki = 1024).
258 |
259 |     ``full_name``: If false (default), use the prefix abbreviation ("k" or
260 |     "Ki"). If true, use the full prefix ("kilo" or "kibi"). If false,
261 |     use abbreviation ("k" or "Ki").
262 |
263 |     """
264 |
265 |     if not binary:
266 |         base = 1000
267 |         multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
268 |     else:
269 |         base = 1024
270 |         multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
271 |
272 |     sign = ""
273 |     if size > 0:
274 |         m = int(math.log(size, base))
275 |     elif size < 0:
276 |         sign = "-"
277 |         size = -size
278 |         m = int(math.log(size, base))
279 |     else:
280 |         m = 0
281 |     if m > 8:
282 |         m = 8
283 |
284 |     if m == 0:
285 |         precision = '%.0f'
286 |     else:
287 |         precision = '%%.%df' % precision
288 |
289 |     size = precision % (size / math.pow(base, m))
290 |
291 |     return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
292 |
293 |
294 | def _check_memory_usage(worker):
295 |     _memory_max_usage = worker._memory_max_usage
296 |     if not _memory_max_usage:
297 |         return
298 |
299 |     _memory_usage_check_interval = worker._memory_usage_check_interval
300 |     _memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
301 |
302 |     elapsed = time.time() - worker._last_memory_check_time
303 |     if elapsed > _memory_usage_check_interval:
304 |         mem_usage = _get_process_rss()
305 |         if mem_usage and mem_usage > _memory_max_usage:
306 |             worker.log.info(
307 |                 "memory usage %s > %s, forcing gc",
308 |                 _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
309 |             # Try to clean it up by forcing a full collection.
310 |             gc.collect()
311 |             mem_usage = _get_process_rss()
312 |             if mem_usage > _memory_usage_recovery_threshold:
313 |                 # Didn't clean up enough, we'll have to terminate.
314 |                 worker.log.warning(
315 |                     "memory usage %s > %s after gc, quitting",
316 |                     _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
317 |                 # This will cause worker to auto-restart itself
318 |                 worker.alive = False
319 |         worker._last_memory_check_time = time.time()
320 |
321 |
322 | def worker_int(worker):
323 |     worker.log.info("pid=[%-10s] worker received INT or QUIT signal", worker.pid)
324 |
325 |     # get traceback info, on worker crash
326 |     def get_thread_id(t_id):
327 |         id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
328 |         return id2name.get(t_id, "unknown_thread_id")
329 |
330 |     code = []
331 |     for thread_id, stack in sys._current_frames().items():  # noqa
332 |         code.append(
333 |             "\n# Thread: %s(%d)" % (get_thread_id(thread_id), thread_id))
334 |         for fname, lineno, name, line in traceback.extract_stack(stack):
335 |             code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
336 |             if line:
337 |                 code.append(" %s" % (line.strip()))
338 |     worker.log.debug("\n".join(code))
339 |
340 |
341 | def worker_abort(worker):
342 |     worker.log.info("pid=[%-10s] worker received SIGABRT signal", worker.pid)
343 |
344 |
345 | def worker_exit(server, worker):
346 |     worker.log.info("pid=[%-10s] worker exit", worker.pid)
347 |
348 |
349 | def child_exit(server, worker):
350 |     worker.log.info("pid=[%-10s] worker child exit", worker.pid)
351 |
352 |
353 | def pre_request(worker, req):
354 |     worker.start_time = time.time()
355 |     worker.log.debug(
356 |         "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
357 |
358 |
359 | def post_request(worker, req, environ, resp):
360 |     total_time = time.time() - worker.start_time
361 |     # Gunicorn sometimes has problems with reading the status_code
362 |     status_code = getattr(resp, 'status_code', '')
363 |     worker.log.debug(
364 |         "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
365 |         worker.nr, req.method, req.path, status_code, total_time)
366 |     _check_memory_usage(worker)
367 |
368 |
369 | def _filter_proxy(ip):
370 |     """
371 |     Passed in IP addresses in HEADERS can be in a special format of multiple
372 |     ips. Those comma separated IPs are passed from various proxies in the
373 |     chain of request processing. The left-most being the original client.
374 |     We only care about the first IP which came from the org. client.
375 |
376 |     :param ip: ip string from headers
377 |     """
378 |     if ',' in ip:
379 |         _ips = ip.split(',')
380 |         _first_ip = _ips[0].strip()
381 |         return _first_ip
382 |     return ip
383 |
384 |
385 | def _filter_port(ip):
386 |     """
387 |     Removes a port from ip, there are 4 main cases to handle here.
388 |     - ipv4 eg. 127.0.0.1
389 |     - ipv6 eg. ::1
390 |     - ipv4+port eg. 127.0.0.1:8080
391 |     - ipv6+port eg. [::1]:8080
392 |
393 |     :param ip:
394 |     """
395 |     def is_ipv6(ip_addr):
396 |         if hasattr(socket, 'inet_pton'):
397 |             try:
398 |                 socket.inet_pton(socket.AF_INET6, ip_addr)
399 |             except socket.error:
400 |                 return False
401 |         else:
402 |             return False
403 |         return True
404 |
405 |     if ':' not in ip:  # must be ipv4 pure ip
406 |         return ip
407 |
408 |     if '[' in ip and ']' in ip:  # ipv6 with port
409 |         return ip.split(']')[0][1:].lower()
410 |
411 |     # must be ipv6 or ipv4 with port
412 |     if is_ipv6(ip):
413 |         return ip
414 |     else:
415 |         ip, _port = ip.split(':')[:2]  # means ipv4+port
416 |         return ip
417 |
418 |
419 | def get_ip_addr(environ):
420 |     proxy_key = 'HTTP_X_REAL_IP'
421 |     proxy_key2 = 'HTTP_X_FORWARDED_FOR'
422 |     def_key = 'REMOTE_ADDR'
423 |
424 |     def _filters(x):
425 |         return _filter_port(_filter_proxy(x))
426 |
427 |     ip = environ.get(proxy_key)
428 |     if ip:
429 |         return _filters(ip)
430 |
431 |     ip = environ.get(proxy_key2)
432 |     if ip:
433 |         return _filters(ip)
434 |
435 |     ip = environ.get(def_key, '0.0.0.0')
436 |     return _filters(ip)
437 |
438 |
439 | class RhodeCodeLogger(Logger):
440 |     """
441 |     Custom Logger that allows some customization that gunicorn doesn't allow
442 |     """
443 |
444 |     datefmt = r"%Y-%m-%d %H:%M:%S"
445 |
446 |     def __init__(self, cfg):
447 |         Logger.__init__(self, cfg)
448 |
449 |     def now(self):
450 |         """ return date in RhodeCode Log format """
451 |         now = time.time()
452 |         msecs = int((now - int(now)) * 1000)
453 |         return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
454 |
455 |     def atoms(self, resp, req, environ, request_time):
456 |         """ Gets atoms for log formatting.
457 |         """
458 |         status = resp.status
459 |         if isinstance(status, str):
460 |             status = status.split(None, 1)[0]
461 |         atoms = {
462 |             'h': get_ip_addr(environ),
463 |             'l': '-',
464 |             'u': self._get_user(environ) or '-',
465 |             't': self.now(),
466 |             'r': "%s %s %s" % (environ['REQUEST_METHOD'],
467 |                                environ['RAW_URI'],
468 |                                environ["SERVER_PROTOCOL"]),
469 |             's': status,
470 |             'm': environ.get('REQUEST_METHOD'),
471 |             'U': environ.get('PATH_INFO'),
472 |             'q': environ.get('QUERY_STRING'),
473 |             'H': environ.get('SERVER_PROTOCOL'),
474 |             'b': getattr(resp, 'sent', None) is not None and str(resp.sent) or '-',
475 |             'B': getattr(resp, 'sent', None),
476 |             'f': environ.get('HTTP_REFERER', '-'),
477 |             'a': environ.get('HTTP_USER_AGENT', '-'),
478 |             'T': request_time.seconds,
479 |             'D': (request_time.seconds * 1000000) + request_time.microseconds,
480 |             'M': (request_time.seconds * 1000) + int(request_time.microseconds/1000),
481 |             'L': "%d.%06d" % (request_time.seconds, request_time.microseconds),
482 |             'p': "<%s>" % os.getpid()
483 |         }
484 |
485 |         # add request headers
486 |         if hasattr(req, 'headers'):
487 |             req_headers = req.headers
488 |         else:
489 |             req_headers = req
490 |
491 |         if hasattr(req_headers, "items"):
492 |             req_headers = req_headers.items()
493 |
494 |         atoms.update({"{%s}i" % k.lower(): v for k, v in req_headers})
495 |
496 |         resp_headers = resp.headers
497 |         if hasattr(resp_headers, "items"):
498 |             resp_headers = resp_headers.items()
499 |
500 |         # add response headers
501 |         atoms.update({"{%s}o" % k.lower(): v for k, v in resp_headers})
502 |
503 |         # add environ variables
504 |         environ_variables = environ.items()
505 |         atoms.update({"{%s}e" % k.lower(): v for k, v in environ_variables})
506 |
507 |         return atoms
508 |
509 |
510 | logger_class = RhodeCodeLogger
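
The memory-management knobs above (memory_max_usage, memory_usage_check_interval, memory_usage_recovery_threshold) are resolved by get_memory_usage_params() from three places, in increasing priority: the module-level defaults in this file, the [server:main] section of the ini file, and the RC_GUNICORN_* environment variables. The standalone sketch below only illustrates that precedence; it is not part of the config file, and the `defaults` dict and `resolve()` helper are made up for the example. For scale, a limit of 268435456 bytes is what the worker start-up log would print as "256.0 MiB" via _format_data_size().

# illustration only: precedence of env var > ini entry > module default
import os

defaults = {
    'memory_max_usage': 0,               # 0 disables memory monitoring
    'memory_usage_check_interval': 60,   # seconds between RSS checks
    'memory_usage_recovery_threshold': 0.8,
}


def resolve(name, ini_value=None):
    # env var beats the ini entry, the ini entry beats the module-level default
    env_name = 'RC_GUNICORN_' + name.upper()
    value = os.environ.get(env_name, '') or ini_value or defaults[name]
    return type(defaults[name])(value)


if __name__ == '__main__':
    os.environ['RC_GUNICORN_MEMORY_MAX_USAGE'] = str(1024 * 1024 * 256)  # 256 MiB
    print(resolve('memory_max_usage'))                  # -> 268435456
    print(resolve('memory_usage_check_interval'))       # -> 60
    print(resolve('memory_usage_recovery_threshold'))   # -> 0.8

With memory_max_usage left at 0 (the default), _check_memory_usage() returns early and the per-request memory check stays disabled.
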
@@ -1,814 +1,813 b'' | |||||
1 |
|
||||
2 |
|
1 | |||
3 | ; ######################################### |
|
2 | ; ######################################### | |
4 | ; RHODECODE COMMUNITY EDITION CONFIGURATION |
|
3 | ; RHODECODE COMMUNITY EDITION CONFIGURATION | |
5 | ; ######################################### |
|
4 | ; ######################################### | |
6 |
|
5 | |||
7 | [DEFAULT] |
|
6 | [DEFAULT] | |
8 | ; Debug flag sets all loggers to debug, and enables request tracking |
|
7 | ; Debug flag sets all loggers to debug, and enables request tracking | |
9 | debug = false |
|
8 | debug = false | |
10 |
|
9 | |||
11 | ; ######################################################################## |
|
10 | ; ######################################################################## | |
12 | ; EMAIL CONFIGURATION |
|
11 | ; EMAIL CONFIGURATION | |
13 | ; These settings will be used by the RhodeCode mailing system |
|
12 | ; These settings will be used by the RhodeCode mailing system | |
14 | ; ######################################################################## |
|
13 | ; ######################################################################## | |
15 |
|
14 | |||
16 | ; prefix all emails subjects with given prefix, helps filtering out emails |
|
15 | ; prefix all emails subjects with given prefix, helps filtering out emails | |
17 | #email_prefix = [RhodeCode] |
|
16 | #email_prefix = [RhodeCode] | |
18 |
|
17 | |||
19 | ; email FROM address all mails will be sent |
|
18 | ; email FROM address all mails will be sent | |
20 | #app_email_from = rhodecode-noreply@localhost |
|
19 | #app_email_from = rhodecode-noreply@localhost | |
21 |
|
20 | |||
22 | #smtp_server = mail.server.com |
|
21 | #smtp_server = mail.server.com | |
23 | #smtp_username = |
|
22 | #smtp_username = | |
24 | #smtp_password = |
|
23 | #smtp_password = | |
25 | #smtp_port = |
|
24 | #smtp_port = | |
26 | #smtp_use_tls = false |
|
25 | #smtp_use_tls = false | |
27 | #smtp_use_ssl = true |
|
26 | #smtp_use_ssl = true | |
28 |
|
27 | |||
29 | [server:main] |
|
28 | [server:main] | |
30 | ; COMMON HOST/IP CONFIG, This applies mostly to develop setup, |
|
29 | ; COMMON HOST/IP CONFIG, This applies mostly to develop setup, | |
31 | ; Host port for gunicorn are controlled by gunicorn_conf.py |
|
30 | ; Host port for gunicorn are controlled by gunicorn_conf.py | |
32 | host = 127.0.0.1 |
|
31 | host = 127.0.0.1 | |
33 | port = 10020 |
|
32 | port = 10020 | |
34 |
|
33 | |||
35 |
|
34 | |||
36 | ; ########################### |
|
35 | ; ########################### | |
37 | ; GUNICORN APPLICATION SERVER |
|
36 | ; GUNICORN APPLICATION SERVER | |
38 | ; ########################### |
|
37 | ; ########################### | |
39 |
|
38 | |||
40 | ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py |
|
39 | ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py | |
41 |
|
40 | |||
42 | ; Module to use, this setting shouldn't be changed |
|
41 | ; Module to use, this setting shouldn't be changed | |
43 | use = egg:gunicorn#main |
|
42 | use = egg:gunicorn#main | |
44 |
|
43 | |||
45 | ; Prefix middleware for RhodeCode. |
|
44 | ; Prefix middleware for RhodeCode. | |
46 | ; recommended when using proxy setup. |
|
45 | ; recommended when using proxy setup. | |
47 | ; allows to set RhodeCode under a prefix in server. |
|
46 | ; allows to set RhodeCode under a prefix in server. | |
48 | ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well. |
|
47 | ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well. | |
49 | ; And set your prefix like: `prefix = /custom_prefix` |
|
48 | ; And set your prefix like: `prefix = /custom_prefix` | |
50 | ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need |
|
49 | ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need | |
51 | ; to make your cookies only work on prefix url |
|
50 | ; to make your cookies only work on prefix url | |
52 | [filter:proxy-prefix] |
|
51 | [filter:proxy-prefix] | |
53 | use = egg:PasteDeploy#prefix |
|
52 | use = egg:PasteDeploy#prefix | |
54 | prefix = / |
|
53 | prefix = / | |
55 |
|
54 | |||
56 | [app:main] |
|
55 | [app:main] | |
57 | ; The %(here)s variable will be replaced with the absolute path of parent directory |
|
56 | ; The %(here)s variable will be replaced with the absolute path of parent directory | |
58 | ; of this file |
|
57 | ; of this file | |
59 | ; Each option in the app:main can be override by an environmental variable |
|
58 | ; Each option in the app:main can be override by an environmental variable | |
60 | ; |
|
59 | ; | |
61 | ;To override an option: |
|
60 | ;To override an option: | |
62 | ; |
|
61 | ; | |
63 | ;RC_<KeyName> |
|
62 | ;RC_<KeyName> | |
64 | ;Everything should be uppercase, . and - should be replaced by _. |
|
63 | ;Everything should be uppercase, . and - should be replaced by _. | |
65 | ;For example, if you have these configuration settings: |
|
64 | ;For example, if you have these configuration settings: | |
66 | ;rc_cache.repo_object.backend = foo |
|
65 | ;rc_cache.repo_object.backend = foo | |
67 | ;can be overridden by |
|
66 | ;can be overridden by | |
68 | ;export RC_CACHE_REPO_OBJECT_BACKEND=foo |
|
67 | ;export RC_CACHE_REPO_OBJECT_BACKEND=foo | |
69 |
|
68 | |||
70 | use = egg:rhodecode-enterprise-ce |
|
69 | use = egg:rhodecode-enterprise-ce | |
71 |
|
70 | |||
72 | ; enable proxy prefix middleware, defined above |
|
71 | ; enable proxy prefix middleware, defined above | |
73 | #filter-with = proxy-prefix |
|
72 | #filter-with = proxy-prefix | |
74 |
|
73 | |||
75 | ; encryption key used to encrypt social plugin tokens, |
|
74 | ; encryption key used to encrypt social plugin tokens, | |
76 | ; remote_urls with credentials etc, if not set it defaults to |
|
75 | ; remote_urls with credentials etc, if not set it defaults to | |
77 | ; `beaker.session.secret` |
|
76 | ; `beaker.session.secret` | |
78 | #rhodecode.encrypted_values.secret = |
|
77 | #rhodecode.encrypted_values.secret = | |
79 |
|
78 | |||
80 | ; decryption strict mode (enabled by default). It controls if decryption raises |
|
79 | ; decryption strict mode (enabled by default). It controls if decryption raises | |
81 | ; `SignatureVerificationError` in case of wrong key, or damaged encryption data. |
|
80 | ; `SignatureVerificationError` in case of wrong key, or damaged encryption data. | |
82 | #rhodecode.encrypted_values.strict = false |
|
81 | #rhodecode.encrypted_values.strict = false | |
83 |
|
82 | |||
84 | ; Pick algorithm for encryption. Either fernet (more secure) or aes (default) |
|
83 | ; Pick algorithm for encryption. Either fernet (more secure) or aes (default) | |
85 | ; fernet is safer, and we strongly recommend switching to it. |
|
84 | ; fernet is safer, and we strongly recommend switching to it. | |
86 | ; Due to backward compatibility aes is used as default. |
|
85 | ; Due to backward compatibility aes is used as default. | |
87 | #rhodecode.encrypted_values.algorithm = fernet |
|
86 | #rhodecode.encrypted_values.algorithm = fernet | |
88 |
|
87 | |||
89 | ; Return gzipped responses from RhodeCode (static files/application) |
|
88 | ; Return gzipped responses from RhodeCode (static files/application) | |
90 | gzip_responses = false |
|
89 | gzip_responses = false | |
91 |
|
90 | |||
92 | ; Auto-generate javascript routes file on startup |
|
91 | ; Auto-generate javascript routes file on startup | |
93 | generate_js_files = false |
|
92 | generate_js_files = false | |
94 |
|
93 | |||
95 | ; System global default language. |
|
94 | ; System global default language. | |
96 | ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh |
|
95 | ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh | |
97 | lang = en |
|
96 | lang = en | |
98 |
|
97 | |||
99 | ; Perform a full repository scan and import on each server start. |
|
98 | ; Perform a full repository scan and import on each server start. | |
100 | ; Setting this to true could lead to a very long startup time. |
|
99 | ; Setting this to true could lead to a very long startup time. | |
101 | startup.import_repos = false |
|
100 | startup.import_repos = false | |
102 |
|
101 | |||
103 | ; URL at which the application is running. This is used for Bootstrapping |
|
102 | ; URL at which the application is running. This is used for Bootstrapping | |
104 | ; requests in context when no web request is available. Used in ishell, or |
|
103 | ; requests in context when no web request is available. Used in ishell, or | |
105 | ; SSH calls. Set this for events to receive proper url for SSH calls. |
|
104 | ; SSH calls. Set this for events to receive proper url for SSH calls. | |
106 | app.base_url = http://rhodecode.local |
|
105 | app.base_url = http://rhodecode.local | |
107 |
|
106 | |||
108 | ; Unique application ID. Should be a random unique string for security. |
|
107 | ; Unique application ID. Should be a random unique string for security. | |
109 | app_instance_uuid = rc-production |
|
108 | app_instance_uuid = rc-production | |
110 |
|
109 | |||
111 | ; Cut off limit for large diffs (size in bytes). If overall diff size on |
|
110 | ; Cut off limit for large diffs (size in bytes). If overall diff size on | |
112 | ; commit, or pull request exceeds this limit this diff will be displayed |
|
111 | ; commit, or pull request exceeds this limit this diff will be displayed | |
113 | ; partially. E.g 512000 == 512Kb |
|
112 | ; partially. E.g 512000 == 512Kb | |
114 | cut_off_limit_diff = 512000 |
|
113 | cut_off_limit_diff = 512000 | |
115 |
|
114 | |||
116 | ; Cut off limit for large files inside diffs (size in bytes). Each individual |
|
115 | ; Cut off limit for large files inside diffs (size in bytes). Each individual | |
117 | ; file inside diff which exceeds this limit will be displayed partially. |
|
116 | ; file inside diff which exceeds this limit will be displayed partially. | |
118 | ; E.g 128000 == 128Kb |
|
117 | ; E.g 128000 == 128Kb | |
119 | cut_off_limit_file = 128000 |
|
118 | cut_off_limit_file = 128000 | |
120 |
|
119 | |||
121 | ; Use cached version of vcs repositories everywhere. Recommended to be `true` |
|
120 | ; Use cached version of vcs repositories everywhere. Recommended to be `true` | |
122 | vcs_full_cache = true |
|
121 | vcs_full_cache = true | |
123 |
|
122 | |||
124 | ; Force https in RhodeCode, fixes https redirects, assumes it's always https. |
|
123 | ; Force https in RhodeCode, fixes https redirects, assumes it's always https. | |
125 | ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache |
|
124 | ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache | |
126 | force_https = false |
|
125 | force_https = false | |
127 |
|
126 | |||
128 | ; use Strict-Transport-Security headers |
|
127 | ; use Strict-Transport-Security headers | |
129 | use_htsts = false |
|
128 | use_htsts = false | |
130 |
|
129 | |||
131 | ; Set to true if your repos are exposed using the dumb protocol |
|
130 | ; Set to true if your repos are exposed using the dumb protocol | |
132 | git_update_server_info = false |
|
131 | git_update_server_info = false | |
133 |
|
132 | |||
134 | ; RSS/ATOM feed options |
|
133 | ; RSS/ATOM feed options | |
135 | rss_cut_off_limit = 256000 |
|
134 | rss_cut_off_limit = 256000 | |
136 | rss_items_per_page = 10 |
|
135 | rss_items_per_page = 10 | |
137 | rss_include_diff = false |
|
136 | rss_include_diff = false | |
138 |
|
137 | |||
139 | ; gist URL alias, used to create nicer urls for gist. This should be an |
|
138 | ; gist URL alias, used to create nicer urls for gist. This should be an | |
140 | ; url that does rewrites to _admin/gists/{gistid}. |
|
139 | ; url that does rewrites to _admin/gists/{gistid}. | |
141 | ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal |
|
140 | ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal | |
142 | ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid} |
|
141 | ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid} | |
143 | gist_alias_url = |
|
142 | gist_alias_url = | |
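; Illustrative example only, reusing the hypothetical host from the comment above:
#gist_alias_url = http://gist.rhodecode.org/{gistid}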
144 |
|
143 | |||
145 | ; List of views (using glob pattern syntax) that AUTH TOKENS could be |
|
144 | ; List of views (using glob pattern syntax) that AUTH TOKENS could be | |
146 | ; used for access. |
|
145 | ; used for access. | |
147 | ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it |
|
146 | ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it | |
148 | ; came from the logged-in user who owns this authentication token. |
|
147 | ; came from the logged-in user who owns this authentication token. | |
149 | ; Additionally the @TOKEN syntax can be used to bind the view to a specific |
|
148 | ; Additionally the @TOKEN syntax can be used to bind the view to a specific | |
150 | ; authentication token. Such a view would only be accessible when used together |
|
149 | ; authentication token. Such a view would only be accessible when used together | |
151 | ; with this authentication token |
|
150 | ; with this authentication token | |
152 | ; list of all views can be found under `/_admin/permissions/auth_token_access` |
|
151 | ; list of all views can be found under `/_admin/permissions/auth_token_access` | |
153 | ; The list should be "," separated and on a single line. |
|
152 | ; The list should be "," separated and on a single line. | |
154 | ; Most common views to enable: |
|
153 | ; Most common views to enable: | |
155 |
|
154 | |||
156 | # RepoCommitsView:repo_commit_download |
|
155 | # RepoCommitsView:repo_commit_download | |
157 | # RepoCommitsView:repo_commit_patch |
|
156 | # RepoCommitsView:repo_commit_patch | |
158 | # RepoCommitsView:repo_commit_raw |
|
157 | # RepoCommitsView:repo_commit_raw | |
159 | # RepoCommitsView:repo_commit_raw@TOKEN |
|
158 | # RepoCommitsView:repo_commit_raw@TOKEN | |
160 | # RepoFilesView:repo_files_diff |
|
159 | # RepoFilesView:repo_files_diff | |
161 | # RepoFilesView:repo_archivefile |
|
160 | # RepoFilesView:repo_archivefile | |
162 | # RepoFilesView:repo_file_raw |
|
161 | # RepoFilesView:repo_file_raw | |
163 | # GistView:* |
|
162 | # GistView:* | |
164 | api_access_controllers_whitelist = |
|
163 | api_access_controllers_whitelist = | |
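; Illustrative example of a populated whitelist, using views from the list above,
; comma separated and on a single line:
#api_access_controllers_whitelist = RepoCommitsView:repo_commit_raw@TOKEN, RepoFilesView:repo_file_raw, GistView:*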
165 |
|
164 | |||
166 | ; Default encoding used to convert from and to unicode |
|
165 | ; Default encoding used to convert from and to unicode | |
167 | ; can also be a comma separated list of encodings in case of mixed encodings |
|
166 | ; can also be a comma separated list of encodings in case of mixed encodings | |
168 | default_encoding = UTF-8 |
|
167 | default_encoding = UTF-8 | |
169 |
|
168 | |||
170 | ; instance-id prefix |
|
169 | ; instance-id prefix | |
171 | ; a prefix key for this instance used for cache invalidation when running |
|
170 | ; a prefix key for this instance used for cache invalidation when running | |
172 | ; multiple instances of RhodeCode, make sure it's globally unique for |
|
171 | ; multiple instances of RhodeCode, make sure it's globally unique for | |
173 | ; all running RhodeCode instances. Leave empty if you don't use it |
|
172 | ; all running RhodeCode instances. Leave empty if you don't use it | |
174 | instance_id = |
|
173 | instance_id = | |
175 |
|
174 | |||
176 | ; Fallback authentication plugin. Set this to a plugin ID to force the usage |
|
175 | ; Fallback authentication plugin. Set this to a plugin ID to force the usage | |
177 | ; of an authentication plugin even if it is disabled by its settings. |
|
176 | ; of an authentication plugin even if it is disabled by its settings. | |
178 | ; This could be useful if you are unable to log in to the system due to broken |
|
177 | ; This could be useful if you are unable to log in to the system due to broken | |
179 | ; authentication settings. Then you can enable e.g. the internal RhodeCode auth |
|
178 | ; authentication settings. Then you can enable e.g. the internal RhodeCode auth | |
180 | ; module to log in again and fix the settings. |
|
179 | ; module to log in again and fix the settings. | |
181 | ; Available builtin plugin IDs (hash is part of the ID): |
|
180 | ; Available builtin plugin IDs (hash is part of the ID): | |
182 | ; egg:rhodecode-enterprise-ce#rhodecode |
|
181 | ; egg:rhodecode-enterprise-ce#rhodecode | |
183 | ; egg:rhodecode-enterprise-ce#pam |
|
182 | ; egg:rhodecode-enterprise-ce#pam | |
184 | ; egg:rhodecode-enterprise-ce#ldap |
|
183 | ; egg:rhodecode-enterprise-ce#ldap | |
185 | ; egg:rhodecode-enterprise-ce#jasig_cas |
|
184 | ; egg:rhodecode-enterprise-ce#jasig_cas | |
186 | ; egg:rhodecode-enterprise-ce#headers |
|
185 | ; egg:rhodecode-enterprise-ce#headers | |
187 | ; egg:rhodecode-enterprise-ce#crowd |
|
186 | ; egg:rhodecode-enterprise-ce#crowd | |
188 |
|
187 | |||
189 | #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode |
|
188 | #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode | |
190 |
|
189 | |||
191 | ; Flag to control loading of legacy plugins in py:/path format |
|
190 | ; Flag to control loading of legacy plugins in py:/path format | |
192 | auth_plugin.import_legacy_plugins = true |
|
191 | auth_plugin.import_legacy_plugins = true | |
193 |
|
192 | |||
194 | ; alternative return HTTP header for failed authentication. Default HTTP |
|
193 | ; alternative return HTTP header for failed authentication. Default HTTP | |
195 | ; response is 401 HTTPUnauthorized. Currently HG clients have trouble with |
|
194 | ; response is 401 HTTPUnauthorized. Currently HG clients have trouble with | |
196 | ; handling that, causing a series of failed authentication calls. |
|
195 | ; handling that, causing a series of failed authentication calls. | |
197 | ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code |
|
196 | ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code | |
198 | ; This will be served instead of default 401 on bad authentication |
|
197 | ; This will be served instead of default 401 on bad authentication | |
199 | auth_ret_code = |
|
198 | auth_ret_code = | |
200 |
|
199 | |||
201 | ; use special detection method when serving auth_ret_code, instead of serving |
|
200 | ; use special detection method when serving auth_ret_code, instead of serving | |
202 | ; ret_code directly, use 401 initially (Which triggers credentials prompt) |
|
201 | ; ret_code directly, use 401 initially (Which triggers credentials prompt) | |
203 | ; and then serve auth_ret_code to clients |
|
202 | ; and then serve auth_ret_code to clients | |
204 | auth_ret_code_detection = false |
|
203 | auth_ret_code_detection = false | |
205 |
|
204 | |||
206 | ; locking return code. When a repository is locked, return this HTTP code. 2XX |
|
205 | ; locking return code. When a repository is locked, return this HTTP code. 2XX | |
207 | ; codes don't break the transactions while 4XX codes do |
|
206 | ; codes don't break the transactions while 4XX codes do | |
208 | lock_ret_code = 423 |
|
207 | lock_ret_code = 423 | |
209 |
|
208 | |||
210 | ; allows changing the repository location in the settings page |
|
209 | ; allows changing the repository location in the settings page | |
211 | allow_repo_location_change = true |
|
210 | allow_repo_location_change = true | |
212 |
|
211 | |||
213 | ; allows setting up custom hooks in the settings page |
|
212 | ; allows setting up custom hooks in the settings page | |
214 | allow_custom_hooks_settings = true |
|
213 | allow_custom_hooks_settings = true | |
215 |
|
214 | |||
216 | ; Generated license token required for EE edition license. |
|
215 | ; Generated license token required for EE edition license. | |
217 | ; New generated token value can be found in Admin > settings > license page. |
|
216 | ; New generated token value can be found in Admin > settings > license page. | |
218 | license_token = |
|
217 | license_token = | |
219 |
|
218 | |||
220 | ; This flag hides sensitive information on the license page such as token, and license data |
|
219 | ; This flag hides sensitive information on the license page such as token, and license data | |
221 | license.hide_license_info = false |
|
220 | license.hide_license_info = false | |
222 |
|
221 | |||
223 | ; supervisor connection uri, for managing supervisor and logs. |
|
222 | ; supervisor connection uri, for managing supervisor and logs. | |
224 | supervisor.uri = |
|
223 | supervisor.uri = | |
225 |
|
224 | |||
226 | ; supervisord group name/id we only want this RC instance to handle |
|
225 | ; supervisord group name/id we only want this RC instance to handle | |
227 | supervisor.group_id = prod |
|
226 | supervisor.group_id = prod | |
228 |
|
227 | |||
229 | ; Display extended labs settings |
|
228 | ; Display extended labs settings | |
230 | labs_settings_active = true |
|
229 | labs_settings_active = true | |
231 |
|
230 | |||
232 | ; Custom exception store path, defaults to TMPDIR |
|
231 | ; Custom exception store path, defaults to TMPDIR | |
233 | ; This is used to store exceptions from RhodeCode in a shared directory |
|
232 | ; This is used to store exceptions from RhodeCode in a shared directory | |
234 | #exception_tracker.store_path = |
|
233 | #exception_tracker.store_path = | |
235 |
|
234 | |||
236 | ; Send email with exception details when it happens |
|
235 | ; Send email with exception details when it happens | |
237 | #exception_tracker.send_email = false |
|
236 | #exception_tracker.send_email = false | |
238 |
|
237 | |||
239 | ; Comma separated list of recipients for exception emails, |
|
238 | ; Comma separated list of recipients for exception emails, | |
240 | ; e.g admin@rhodecode.com,devops@rhodecode.com |
|
239 | ; e.g admin@rhodecode.com,devops@rhodecode.com | |
241 | ; Can be left empty, then emails will be sent to ALL super-admins |
|
240 | ; Can be left empty, then emails will be sent to ALL super-admins | |
242 | #exception_tracker.send_email_recipients = |
|
241 | #exception_tracker.send_email_recipients = | |
243 |
|
242 | |||
244 | ; optional prefix to add to the email subject |
|
243 | ; optional prefix to add to the email subject | |
245 | #exception_tracker.email_prefix = [RHODECODE ERROR] |
|
244 | #exception_tracker.email_prefix = [RHODECODE ERROR] | |
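; Illustrative sketch of a fully enabled exception tracker, using only the options
; documented above (the store path and recipient addresses are placeholders):
#exception_tracker.store_path = /shared/rhodecode/exceptions
#exception_tracker.send_email = true
#exception_tracker.send_email_recipients = admin@rhodecode.com,devops@rhodecode.com
#exception_tracker.email_prefix = [RHODECODE ERROR]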
246 |
|
245 | |||
247 | ; File store configuration. This is used to store and serve uploaded files |
|
246 | ; File store configuration. This is used to store and serve uploaded files | |
248 | file_store.enabled = true |
|
247 | file_store.enabled = true | |
249 |
|
248 | |||
250 | ; Storage backend, available options are: local |
|
249 | ; Storage backend, available options are: local | |
251 | file_store.backend = local |
|
250 | file_store.backend = local | |
252 |
|
251 | |||
253 | ; path to store the uploaded binaries |
|
252 | ; path to store the uploaded binaries | |
254 | file_store.storage_path = %(here)s/data/file_store |
|
253 | file_store.storage_path = %(here)s/data/file_store | |
255 |
|
254 | |||
256 | ; Uncomment and set this path to control settings for archive download cache. |
|
255 | ; Uncomment and set this path to control settings for archive download cache. | |
257 | ; Generated repo archives will be cached at this location |
|
256 | ; Generated repo archives will be cached at this location | |
258 | ; and served from the cache during subsequent requests for the same archive of |
|
257 | ; and served from the cache during subsequent requests for the same archive of | |
259 | ; the repository. It is important that this path is shared across filesystems and between |
|
258 | ; the repository. It is important that this path is shared across filesystems and between | |
260 | ; RhodeCode and vcsserver |
|
259 | ; RhodeCode and vcsserver | |
261 |
|
260 | |||
262 | ; Default is $cache_dir/archive_cache if not set |
|
261 | ; Default is $cache_dir/archive_cache if not set | |
263 | archive_cache.store_dir = %(here)s/data/archive_cache |
|
262 | archive_cache.store_dir = %(here)s/data/archive_cache | |
264 |
|
263 | |||
265 | ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb |
|
264 | ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb | |
266 | archive_cache.cache_size_gb = |
|
265 | archive_cache.cache_size_gb = 40 | |
267 |
|
266 | |||
268 | ; By default the cache uses a sharding technique; this specifies how many shards there are |
|
267 | ; By default the cache uses a sharding technique; this specifies how many shards there are | |
269 | archive_cache.cache_shards = |
|
268 | archive_cache.cache_shards = 4 | |
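; Illustrative example only; 10 GB is the default mentioned above and the shard
; count is deployment specific:
#archive_cache.cache_size_gb = 10
#archive_cache.cache_shards = 4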
270 |
|
269 | |||
271 | ; ############# |
|
270 | ; ############# | |
272 | ; CELERY CONFIG |
|
271 | ; CELERY CONFIG | |
273 | ; ############# |
|
272 | ; ############# | |
274 |
|
273 | |||
275 | ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini |
|
274 | ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini | |
276 |
|
275 | |||
277 | use_celery = false |
|
276 | use_celery = false | |
278 |
|
277 | |||
279 | ; path to store schedule database |
|
278 | ; path to store schedule database | |
280 | #celerybeat-schedule.path = |
|
279 | #celerybeat-schedule.path = | |
281 |
|
280 | |||
282 | ; connection url to the message broker (default redis) |
|
281 | ; connection url to the message broker (default redis) | |
283 | celery.broker_url = redis://localhost:6379/8 |
|
282 | celery.broker_url = redis://localhost:6379/8 | |
284 |
|
283 | |||
285 | ; rabbitmq example |
|
284 | ; rabbitmq example | |
286 | #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost |
|
285 | #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost | |
287 |
|
286 | |||
288 | ; maximum tasks to execute before worker restart |
|
287 | ; maximum tasks to execute before worker restart | |
289 | celery.max_tasks_per_child = 20 |
|
288 | celery.max_tasks_per_child = 20 | |
290 |
|
289 | |||
291 | ; tasks will never be sent to the queue, but executed locally instead. |
|
290 | ; tasks will never be sent to the queue, but executed locally instead. | |
292 | celery.task_always_eager = false |
|
291 | celery.task_always_eager = false | |
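; Illustrative sketch of enabling background tasks with the Redis broker shown above
; (the broker URL is an example, adjust host/port/db to your environment):
#use_celery = true
#celery.broker_url = redis://localhost:6379/8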
293 |
|
292 | |||
294 | ; ############# |
|
293 | ; ############# | |
295 | ; DOGPILE CACHE |
|
294 | ; DOGPILE CACHE | |
296 | ; ############# |
|
295 | ; ############# | |
297 |
|
296 | |||
298 | ; Default cache dir for caches. Putting this into a ramdisk can boost performance. |
|
297 | ; Default cache dir for caches. Putting this into a ramdisk can boost performance. | |
299 | ; eg. /tmpfs/data_ramdisk, however this directory might require a large amount of space |
|
298 | ; eg. /tmpfs/data_ramdisk, however this directory might require a large amount of space | |
300 | cache_dir = %(here)s/data |
|
299 | cache_dir = %(here)s/data | |
301 |
|
300 | |||
302 | ; ********************************************* |
|
301 | ; ********************************************* | |
303 | ; `sql_cache_short` cache for heavy SQL queries |
|
302 | ; `sql_cache_short` cache for heavy SQL queries | |
304 | ; Only supported backend is `memory_lru` |
|
303 | ; Only supported backend is `memory_lru` | |
305 | ; ********************************************* |
|
304 | ; ********************************************* | |
306 | rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru |
|
305 | rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru | |
307 | rc_cache.sql_cache_short.expiration_time = 30 |
|
306 | rc_cache.sql_cache_short.expiration_time = 30 | |
308 |
|
307 | |||
309 |
|
308 | |||
310 | ; ***************************************************** |
|
309 | ; ***************************************************** | |
311 | ; `cache_repo_longterm` cache for repo object instances |
|
310 | ; `cache_repo_longterm` cache for repo object instances | |
312 | ; Only supported backend is `memory_lru` |
|
311 | ; Only supported backend is `memory_lru` | |
313 | ; ***************************************************** |
|
312 | ; ***************************************************** | |
314 | rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru |
|
313 | rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru | |
315 | ; by default we use 30 Days, cache is still invalidated on push |
|
314 | ; by default we use 30 Days, cache is still invalidated on push | |
316 | rc_cache.cache_repo_longterm.expiration_time = 2592000 |
|
315 | rc_cache.cache_repo_longterm.expiration_time = 2592000 | |
317 | ; max items in LRU cache, set to smaller number to save memory, and expire last used caches |
|
316 | ; max items in LRU cache, set to smaller number to save memory, and expire last used caches | |
318 | rc_cache.cache_repo_longterm.max_size = 10000 |
|
317 | rc_cache.cache_repo_longterm.max_size = 10000 | |
319 |
|
318 | |||
320 |
|
319 | |||
321 | ; ********************************************* |
|
320 | ; ********************************************* | |
322 | ; `cache_general` cache for general purpose use |
|
321 | ; `cache_general` cache for general purpose use | |
323 | ; for simplicity use rc.file_namespace backend, |
|
322 | ; for simplicity use rc.file_namespace backend, | |
324 | ; for performance and scale use rc.redis |
|
323 | ; for performance and scale use rc.redis | |
325 | ; ********************************************* |
|
324 | ; ********************************************* | |
326 | rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace |
|
325 | rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace | |
327 | rc_cache.cache_general.expiration_time = 43200 |
|
326 | rc_cache.cache_general.expiration_time = 43200 | |
328 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set |
|
327 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set | |
329 | #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db |
|
328 | #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db | |
330 |
|
329 | |||
331 | ; alternative `cache_general` redis backend with distributed lock |
|
330 | ; alternative `cache_general` redis backend with distributed lock | |
332 | #rc_cache.cache_general.backend = dogpile.cache.rc.redis |
|
331 | #rc_cache.cache_general.backend = dogpile.cache.rc.redis | |
333 | #rc_cache.cache_general.expiration_time = 300 |
|
332 | #rc_cache.cache_general.expiration_time = 300 | |
334 |
|
333 | |||
335 | ; redis_expiration_time needs to be greater than expiration_time |
|
334 | ; redis_expiration_time needs to be greater than expiration_time | |
336 | #rc_cache.cache_general.arguments.redis_expiration_time = 7200 |
|
335 | #rc_cache.cache_general.arguments.redis_expiration_time = 7200 | |
337 |
|
336 | |||
338 | #rc_cache.cache_general.arguments.host = localhost |
|
337 | #rc_cache.cache_general.arguments.host = localhost | |
339 | #rc_cache.cache_general.arguments.port = 6379 |
|
338 | #rc_cache.cache_general.arguments.port = 6379 | |
340 | #rc_cache.cache_general.arguments.db = 0 |
|
339 | #rc_cache.cache_general.arguments.db = 0 | |
341 | #rc_cache.cache_general.arguments.socket_timeout = 30 |
|
340 | #rc_cache.cache_general.arguments.socket_timeout = 30 | |
342 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
341 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends | |
343 | #rc_cache.cache_general.arguments.distributed_lock = true |
|
342 | #rc_cache.cache_general.arguments.distributed_lock = true | |
344 |
|
343 | |||
345 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
344 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen | |
346 | #rc_cache.cache_general.arguments.lock_auto_renewal = true |
|
345 | #rc_cache.cache_general.arguments.lock_auto_renewal = true | |
347 |
|
346 | |||
348 | ; ************************************************* |
|
347 | ; ************************************************* | |
349 | ; `cache_perms` cache for permission tree, auth TTL |
|
348 | ; `cache_perms` cache for permission tree, auth TTL | |
350 | ; for simplicity use rc.file_namespace backend, |
|
349 | ; for simplicity use rc.file_namespace backend, | |
351 | ; for performance and scale use rc.redis |
|
350 | ; for performance and scale use rc.redis | |
352 | ; ************************************************* |
|
351 | ; ************************************************* | |
353 | rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace |
|
352 | rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace | |
354 | rc_cache.cache_perms.expiration_time = 3600 |
|
353 | rc_cache.cache_perms.expiration_time = 3600 | |
355 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set |
|
354 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set | |
356 | #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db |
|
355 | #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db | |
357 |
|
356 | |||
358 | ; alternative `cache_perms` redis backend with distributed lock |
|
357 | ; alternative `cache_perms` redis backend with distributed lock | |
359 | #rc_cache.cache_perms.backend = dogpile.cache.rc.redis |
|
358 | #rc_cache.cache_perms.backend = dogpile.cache.rc.redis | |
360 | #rc_cache.cache_perms.expiration_time = 300 |
|
359 | #rc_cache.cache_perms.expiration_time = 300 | |
361 |
|
360 | |||
362 | ; redis_expiration_time needs to be greater than expiration_time |
|
361 | ; redis_expiration_time needs to be greater than expiration_time | |
363 | #rc_cache.cache_perms.arguments.redis_expiration_time = 7200 |
|
362 | #rc_cache.cache_perms.arguments.redis_expiration_time = 7200 | |
364 |
|
363 | |||
365 | #rc_cache.cache_perms.arguments.host = localhost |
|
364 | #rc_cache.cache_perms.arguments.host = localhost | |
366 | #rc_cache.cache_perms.arguments.port = 6379 |
|
365 | #rc_cache.cache_perms.arguments.port = 6379 | |
367 | #rc_cache.cache_perms.arguments.db = 0 |
|
366 | #rc_cache.cache_perms.arguments.db = 0 | |
368 | #rc_cache.cache_perms.arguments.socket_timeout = 30 |
|
367 | #rc_cache.cache_perms.arguments.socket_timeout = 30 | |
369 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
368 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends | |
370 | #rc_cache.cache_perms.arguments.distributed_lock = true |
|
369 | #rc_cache.cache_perms.arguments.distributed_lock = true | |
371 |
|
370 | |||
372 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
371 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen | |
373 | #rc_cache.cache_perms.arguments.lock_auto_renewal = true |
|
372 | #rc_cache.cache_perms.arguments.lock_auto_renewal = true | |
374 |
|
373 | |||
375 | ; *************************************************** |
|
374 | ; *************************************************** | |
376 | ; `cache_repo` cache for file tree, Readme, RSS FEEDS |
|
375 | ; `cache_repo` cache for file tree, Readme, RSS FEEDS | |
377 | ; for simplicity use rc.file_namespace backend, |
|
376 | ; for simplicity use rc.file_namespace backend, | |
378 | ; for performance and scale use rc.redis |
|
377 | ; for performance and scale use rc.redis | |
379 | ; *************************************************** |
|
378 | ; *************************************************** | |
380 | rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace |
|
379 | rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace | |
381 | rc_cache.cache_repo.expiration_time = 2592000 |
|
380 | rc_cache.cache_repo.expiration_time = 2592000 | |
382 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set |
|
381 | ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set | |
383 | #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db |
|
382 | #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db | |
384 |
|
383 | |||
385 | ; alternative `cache_repo` redis backend with distributed lock |
|
384 | ; alternative `cache_repo` redis backend with distributed lock | |
386 | #rc_cache.cache_repo.backend = dogpile.cache.rc.redis |
|
385 | #rc_cache.cache_repo.backend = dogpile.cache.rc.redis | |
387 | #rc_cache.cache_repo.expiration_time = 2592000 |
|
386 | #rc_cache.cache_repo.expiration_time = 2592000 | |
388 |
|
387 | |||
389 | ; redis_expiration_time needs to be greater than expiration_time |
|
388 | ; redis_expiration_time needs to be greater than expiration_time | |
390 | #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400 |
|
389 | #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400 | |
391 |
|
390 | |||
392 | #rc_cache.cache_repo.arguments.host = localhost |
|
391 | #rc_cache.cache_repo.arguments.host = localhost | |
393 | #rc_cache.cache_repo.arguments.port = 6379 |
|
392 | #rc_cache.cache_repo.arguments.port = 6379 | |
394 | #rc_cache.cache_repo.arguments.db = 1 |
|
393 | #rc_cache.cache_repo.arguments.db = 1 | |
395 | #rc_cache.cache_repo.arguments.socket_timeout = 30 |
|
394 | #rc_cache.cache_repo.arguments.socket_timeout = 30 | |
396 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends |
|
395 | ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends | |
397 | #rc_cache.cache_repo.arguments.distributed_lock = true |
|
396 | #rc_cache.cache_repo.arguments.distributed_lock = true | |
398 |
|
397 | |||
399 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen |
|
398 | ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen | |
400 | #rc_cache.cache_repo.arguments.lock_auto_renewal = true |
|
399 | #rc_cache.cache_repo.arguments.lock_auto_renewal = true | |
401 |
|
400 | |||
402 | ; ############## |
|
401 | ; ############## | |
403 | ; BEAKER SESSION |
|
402 | ; BEAKER SESSION | |
404 | ; ############## |
|
403 | ; ############## | |
405 |
|
404 | |||
406 | ; beaker.session.type is the type of storage used for logged-in users' sessions. Currently allowed |
|
405 | ; beaker.session.type is the type of storage used for logged-in users' sessions. Currently allowed | |
407 | ; types are file, ext:redis, ext:database, ext:memcached, and memory (default if not specified). |
|
406 | ; types are file, ext:redis, ext:database, ext:memcached, and memory (default if not specified). | |
408 | ; Fastest ones are Redis and ext:database |
|
407 | ; Fastest ones are Redis and ext:database | |
409 | beaker.session.type = file |
|
408 | beaker.session.type = file | |
410 | beaker.session.data_dir = %(here)s/data/sessions |
|
409 | beaker.session.data_dir = %(here)s/data/sessions | |
411 |
|
410 | |||
412 | ; Redis based sessions |
|
411 | ; Redis based sessions | |
413 | #beaker.session.type = ext:redis |
|
412 | #beaker.session.type = ext:redis | |
414 | #beaker.session.url = redis://127.0.0.1:6379/2 |
|
413 | #beaker.session.url = redis://127.0.0.1:6379/2 | |
415 |
|
414 | |||
416 | ; DB based session, fast, and allows easy management over logged in users |
|
415 | ; DB based session, fast, and allows easy management over logged in users | |
417 | #beaker.session.type = ext:database |
|
416 | #beaker.session.type = ext:database | |
418 | #beaker.session.table_name = db_session |
|
417 | #beaker.session.table_name = db_session | |
419 | #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode |
|
418 | #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode | |
420 | #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode |
|
419 | #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode | |
421 | #beaker.session.sa.pool_recycle = 3600 |
|
420 | #beaker.session.sa.pool_recycle = 3600 | |
422 | #beaker.session.sa.echo = false |
|
421 | #beaker.session.sa.echo = false | |
423 |
|
422 | |||
424 | beaker.session.key = rhodecode |
|
423 | beaker.session.key = rhodecode | |
425 | beaker.session.secret = production-rc-uytcxaz |
|
424 | beaker.session.secret = production-rc-uytcxaz | |
426 | beaker.session.lock_dir = %(here)s/data/sessions/lock |
|
425 | beaker.session.lock_dir = %(here)s/data/sessions/lock | |
427 |
|
426 | |||
428 | ; Secure encrypted cookie. Requires AES and AES python libraries |
|
427 | ; Secure encrypted cookie. Requires AES and AES python libraries | |
429 | ; you must disable beaker.session.secret to use this |
|
428 | ; you must disable beaker.session.secret to use this | |
430 | #beaker.session.encrypt_key = key_for_encryption |
|
429 | #beaker.session.encrypt_key = key_for_encryption | |
431 | #beaker.session.validate_key = validation_key |
|
430 | #beaker.session.validate_key = validation_key | |
432 |
|
431 | |||
433 | ; Sets the session as invalid (also logging out the user) if it has not been |
|
432 | ; Sets the session as invalid (also logging out the user) if it has not been | |
434 | ; accessed for the given amount of time in seconds |
|
433 | ; accessed for the given amount of time in seconds | |
435 | beaker.session.timeout = 2592000 |
|
434 | beaker.session.timeout = 2592000 | |
436 | beaker.session.httponly = true |
|
435 | beaker.session.httponly = true | |
437 |
|
436 | |||
438 | ; Path to use for the cookie. Set to prefix if you use prefix middleware |
|
437 | ; Path to use for the cookie. Set to prefix if you use prefix middleware | |
439 | #beaker.session.cookie_path = /custom_prefix |
|
438 | #beaker.session.cookie_path = /custom_prefix | |
440 |
|
439 | |||
441 | ; Set https secure cookie |
|
440 | ; Set https secure cookie | |
442 | beaker.session.secure = false |
|
441 | beaker.session.secure = false | |
443 |
|
442 | |||
444 | ; default cookie expiration time in seconds, set to `true` to set expire |
|
443 | ; default cookie expiration time in seconds, set to `true` to set expire | |
445 | ; at browser close |
|
444 | ; at browser close | |
446 | #beaker.session.cookie_expires = 3600 |
|
445 | #beaker.session.cookie_expires = 3600 | |
447 |
|
446 | |||
448 | ; ############################# |
|
447 | ; ############################# | |
449 | ; SEARCH INDEXING CONFIGURATION |
|
448 | ; SEARCH INDEXING CONFIGURATION | |
450 | ; ############################# |
|
449 | ; ############################# | |
451 |
|
450 | |||
452 | ; Full text search indexer is available in rhodecode-tools under |
|
451 | ; Full text search indexer is available in rhodecode-tools under | |
453 | ; `rhodecode-tools index` command |
|
452 | ; `rhodecode-tools index` command | |
454 |
|
453 | |||
455 | ; WHOOSH Backend, doesn't require additional services to run |
|
454 | ; WHOOSH Backend, doesn't require additional services to run | |
456 | ; it works well with a few dozen repos |
|
455 | ; it works well with a few dozen repos | |
457 | search.module = rhodecode.lib.index.whoosh |
|
456 | search.module = rhodecode.lib.index.whoosh | |
458 | search.location = %(here)s/data/index |
|
457 | search.location = %(here)s/data/index | |
459 |
|
458 | |||
460 | ; #################### |
|
459 | ; #################### | |
461 | ; CHANNELSTREAM CONFIG |
|
460 | ; CHANNELSTREAM CONFIG | |
462 | ; #################### |
|
461 | ; #################### | |
463 |
|
462 | |||
464 | ; channelstream enables persistent connections and live notification |
|
463 | ; channelstream enables persistent connections and live notification | |
465 | ; in the system. It's also used by the chat system |
|
464 | ; in the system. It's also used by the chat system | |
466 |
|
465 | |||
467 | channelstream.enabled = false |
|
466 | channelstream.enabled = false | |
468 |
|
467 | |||
469 | ; server address for channelstream server on the backend |
|
468 | ; server address for channelstream server on the backend | |
470 | channelstream.server = 127.0.0.1:9800 |
|
469 | channelstream.server = 127.0.0.1:9800 | |
471 |
|
470 | |||
472 | ; location of the channelstream server from outside world |
|
471 | ; location of the channelstream server from outside world | |
473 | ; use ws:// for http or wss:// for https. This address needs to be handled |
|
472 | ; use ws:// for http or wss:// for https. This address needs to be handled | |
474 | ; by external HTTP server such as Nginx or Apache |
|
473 | ; by external HTTP server such as Nginx or Apache | |
475 | ; see Nginx/Apache configuration examples in our docs |
|
474 | ; see Nginx/Apache configuration examples in our docs | |
476 | channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream |
|
475 | channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream | |
477 | channelstream.secret = secret |
|
476 | channelstream.secret = secret | |
478 | channelstream.history.location = %(here)s/channelstream_history |
|
477 | channelstream.history.location = %(here)s/channelstream_history | |
479 |
|
478 | |||
480 | ; Internal application path that Javascript uses to connect into. |
|
479 | ; Internal application path that Javascript uses to connect into. | |
481 | ; If you use proxy-prefix the prefix should be added before /_channelstream |
|
480 | ; If you use proxy-prefix the prefix should be added before /_channelstream | |
482 | channelstream.proxy_path = /_channelstream |
|
481 | channelstream.proxy_path = /_channelstream | |
483 |
|
482 | |||
484 |
|
483 | |||
485 | ; ############################## |
|
484 | ; ############################## | |
486 | ; MAIN RHODECODE DATABASE CONFIG |
|
485 | ; MAIN RHODECODE DATABASE CONFIG | |
487 | ; ############################## |
|
486 | ; ############################## | |
488 |
|
487 | |||
489 | #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30 |
|
488 | #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30 | |
490 | #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode |
|
489 | #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode | |
491 | #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8 |
|
490 | #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8 | |
492 | ; pymysql is an alternative driver for MySQL, use in case of problems with default one |
|
491 | ; pymysql is an alternative driver for MySQL, use in case of problems with default one | |
493 | #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode |
|
492 | #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode | |
494 |
|
493 | |||
495 | sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode |
|
494 | sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode | |
496 |
|
495 | |||
497 | ; see sqlalchemy docs for other advanced settings |
|
496 | ; see sqlalchemy docs for other advanced settings | |
498 | ; print the sql statements to output |
|
497 | ; print the sql statements to output | |
499 | sqlalchemy.db1.echo = false |
|
498 | sqlalchemy.db1.echo = false | |
500 |
|
499 | |||
501 | ; recycle the connections after this amount of seconds |
|
500 | ; recycle the connections after this amount of seconds | |
502 | sqlalchemy.db1.pool_recycle = 3600 |
|
501 | sqlalchemy.db1.pool_recycle = 3600 | |
503 |
|
502 | |||
504 | ; the number of connections to keep open inside the connection pool. |
|
503 | ; the number of connections to keep open inside the connection pool. | |
505 | ; 0 indicates no limit |
|
504 | ; 0 indicates no limit | |
506 | ; the general calculus with gevent is: |
|
505 | ; the general calculus with gevent is: | |
507 | ; if your system allows 500 concurrent greenlets (max_connections) that all do database access, |
|
506 | ; if your system allows 500 concurrent greenlets (max_connections) that all do database access, | |
508 | ; then increase pool size + max overflow so that they add up to 500. |
|
507 | ; then increase pool size + max overflow so that they add up to 500. | |
509 | #sqlalchemy.db1.pool_size = 5 |
|
508 | #sqlalchemy.db1.pool_size = 5 | |
510 |
|
509 | |||
511 | ; The number of connections to allow in connection pool "overflow", that is |
|
510 | ; The number of connections to allow in connection pool "overflow", that is | |
512 | ; connections that can be opened above and beyond the pool_size setting, |
|
511 | ; connections that can be opened above and beyond the pool_size setting, | |
513 | ; which defaults to five. |
|
512 | ; which defaults to five. | |
514 | #sqlalchemy.db1.max_overflow = 10 |
|
513 | #sqlalchemy.db1.max_overflow = 10 | |
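; Worked example of the gevent sizing rule above, with illustrative numbers only:
; for 500 concurrent greenlets doing database work, pool_size + max_overflow should
; add up to 500, e.g.:
#sqlalchemy.db1.pool_size = 100
#sqlalchemy.db1.max_overflow = 400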
515 |
|
514 | |||
516 | ; Connection check ping, used to detect broken database connections |
|
515 | ; Connection check ping, used to detect broken database connections | |
517 | ; could be enabled to better handle cases of MySQL 'has gone away' errors |
|
516 | ; could be enabled to better handle cases of MySQL 'has gone away' errors | |
518 | #sqlalchemy.db1.ping_connection = true |
|
517 | #sqlalchemy.db1.ping_connection = true | |
519 |
|
518 | |||
520 | ; ########## |
|
519 | ; ########## | |
521 | ; VCS CONFIG |
|
520 | ; VCS CONFIG | |
522 | ; ########## |
|
521 | ; ########## | |
523 | vcs.server.enable = true |
|
522 | vcs.server.enable = true | |
524 | vcs.server = localhost:9900 |
|
523 | vcs.server = localhost:9900 | |
525 |
|
524 | |||
526 | ; Web server connectivity protocol, responsible for web based VCS operations |
|
525 | ; Web server connectivity protocol, responsible for web based VCS operations | |
527 | ; Available protocols are: |
|
526 | ; Available protocols are: | |
528 | ; `http` - use http-rpc backend (default) |
|
527 | ; `http` - use http-rpc backend (default) | |
529 | vcs.server.protocol = http |
|
528 | vcs.server.protocol = http | |
530 |
|
529 | |||
531 | ; Push/Pull operations protocol, available options are: |
|
530 | ; Push/Pull operations protocol, available options are: | |
532 | ; `http` - use http-rpc backend (default) |
|
531 | ; `http` - use http-rpc backend (default) | |
533 | vcs.scm_app_implementation = http |
|
532 | vcs.scm_app_implementation = http | |
534 |
|
533 | |||
535 | ; Push/Pull operations hooks protocol, available options are: |
|
534 | ; Push/Pull operations hooks protocol, available options are: | |
536 | ; `http` - use http-rpc backend (default) |
|
535 | ; `http` - use http-rpc backend (default) | |
537 | vcs.hooks.protocol = http |
|
536 | vcs.hooks.protocol = http | |
538 |
|
537 | |||
539 | ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be |
|
538 | ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be | |
540 | ; accessible via network. |
|
539 | ; accessible via network. | |
541 | ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker) |
|
540 | ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker) | |
542 | vcs.hooks.host = * |
|
541 | vcs.hooks.host = * | |
543 |
|
542 | |||
544 | ; Start VCSServer with this instance as a subprocess, useful for development |
|
543 | ; Start VCSServer with this instance as a subprocess, useful for development | |
545 | vcs.start_server = false |
|
544 | vcs.start_server = false | |
546 |
|
545 | |||
547 | ; List of enabled VCS backends, available options are: |
|
546 | ; List of enabled VCS backends, available options are: | |
548 | ; `hg` - mercurial |
|
547 | ; `hg` - mercurial | |
549 | ; `git` - git |
|
548 | ; `git` - git | |
550 | ; `svn` - subversion |
|
549 | ; `svn` - subversion | |
551 | vcs.backends = hg, git, svn |
|
550 | vcs.backends = hg, git, svn | |
552 |
|
551 | |||
553 | ; Wait this number of seconds before killing connection to the vcsserver |
|
552 | ; Wait this number of seconds before killing connection to the vcsserver | |
554 | vcs.connection_timeout = 3600 |
|
553 | vcs.connection_timeout = 3600 | |
555 |
|
554 | |||
556 | ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out. |
|
555 | ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out. | |
557 | ; Set a numeric version for your current SVN e.g 1.8, or 1.12 |
|
556 | ; Set a numeric version for your current SVN e.g 1.8, or 1.12 | |
558 | ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible |
|
557 | ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible | |
559 | #vcs.svn.compatible_version = 1.8 |
|
558 | #vcs.svn.compatible_version = 1.8 | |
560 |
|
559 | |||
561 | ; Cache flag to cache vcsserver remote calls locally |
|
560 | ; Cache flag to cache vcsserver remote calls locally | |
562 | ; It uses cache_region `cache_repo` |
|
561 | ; It uses cache_region `cache_repo` | |
563 | vcs.methods.cache = true |
|
562 | vcs.methods.cache = true | |
564 |
|
563 | |||
565 | ; #################################################### |
|
564 | ; #################################################### | |
566 | ; Subversion proxy support (mod_dav_svn) |
|
565 | ; Subversion proxy support (mod_dav_svn) | |
567 | ; Maps RhodeCode repo groups into SVN paths for Apache |
|
566 | ; Maps RhodeCode repo groups into SVN paths for Apache | |
568 | ; #################################################### |
|
567 | ; #################################################### | |
569 |
|
568 | |||
570 | ; Enable or disable the config file generation. |
|
569 | ; Enable or disable the config file generation. | |
571 | svn.proxy.generate_config = false |
|
570 | svn.proxy.generate_config = false | |
572 |
|
571 | |||
573 | ; Generate config file with `SVNListParentPath` set to `On`. |
|
572 | ; Generate config file with `SVNListParentPath` set to `On`. | |
574 | svn.proxy.list_parent_path = true |
|
573 | svn.proxy.list_parent_path = true | |
575 |
|
574 | |||
576 | ; Set location and file name of generated config file. |
|
575 | ; Set location and file name of generated config file. | |
577 | svn.proxy.config_file_path = %(here)s/mod_dav_svn.conf |
|
576 | svn.proxy.config_file_path = %(here)s/mod_dav_svn.conf | |
578 |
|
577 | |||
579 | ; alternative mod_dav config template. This needs to be a valid mako template |
|
578 | ; alternative mod_dav config template. This needs to be a valid mako template | |
580 | ; Example template can be found in the source code: |
|
579 | ; Example template can be found in the source code: | |
581 | ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako |
|
580 | ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako | |
582 | #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako |
|
581 | #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako | |
583 |
|
582 | |||
584 | ; Used as a prefix to the `Location` block in the generated config file. |
|
583 | ; Used as a prefix to the `Location` block in the generated config file. | |
585 | ; In most cases it should be set to `/`. |
|
584 | ; In most cases it should be set to `/`. | |
586 | svn.proxy.location_root = / |
|
585 | svn.proxy.location_root = / | |
587 |
|
586 | |||
588 | ; Command to reload the mod dav svn configuration on change. |
|
587 | ; Command to reload the mod dav svn configuration on change. | |
589 | ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh |
|
588 | ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh | |
590 | ; Make sure user who runs RhodeCode process is allowed to reload Apache |
|
589 | ; Make sure user who runs RhodeCode process is allowed to reload Apache | |
591 | #svn.proxy.reload_cmd = /etc/init.d/apache2 reload |
|
590 | #svn.proxy.reload_cmd = /etc/init.d/apache2 reload | |
592 |
|
591 | |||
593 | ; If the timeout expires before the reload command finishes, the command will |
|
592 | ; If the timeout expires before the reload command finishes, the command will | |
594 | ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds. |
|
593 | ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds. | |
595 | #svn.proxy.reload_timeout = 10 |
|
594 | #svn.proxy.reload_timeout = 10 | |
596 |
|
595 | |||
597 | ; #################### |
|
596 | ; #################### | |
598 | ; SSH Support Settings |
|
597 | ; SSH Support Settings | |
599 | ; #################### |
|
598 | ; #################### | |
600 |
|
599 | |||
601 | ; Defines if a custom authorized_keys file should be created and written on |
|
600 | ; Defines if a custom authorized_keys file should be created and written on | |
602 | ; any change of user ssh keys. Setting this to false also disables the possibility |
|
601 | ; any change of user ssh keys. Setting this to false also disables the possibility | |
603 | ; of adding SSH keys by users from the web interface. Super admins can still |
|
602 | ; of adding SSH keys by users from the web interface. Super admins can still | |
604 | ; manage SSH Keys. |
|
603 | ; manage SSH Keys. | |
605 | ssh.generate_authorized_keyfile = false |
|
604 | ssh.generate_authorized_keyfile = false | |
606 |
|
605 | |||
607 | ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding` |
|
606 | ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding` | |
608 | # ssh.authorized_keys_ssh_opts = |
|
607 | # ssh.authorized_keys_ssh_opts = | |
609 |
|
608 | |||
610 | ; Path to the authorized_keys file where the generated entries are placed. |
|
609 | ; Path to the authorized_keys file where the generated entries are placed. | |
611 | ; It is possible to have multiple key files specified in `sshd_config` e.g. |
|
610 | ; It is possible to have multiple key files specified in `sshd_config` e.g. | |
612 | ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode |
|
611 | ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode | |
613 | ssh.authorized_keys_file_path = ~/.ssh/authorized_keys_rhodecode |
|
612 | ssh.authorized_keys_file_path = ~/.ssh/authorized_keys_rhodecode | |
614 |
|
613 | |||
615 | ; Command to execute the SSH wrapper. The binary is available in the |
|
614 | ; Command to execute the SSH wrapper. The binary is available in the | |
616 | ; RhodeCode installation directory. |
|
615 | ; RhodeCode installation directory. | |
617 | ; e.g ~/.rccontrol/community-1/profile/bin/rc-ssh-wrapper |
|
616 | ; e.g ~/.rccontrol/community-1/profile/bin/rc-ssh-wrapper | |
618 | ssh.wrapper_cmd = ~/.rccontrol/community-1/rc-ssh-wrapper |
|
617 | ssh.wrapper_cmd = ~/.rccontrol/community-1/rc-ssh-wrapper | |
619 |
|
618 | |||
620 | ; Allow shell when executing the ssh-wrapper command |
|
619 | ; Allow shell when executing the ssh-wrapper command | |
621 | ssh.wrapper_cmd_allow_shell = false |
|
620 | ssh.wrapper_cmd_allow_shell = false | |
622 |
|
621 | |||
623 | ; Enables logging, and detailed output sent back to the client during SSH |
|
622 | ; Enables logging, and detailed output sent back to the client during SSH | |
624 | ; operations. Useful for debugging, shouldn't be used in production. |
|
623 | ; operations. Useful for debugging, shouldn't be used in production. | |
625 | ssh.enable_debug_logging = false |
|
624 | ssh.enable_debug_logging = false | |
626 |
|
625 | |||
627 | ; Paths to binary executables, by default they are just the names, but we can |
|
626 | ; Paths to binary executables, by default they are just the names, but we can | |
628 | ; override them if we want to use a custom one |
|
627 | ; override them if we want to use a custom one | |
629 | ssh.executable.hg = ~/.rccontrol/vcsserver-1/profile/bin/hg |
|
628 | ssh.executable.hg = ~/.rccontrol/vcsserver-1/profile/bin/hg | |
630 | ssh.executable.git = ~/.rccontrol/vcsserver-1/profile/bin/git |
|
629 | ssh.executable.git = ~/.rccontrol/vcsserver-1/profile/bin/git | |
631 | ssh.executable.svn = ~/.rccontrol/vcsserver-1/profile/bin/svnserve |
|
630 | ssh.executable.svn = ~/.rccontrol/vcsserver-1/profile/bin/svnserve | |
632 |
|
631 | |||
633 | ; Enables SSH key generator web interface. Disabling this still allows users |
|
632 | ; Enables SSH key generator web interface. Disabling this still allows users | |
634 | ; to add their own keys. |
|
633 | ; to add their own keys. | |
635 | ssh.enable_ui_key_generator = true |
|
634 | ssh.enable_ui_key_generator = true | |
636 |
|
635 | |||
637 |
|
636 | |||
638 | ; ################# |
|
637 | ; ################# | |
639 | ; APPENLIGHT CONFIG |
|
638 | ; APPENLIGHT CONFIG | |
640 | ; ################# |
|
639 | ; ################# | |
641 |
|
640 | |||
642 | ; Appenlight is tailored to work with RhodeCode, see |
|
641 | ; Appenlight is tailored to work with RhodeCode, see | |
643 | ; http://appenlight.rhodecode.com for details how to obtain an account |
|
642 | ; http://appenlight.rhodecode.com for details how to obtain an account | |
644 |
|
643 | |||
645 | ; Appenlight integration enabled |
|
644 | ; Appenlight integration enabled | |
646 | #appenlight = false |
|
645 | #appenlight = false | |
647 |
|
646 | |||
648 | #appenlight.server_url = https://api.appenlight.com |
|
647 | #appenlight.server_url = https://api.appenlight.com | |
649 | #appenlight.api_key = YOUR_API_KEY |
|
648 | #appenlight.api_key = YOUR_API_KEY | |
650 | #appenlight.transport_config = https://api.appenlight.com?threaded=1&timeout=5 |
|
649 | #appenlight.transport_config = https://api.appenlight.com?threaded=1&timeout=5 | |
651 |
|
650 | |||
652 | ; used for JS client |
|
651 | ; used for JS client | |
653 | #appenlight.api_public_key = YOUR_API_PUBLIC_KEY |
|
652 | #appenlight.api_public_key = YOUR_API_PUBLIC_KEY | |
654 |
|
653 | |||
655 | ; TWEAK AMOUNT OF INFO SENT HERE |
|
654 | ; TWEAK AMOUNT OF INFO SENT HERE | |
656 |
|
655 | |||
657 | ; enables 404 error logging (default False) |
|
656 | ; enables 404 error logging (default False) | |
658 | #appenlight.report_404 = false |
|
657 | #appenlight.report_404 = false | |
659 |
|
658 | |||
660 | ; time in seconds after which a request is considered slow (default 1) |
|
659 | ; time in seconds after which a request is considered slow (default 1) | |
661 | #appenlight.slow_request_time = 1 |
|
660 | #appenlight.slow_request_time = 1 | |
662 |
|
661 | |||
663 | ; record slow requests in application |
|
662 | ; record slow requests in application | |
664 | ; (needs to be enabled for slow datastore recording and time tracking) |
|
663 | ; (needs to be enabled for slow datastore recording and time tracking) | |
665 | #appenlight.slow_requests = true |
|
664 | #appenlight.slow_requests = true | |
666 |
|
665 | |||
667 | ; enable hooking to application loggers |
|
666 | ; enable hooking to application loggers | |
668 | #appenlight.logging = true |
|
667 | #appenlight.logging = true | |
669 |
|
668 | |||
670 | ; minimum log level for log capture |
|
669 | ; minimum log level for log capture | |
671 | #appenlight.logging.level = WARNING |
|
670 | #appenlight.logging.level = WARNING | |
672 |
|
671 | |||
673 | ; send logs only from erroneous/slow requests |
|
672 | ; send logs only from erroneous/slow requests | |
674 | ; (saves API quota for intensive logging) |
|
673 | ; (saves API quota for intensive logging) | |
675 | #appenlight.logging_on_error = false |
|
674 | #appenlight.logging_on_error = false | |
676 |
|
675 | |||
677 | ; list of additional keywords that should be grabbed from environ object |
|
676 | ; list of additional keywords that should be grabbed from environ object | |
678 | ; can be a string with a comma-separated list of words in lowercase |
|
677 | ; can be a string with a comma-separated list of words in lowercase | |
679 | ; (by default the client will always send the following info: |
|
678 | ; (by default the client will always send the following info: | |
680 | ; 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that |
|
679 | ; 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that | |
681 | ; start with HTTP*); this list can be extended with additional keywords here |
|
680 | ; start with HTTP*); this list can be extended with additional keywords here | |
682 | #appenlight.environ_keys_whitelist = |
|
681 | #appenlight.environ_keys_whitelist = | |
683 |
|
682 | |||
684 | ; list of keywords that should be blanked from request object |
|
683 | ; list of keywords that should be blanked from request object | |
685 | ; can be a string with a comma-separated list of words in lowercase |
|
684 | ; can be a string with a comma-separated list of words in lowercase | |
686 | ; (by default the client will always blank keys that contain the following words: |
|
685 | ; (by default the client will always blank keys that contain the following words: | |
687 | ; 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf') |
|
686 | ; 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf') | |
688 | ; this list can be extended with additional keywords set here |
|
687 | ; this list can be extended with additional keywords set here | |
689 | #appenlight.request_keys_blacklist = |
|
688 | #appenlight.request_keys_blacklist = | |
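As an illustration of the blanking rule described above (a hypothetical sketch, not the Appenlight client's actual code), a filter that blanks any key containing one of the listed words could look like this in Python:

    # Hypothetical sketch of the blanking behaviour described above;
    # the real filtering happens inside the Appenlight client itself.
    SENSITIVE_WORDS = ('password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf')

    def blank_sensitive(data):
        # Replace values whose key contains any sensitive word.
        return {
            key: ('***' if any(word in key.lower() for word in SENSITIVE_WORDS) else value)
            for key, value in data.items()
        }

    print(blank_sensitive({'username': 'bob', 'auth_tkt': 'abc123'}))
    # -> {'username': 'bob', 'auth_tkt': '***'}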
690 |
|
689 | |||
691 | ; list of namespaces that should be ignored when gathering log entries |
|
690 | ; list of namespaces that should be ignored when gathering log entries | |
692 | ; can be string with comma separated list of namespaces |
|
691 | ; can be string with comma separated list of namespaces | |
693 | ; (by default the client ignores own entries: appenlight_client.client) |
|
692 | ; (by default the client ignores own entries: appenlight_client.client) | |
694 | #appenlight.log_namespace_blacklist = |
|
693 | #appenlight.log_namespace_blacklist = | |
695 |
|
694 | |||
696 | ; Statsd client config; this is used to send metrics to a statsd server |
|
695 | ; Statsd client config; this is used to send metrics to a statsd server | |
697 | ; We recommend setting up statsd_exporter and scraping the metrics with Prometheus |
|
696 | ; We recommend setting up statsd_exporter and scraping the metrics with Prometheus | |
698 | #statsd.enabled = false |
|
697 | #statsd.enabled = false | |
699 | #statsd.statsd_host = 0.0.0.0 |
|
698 | #statsd.statsd_host = 0.0.0.0 | |
700 | #statsd.statsd_port = 8125 |
|
699 | #statsd.statsd_port = 8125 | |
701 | #statsd.statsd_prefix = |
|
700 | #statsd.statsd_prefix = | |
702 | #statsd.statsd_ipv6 = false |
|
701 | #statsd.statsd_ipv6 = false | |
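The statsd options above only tell the client where to send its UDP metrics; a minimal sketch of such a client, assuming the community statsd package (pip install statsd) rather than RhodeCode's bundled client, could be:

    # Illustration only: uses the community "statsd" package; values mirror
    # the commented defaults above (host 0.0.0.0, port 8125, no prefix).
    from statsd import StatsClient

    client = StatsClient(host='0.0.0.0', port=8125, prefix=None)
    client.incr('rhodecode.example.requests')            # count an event
    client.timing('rhodecode.example.response_ms', 42)   # record a timing in ms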
703 |
|
702 | |||
704 | ; logging is configured automatically at server startup; set to false |
|
703 | ; logging is configured automatically at server startup; set to false | |
705 | ; to use the custom logging config below instead. |
|
704 | ; to use the custom logging config below instead. | |
706 | ; RC_LOGGING_FORMATTER |
|
705 | ; RC_LOGGING_FORMATTER | |
707 | ; RC_LOGGING_LEVEL |
|
706 | ; RC_LOGGING_LEVEL | |
708 | ; these env variables can control the logging settings when autoconfigure is used |
|
707 | ; these env variables can control the logging settings when autoconfigure is used | |
709 |
|
708 | |||
710 | #logging.autoconfigure = true |
|
709 | #logging.autoconfigure = true | |
711 |
|
710 | |||
712 | ; specify your own custom logging config file to configure logging |
|
711 | ; specify your own custom logging config file to configure logging | |
713 | #logging.logging_conf_file = /path/to/custom_logging.ini |
|
712 | #logging.logging_conf_file = /path/to/custom_logging.ini | |
714 |
|
713 | |||
715 | ; Dummy marker to add new entries after. |
|
714 | ; Dummy marker to add new entries after. | |
716 | ; Add any custom entries below. Please don't remove this marker. |
|
715 | ; Add any custom entries below. Please don't remove this marker. | |
717 | custom.conf = 1 |
|
716 | custom.conf = 1 | |
718 |
|
717 | |||
719 |
|
718 | |||
720 | ; ##################### |
|
719 | ; ##################### | |
721 | ; LOGGING CONFIGURATION |
|
720 | ; LOGGING CONFIGURATION | |
722 | ; ##################### |
|
721 | ; ##################### | |
723 |
|
722 | |||
724 | [loggers] |
|
723 | [loggers] | |
725 | keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper |
|
724 | keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper | |
726 |
|
725 | |||
727 | [handlers] |
|
726 | [handlers] | |
728 | keys = console, console_sql |
|
727 | keys = console, console_sql | |
729 |
|
728 | |||
730 | [formatters] |
|
729 | [formatters] | |
731 | keys = generic, json, color_formatter, color_formatter_sql |
|
730 | keys = generic, json, color_formatter, color_formatter_sql | |
732 |
|
731 | |||
733 | ; ####### |
|
732 | ; ####### | |
734 | ; LOGGERS |
|
733 | ; LOGGERS | |
735 | ; ####### |
|
734 | ; ####### | |
736 | [logger_root] |
|
735 | [logger_root] | |
737 | level = NOTSET |
|
736 | level = NOTSET | |
738 | handlers = console |
|
737 | handlers = console | |
739 |
|
738 | |||
740 | [logger_sqlalchemy] |
|
739 | [logger_sqlalchemy] | |
741 | level = INFO |
|
740 | level = INFO | |
742 | handlers = console_sql |
|
741 | handlers = console_sql | |
743 | qualname = sqlalchemy.engine |
|
742 | qualname = sqlalchemy.engine | |
744 | propagate = 0 |
|
743 | propagate = 0 | |
745 |
|
744 | |||
746 | [logger_beaker] |
|
745 | [logger_beaker] | |
747 | level = DEBUG |
|
746 | level = DEBUG | |
748 | handlers = |
|
747 | handlers = | |
749 | qualname = beaker.container |
|
748 | qualname = beaker.container | |
750 | propagate = 1 |
|
749 | propagate = 1 | |
751 |
|
750 | |||
752 | [logger_rhodecode] |
|
751 | [logger_rhodecode] | |
753 | level = DEBUG |
|
752 | level = DEBUG | |
754 | handlers = |
|
753 | handlers = | |
755 | qualname = rhodecode |
|
754 | qualname = rhodecode | |
756 | propagate = 1 |
|
755 | propagate = 1 | |
757 |
|
756 | |||
758 | [logger_ssh_wrapper] |
|
757 | [logger_ssh_wrapper] | |
759 | level = DEBUG |
|
758 | level = DEBUG | |
760 | handlers = |
|
759 | handlers = | |
761 | qualname = ssh_wrapper |
|
760 | qualname = ssh_wrapper | |
762 | propagate = 1 |
|
761 | propagate = 1 | |
763 |
|
762 | |||
764 | [logger_celery] |
|
763 | [logger_celery] | |
765 | level = DEBUG |
|
764 | level = DEBUG | |
766 | handlers = |
|
765 | handlers = | |
767 | qualname = celery |
|
766 | qualname = celery | |
768 |
|
767 | |||
769 |
|
768 | |||
770 | ; ######## |
|
769 | ; ######## | |
771 | ; HANDLERS |
|
770 | ; HANDLERS | |
772 | ; ######## |
|
771 | ; ######## | |
773 |
|
772 | |||
774 | [handler_console] |
|
773 | [handler_console] | |
775 | class = StreamHandler |
|
774 | class = StreamHandler | |
776 | args = (sys.stderr, ) |
|
775 | args = (sys.stderr, ) | |
777 | level = INFO |
|
776 | level = INFO | |
778 | ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json' |
|
777 | ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json' | |
779 | ; This allows sending properly formatted logs to Grafana Loki or Elasticsearch |
|
778 | ; This allows sending properly formatted logs to Grafana Loki or Elasticsearch | |
780 | formatter = generic |
|
779 | formatter = generic | |
781 |
|
780 | |||
782 | [handler_console_sql] |
|
781 | [handler_console_sql] | |
783 | ; "level = DEBUG" logs SQL queries and results. |
|
782 | ; "level = DEBUG" logs SQL queries and results. | |
784 | ; "level = INFO" logs SQL queries. |
|
783 | ; "level = INFO" logs SQL queries. | |
785 | ; "level = WARN" logs neither. (Recommended for production systems.) |
|
784 | ; "level = WARN" logs neither. (Recommended for production systems.) | |
786 | class = StreamHandler |
|
785 | class = StreamHandler | |
787 | args = (sys.stderr, ) |
|
786 | args = (sys.stderr, ) | |
788 | level = WARN |
|
787 | level = WARN | |
789 | ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json' |
|
788 | ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json' | |
790 | ; This allows sending properly formatted logs to Grafana Loki or Elasticsearch |
|
789 | ; This allows sending properly formatted logs to Grafana Loki or Elasticsearch | |
791 | formatter = generic |
|
790 | formatter = generic | |
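The level comments above act as plain logging thresholds: SQLAlchemy emits queries at INFO and result rows at DEBUG on the sqlalchemy.engine logger, so WARN suppresses both. A standalone Python sketch of the same effect, independent of this ini file:

    import logging

    handler = logging.StreamHandler()
    handler.setLevel(logging.WARN)             # equivalent of "level = WARN" above

    sql_log = logging.getLogger('sqlalchemy.engine')
    sql_log.addHandler(handler)
    sql_log.setLevel(logging.INFO)             # matches [logger_sqlalchemy] earlier in the file

    sql_log.info('SELECT 1')                   # filtered out by the WARN handler
    sql_log.warning('slow query detected')     # still printed to stderr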
792 |
|
791 | |||
793 | ; ########## |
|
792 | ; ########## | |
794 | ; FORMATTERS |
|
793 | ; FORMATTERS | |
795 | ; ########## |
|
794 | ; ########## | |
796 |
|
795 | |||
797 | [formatter_generic] |
|
796 | [formatter_generic] | |
798 | class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter |
|
797 | class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter | |
799 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s |
|
798 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s | |
800 | datefmt = %Y-%m-%d %H:%M:%S |
|
799 | datefmt = %Y-%m-%d %H:%M:%S | |
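The format and datefmt values above are standard logging format strings; ExceptionAwareFormatter is RhodeCode's own subclass, but with the plain stdlib Formatter the same strings already produce lines such as "2024-05-01 12:00:00.123 [4242] INFO  [rhodecode.lib] something happened" (timestamp and pid illustrative), as this small sketch shows:

    import logging

    formatter = logging.Formatter(
        fmt='%(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')

    handler = logging.StreamHandler()
    handler.setFormatter(formatter)

    log = logging.getLogger('rhodecode.lib')
    log.addHandler(handler)
    log.setLevel(logging.INFO)
    log.info('something happened')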
801 |
|
800 | |||
802 | [formatter_color_formatter] |
|
801 | [formatter_color_formatter] | |
803 | class = rhodecode.lib.logging_formatter.ColorFormatter |
|
802 | class = rhodecode.lib.logging_formatter.ColorFormatter | |
804 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s |
|
803 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s | |
805 | datefmt = %Y-%m-%d %H:%M:%S |
|
804 | datefmt = %Y-%m-%d %H:%M:%S | |
806 |
|
805 | |||
807 | [formatter_color_formatter_sql] |
|
806 | [formatter_color_formatter_sql] | |
808 | class = rhodecode.lib.logging_formatter.ColorFormatterSql |
|
807 | class = rhodecode.lib.logging_formatter.ColorFormatterSql | |
809 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s |
|
808 | format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s | |
810 | datefmt = %Y-%m-%d %H:%M:%S |
|
809 | datefmt = %Y-%m-%d %H:%M:%S | |
811 |
|
810 | |||
812 | [formatter_json] |
|
811 | [formatter_json] | |
813 | format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s |
|
812 | format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s | |
814 | class = rhodecode.lib._vendor.jsonlogger.JsonFormatter |
|
813 | class = rhodecode.lib._vendor.jsonlogger.JsonFormatter |
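The [loggers], [handlers] and [formatters] sections above follow the standard library's fileConfig layout, so the same logging setup can be loaded outside the server process. A minimal sketch, assuming rhodecode is importable (the formatter classes live there) and with the ini path adjusted to your installation:

    import logging
    import logging.config

    # Assumed path, adjust to your actual RhodeCode .ini file; fileConfig only
    # reads the [loggers]/[handlers]/[formatters] sections and ignores the rest.
    logging.config.fileConfig('/etc/rhodecode/rhodecode.ini',
                              disable_existing_loggers=False)

    logging.getLogger('rhodecode').debug('logging configured from the ini file')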