configs: added small fixes into gunicorn configs
super-admin
r5136:7b3b90fa default
@@ -1,172 +1,172 b''
1 1 # required for pushd to work..
2 2 SHELL = /bin/bash
3 3
4 4
5 5 # set by: PATH_TO_OUTDATED_PACKAGES=/some/path/outdated_packages.py
6 6 OUTDATED_PACKAGES = ${PATH_TO_OUTDATED_PACKAGES}
7 7
8 8 NODE_PATH=./node_modules
9 9 WEBPACK=./node_binaries/webpack
10 10 GRUNT=./node_binaries/grunt
11 11
12 12 .PHONY: clean
13 13 ## Cleanup compiled and cached py files
14 14 clean:
15 15 make test-clean
16 16 find . -type f \( -iname '*.c' -o -iname '*.pyc' -o -iname '*.so' -o -iname '*.orig' \) -exec rm '{}' ';'
17 17 find . -type d -name "build" -prune -exec rm -rf '{}' ';'
18 18
19 19 .PHONY: test
20 20 ## run test-clean and tests
21 21 test:
22 22 make test-clean
23 23 make test-only
24 24
25 25
26 26 .PHONY: test-clean
27 27 ## Cleanup test related files and caches
28 28 test-clean:
29 29 rm -rf coverage.xml htmlcov junit.xml pylint.log result
30 30 find . -type d -name "__pycache__" -prune -exec rm -rf '{}' ';'
31 31 find . -type f \( -iname '.coverage.*' \) -exec rm '{}' ';'
32 32
33 33
34 34 .PHONY: test-only
35 35 ## Run tests only without cleanup
36 36 test-only:
37 37 PYTHONHASHSEED=random \
38 38 py.test -x -vv -r xw -p no:sugar \
39 39 --cov-report=term-missing --cov-report=html \
40 40 --cov=rhodecode rhodecode
41 41
42 42
43 43 .PHONY: test-only-mysql
44 44 ## run tests against mysql
45 45 test-only-mysql:
46 46 PYTHONHASHSEED=random \
47 47 py.test -x -vv -r xw -p no:sugar \
48 48 --cov-report=term-missing --cov-report=html \
49 49 --ini-config-override='{"app:main": {"sqlalchemy.db1.url": "mysql://root:qweqwe@localhost/rhodecode_test?charset=utf8"}}' \
50 50 --cov=rhodecode rhodecode
51 51
52 52
53 53 .PHONY: test-only-postgres
54 54 ## run tests against postgres
55 55 test-only-postgres:
56 56 PYTHONHASHSEED=random \
57 57 py.test -x -vv -r xw -p no:sugar \
58 58 --cov-report=term-missing --cov-report=html \
59 59 --ini-config-override='{"app:main": {"sqlalchemy.db1.url": "postgresql://postgres:qweqwe@localhost/rhodecode_test"}}' \
60 60 --cov=rhodecode rhodecode
61 61
62 62 .PHONY: docs
63 63 ## build docs
64 64 docs:
65 65 (cd docs; nix-build default.nix -o result; make clean html)
66 66
67 67
68 68 .PHONY: docs-clean
69 69 ## Cleanup docs
70 70 docs-clean:
71 71 (cd docs; make clean)
72 72
73 73
74 74 .PHONY: docs-cleanup
75 75 ## Cleanup docs
76 76 docs-cleanup:
77 77 (cd docs; make cleanup)
78 78
79 79
80 80 .PHONY: web-build
81 81 ## Build JS packages static/js
82 82 # https://hub.docker.com/r/huli/grunt
83 83 web-build:
84 84 NODE_PATH=$(NODE_PATH) $(GRUNT)
85 85
86 86 # check required files
87 87 STATIC_CHECK="/robots.txt /502.html \
88 88 /js/scripts.min.js /js/rhodecode-components.js \
89 89 /css/style.css /css/style-polymer.css /css/style-ipython.css"
90 90
91 91 for file in $STATIC_CHECK;
92 92 do
93 93 if [ ! -f rhodecode/public/$file ]; then
94 94 echo "Missing $file expected after web-build"
95 95 exit 1
96 96 fi
97 97 done
98 98
99 99 .PHONY: pip-packages
100 100 ## Show outdated packages
101 101 pip-packages:
102 102 python ${OUTDATED_PACKAGES}
103 103
104 104
105 105 .PHONY: build
106 106 ## Build sdist/egg
107 107 build:
108 108 python -m build
109 109
110 110
111 111 .PHONY: dev-env
112 112 ## make dev-env based on the requirements files and install packages in development mode
113 113 dev-env:
114 114 pip install build virtualenv
115 115 pushd ../rhodecode-vcsserver/ && make dev-env && popd
116 116 pip wheel --wheel-dir=/home/rhodecode/.cache/pip/wheels -r requirements.txt -r requirements_rc_tools.txt -r requirements_test.txt -r requirements_debug.txt
117 117 pip install --no-index --find-links=/home/rhodecode/.cache/pip/wheels -r requirements.txt -r requirements_rc_tools.txt -r requirements_test.txt -r requirements_debug.txt
118 118 pip install -e .
119 119
120 120
121 121 .PHONY: dev-srv
122 122 ## run develop server instance, docker exec -it $(docker ps -q --filter 'name=dev-enterprise-ce') /bin/bash
123 123 dev-srv:
124 124 pserve --reload .dev/dev.ini
125 125
126 126 .PHONY: dev-srv-g
127 127 ## run gunicorn multi process workers
128 128 dev-srv-g:
129 gunicorn --workers=4 --paste .dev/dev_g.ini --bind=0.0.0.0:10020 --worker-class=gevent --threads=1 --config=configs/gunicorn_config.py --timeout=120
129 gunicorn --paste .dev/dev.ini --bind=0.0.0.0:10020 --config=.dev/gunicorn_config.py
130 130
131 131
132 132 # Default command on calling make
133 133 .DEFAULT_GOAL := show-help
134 134
135 135 .PHONY: show-help
136 136 show-help:
137 137 @echo "$$(tput bold)Available rules:$$(tput sgr0)"
138 138 @echo
139 139 @sed -n -e "/^## / { \
140 140 h; \
141 141 s/.*//; \
142 142 :doc" \
143 143 -e "H; \
144 144 n; \
145 145 s/^## //; \
146 146 t doc" \
147 147 -e "s/:.*//; \
148 148 G; \
149 149 s/\\n## /---/; \
150 150 s/\\n/ /g; \
151 151 p; \
152 152 }" ${MAKEFILE_LIST} \
153 153 | LC_ALL='C' sort --ignore-case \
154 154 | awk -F '---' \
155 155 -v ncol=$$(tput cols) \
156 156 -v indent=19 \
157 157 -v col_on="$$(tput setaf 6)" \
158 158 -v col_off="$$(tput sgr0)" \
159 159 '{ \
160 160 printf "%s%*s%s ", col_on, -indent, $$1, col_off; \
161 161 n = split($$2, words, " "); \
162 162 line_length = ncol - indent; \
163 163 for (i = 1; i <= n; i++) { \
164 164 line_length -= length(words[i]) + 1; \
165 165 if (line_length <= 0) { \
166 166 line_length = ncol - indent - length(words[i]) - 1; \
167 167 printf "\n%*s ", -indent, " "; \
168 168 } \
169 169 printf "%s ", words[i]; \
170 170 } \
171 171 printf "\n"; \
172 172 }'
@@ -1,506 +1,510 b''
1 1 """
2 2 Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
3 3 Gunicorn configuration should be managed by .ini files entries of RhodeCode or VCSServer
4 4 """
5 5
6 6 import gc
7 7 import os
8 8 import sys
9 9 import math
10 10 import time
11 11 import threading
12 12 import traceback
13 13 import random
14 14 import socket
15 15 import dataclasses
16 16 from gunicorn.glogging import Logger
17 17
18 18
19 19 def get_workers():
20 20 import multiprocessing
21 21 return multiprocessing.cpu_count() * 2 + 1
22 22
23 23
24 24 bind = "127.0.0.1:10020"
25 25
26 26
27 27 # Error logging output for gunicorn (-) is stdout
28 28 errorlog = '-'
29 29
30 30 # Access logging output for gunicorn (-) is stdout
31 31 accesslog = '-'
32 32
33 33
34 34 # SERVER MECHANICS
35 35 # None == system temp dir
36 36 # worker_tmp_dir is recommended to be set to some tmpfs
37 37 worker_tmp_dir = None
38 38 tmp_upload_dir = None
39 39
40 40 # use re-use port logic
41 41 #reuse_port = True
42 42
43 43 # Custom log format
44 44 #access_log_format = (
45 45 # '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
46 46
47 47 # loki format for easier parsing in grafana
48 48 access_log_format = (
49 49 'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
50 50
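For illustration, a log line in this loki-friendly format might look like the following (all values invented; the `t` and `p` atoms come from the RhodeCodeLogger overrides further down):

time="2024-01-01 12:00:00.123" pid=<12345> level="INFO" type="[GNCRN]" ip="203.0.113.7    " rqt="0.001234" response_code="200" response_bytes="512   " uri="GET:/_admin/gists " user=":admin" user_agent="curl/8.0.1"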
51 51
52 52 # Sets the number of process workers. More workers means more concurrent connections
53 53 # RhodeCode can handle at the same time. Each additional worker also increases
54 54 # memory usage, as each has its own set of caches.
55 55 # Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
56 56 # than 8-10 unless for huge deployments, e.g. 700-1000 users.
57 57 # `instance_id = *` must be set in the [app:main] section below (which is the default)
58 58 # when using more than 1 worker.
59 59 workers = 4
60 60
61 61 # self-adjust workers based on CPU count, to make full use of CPUs without over-committing resources
62 62 # workers = get_workers()
63 63
64 64 # Gunicorn access log level
65 65 loglevel = 'info'
66 66
67 67 # Process name visible in process list
68 68 proc_name = 'rhodecode_enterprise'
69 69
70 70 # Type of worker class, one of `sync`, `gevent`
71 71 # currently `gevent` is the only option allowed.
72 72 worker_class = 'gevent'
73 73
74 74 # The maximum number of simultaneous clients. Valid only for gevent
75 75 worker_connections = 10
76 76
77 77 # Max number of requests that worker will handle before being gracefully restarted.
78 78 # Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
79 79 max_requests = 2000
80 80 max_requests_jitter = 30
81 81
82 82 # The maximum number of pending connections.
83 83 # Exceeding this number results in the client getting an error when attempting to connect.
84 84 backlog = 64
85 85
86 86 # Amount of time a worker can spend handling a request before it
87 87 # gets killed and restarted. By default set to 21600 (6hrs)
88 88 # Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
89 89 timeout = 21600
90 90
91 91 # The maximum size of HTTP request line in bytes.
92 92 # 0 for unlimited
93 93 limit_request_line = 0
94 94
95 95 # Limit the number of HTTP header fields in a request.
96 96 # By default this value is 100 and can't be larger than 32768.
97 97 limit_request_fields = 32768
98 98
99 99 # Limit the allowed size of an HTTP request header field.
100 100 # Value is a positive number or 0.
101 101 # Setting it to 0 will allow unlimited header field sizes.
102 102 limit_request_field_size = 0
103 103
104 104 # Timeout for graceful workers restart.
105 105 # After receiving a restart signal, workers have this much time to finish
106 106 # serving requests. Workers still alive after the timeout (starting from the
107 107 # receipt of the restart signal) are force killed.
108 108 # Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
109 109 graceful_timeout = 21600
110 110
111 111 # The number of seconds to wait for requests on a Keep-Alive connection.
112 112 # Generally set in the 1-5 seconds range.
113 113 keepalive = 2
114 114
115 115 # Maximum memory usage that each worker can use before it will receive a
116 116 # graceful restart signal. 0 = memory monitoring is disabled
117 117 # Examples: 268435456 (256MB), 536870912 (512MB)
118 118 # 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
119 # Dynamic formula 1024 * 1024 * 256 == 256MBs
119 120 memory_max_usage = 0
120 121
121 122 # How often in seconds to check for memory usage for each gunicorn worker
122 123 memory_usage_check_interval = 60
123 124
124 125 # Threshold value under which we don't recycle the worker if GarbageCollection
125 # frees up enough resources. Before each restart we try to run GC on worker
126 # frees up enough resources. Before each restart, we try to run GC on worker
126 127 # in case we get enough free memory after that, the restart will not happen.
127 128 memory_usage_recovery_threshold = 0.8
128 129
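A minimal sketch of how these three settings interact (illustrative values, not the defaults above):

# illustrative only: a 512 MiB cap with the 0.8 recovery threshold
example_max_usage = 1024 * 1024 * 512      # 536870912 bytes (512 MiB)
example_threshold = 0.8
example_recovery_bytes = example_max_usage * example_threshold
# 536870912 * 0.8 == 429496729.6 bytes (~410 MiB): after a forced gc the
# worker survives the periodic check only if its RSS fell below this value.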
129 130
130 131 @dataclasses.dataclass
131 132 class MemoryCheckConfig:
132 133 max_usage: int
133 134 check_interval: int
134 135 recovery_threshold: float
135 136
136 137
137 138 def _get_process_rss(pid=None):
138 139 try:
139 140 import psutil
140 141 if pid:
141 142 proc = psutil.Process(pid)
142 143 else:
143 144 proc = psutil.Process()
144 145 return proc.memory_info().rss
145 146 except Exception:
146 147 return None
147 148
148 149
149 150 def _get_config(ini_path):
150 151 import configparser
151 152
152 153 try:
153 154 config = configparser.RawConfigParser()
154 155 config.read(ini_path)
155 156 return config
156 157 except Exception:
157 158 return None
158 159
159 160
160 161 def get_memory_usage_params(config=None):
161 162 # memory spec defaults
162 163 _memory_max_usage = memory_max_usage
163 164 _memory_usage_check_interval = memory_usage_check_interval
164 165 _memory_usage_recovery_threshold = memory_usage_recovery_threshold
165 166
166 167 if config:
167 168 ini_path = os.path.abspath(config)
168 169 conf = _get_config(ini_path)
169 170
170 171 section = 'server:main'
171 172 if conf and conf.has_section(section):
172 173
173 174 if conf.has_option(section, 'memory_max_usage'):
174 175 _memory_max_usage = conf.getint(section, 'memory_max_usage')
175 176
176 177 if conf.has_option(section, 'memory_usage_check_interval'):
177 178 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
178 179
179 180 if conf.has_option(section, 'memory_usage_recovery_threshold'):
180 181 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
181 182
182 183 _memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
183 184 or _memory_max_usage)
184 185 _memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
185 186 or _memory_usage_check_interval)
186 187 _memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
187 188 or _memory_usage_recovery_threshold)
188 189
189 190 return MemoryCheckConfig(_memory_max_usage, _memory_usage_check_interval, _memory_usage_recovery_threshold)
190 191
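The resolution order implemented above is: module-level defaults, then the [server:main] section of the given ini file, then RC_GUNICORN_* environment variables. A hedged usage sketch (path and value hypothetical):

# conf = get_memory_usage_params('/etc/rhodecode/rhodecode.ini')  # hypothetical path
# exporting RC_GUNICORN_MEMORY_MAX_USAGE=1073741824 beforehand would then
# override whatever the ini file or the module defaults provide for conf.max_usage.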
191 192
192 193 def _time_with_offset(check_interval):
193 194 return time.time() - random.randint(0, int(check_interval / 2))
194 195
195 196
196 197 def pre_fork(server, worker):
197 198 pass
198 199
199 200
200 201 def post_fork(server, worker):
201 202
202 203 memory_conf = get_memory_usage_params()
203 204 _memory_max_usage = memory_conf.max_usage
204 205 _memory_usage_check_interval = memory_conf.check_interval
205 206 _memory_usage_recovery_threshold = memory_conf.recovery_threshold
206 207
207 208 worker._memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
208 209 or _memory_max_usage)
209 210 worker._memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
210 211 or _memory_usage_check_interval)
211 212 worker._memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
212 213 or _memory_usage_recovery_threshold)
213 214
214 215 # register memory last check time, with some random offset so we don't recycle all
215 216 # at once
216 217 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
217 218
218 219 if _memory_max_usage:
219 220 server.log.info("pid=[%-10s] WORKER spawned with max memory set at %s", worker.pid,
220 221 _format_data_size(_memory_max_usage))
221 222 else:
222 223 server.log.info("pid=[%-10s] WORKER spawned", worker.pid)
223 224
224 225
225 226 def pre_exec(server):
226 227 server.log.info("Forked child, re-executing.")
227 228
228 229
229 230 def on_starting(server):
230 231 server_lbl = '{} {}'.format(server.proc_name, server.address)
231 232 server.log.info("Server %s is starting.", server_lbl)
233 server.log.info('Config:')
234 server.log.info(f"\n{server.cfg}")
235 server.log.info(get_memory_usage_params())
232 236
233 237
234 238 def when_ready(server):
235 239 server.log.info("Server %s is ready. Spawning workers", server)
236 240
237 241
238 242 def on_reload(server):
239 243 pass
240 244
241 245
242 246 def _format_data_size(size, unit="B", precision=1, binary=True):
243 247 """Format a number using SI units (kilo, mega, etc.).
244 248
245 249 ``size``: The number as a float or int.
246 250
247 251 ``unit``: The unit name in plural form. Examples: "bytes", "B".
248 252
249 253 ``precision``: How many digits to the right of the decimal point. Default
250 254 is 1. 0 suppresses the decimal point.
251 255
252 256 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
253 257 If true, use base-2 binary prefixes (kibi = Ki = 1024).
254 258
255 259 ``full_name``: If false (default), use the prefix abbreviation ("k" or
256 260 "Ki"). If true, use the full prefix ("kilo" or "kibi").
258 262
259 263 """
260 264
261 265 if not binary:
262 266 base = 1000
263 267 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
264 268 else:
265 269 base = 1024
266 270 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
267 271
268 272 sign = ""
269 273 if size > 0:
270 274 m = int(math.log(size, base))
271 275 elif size < 0:
272 276 sign = "-"
273 277 size = -size
274 278 m = int(math.log(size, base))
275 279 else:
276 280 m = 0
277 281 if m > 8:
278 282 m = 8
279 283
280 284 if m == 0:
281 285 precision = '%.0f'
282 286 else:
283 287 precision = '%%.%df' % precision
284 288
285 289 size = precision % (size / math.pow(base, m))
286 290
287 291 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
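Illustrative return values, traced from the code above:

# _format_data_size(536870912)                -> '512.0 MiB'
# _format_data_size(536870912, binary=False)  -> '536.9 MB'
# _format_data_size(0)                        -> '0 B'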
288 292
289 293
290 294 def _check_memory_usage(worker):
291 295 _memory_max_usage = worker._memory_max_usage
292 296 if not _memory_max_usage:
293 297 return
294 298
295 299 _memory_usage_check_interval = worker._memory_usage_check_interval
296 300 _memory_usage_recovery_threshold = _memory_max_usage * worker._memory_usage_recovery_threshold
297 301
298 302 elapsed = time.time() - worker._last_memory_check_time
299 303 if elapsed > _memory_usage_check_interval:
300 304 mem_usage = _get_process_rss()
301 305 if mem_usage and mem_usage > _memory_max_usage:
302 306 worker.log.info(
303 307 "memory usage %s > %s, forcing gc",
304 308 _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
305 309 # Try to clean it up by forcing a full collection.
306 310 gc.collect()
307 311 mem_usage = _get_process_rss()
308 312 if mem_usage and mem_usage > _memory_usage_recovery_threshold:
309 313 # Didn't clean up enough, we'll have to terminate.
310 314 worker.log.warning(
311 315 "memory usage %s > %s after gc, quitting",
312 316 _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
313 317 # This will cause worker to auto-restart itself
314 318 worker.alive = False
315 319 worker._last_memory_check_time = time.time()
316 320
317 321
318 322 def worker_int(worker):
319 323 worker.log.info("pid=[%-10s] worker received INT or QUIT signal", worker.pid)
320 324
321 325 # get traceback info, on worker crash
322 326 def get_thread_id(t_id):
323 327 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
324 328 return id2name.get(t_id, "unknown_thread_id")
325 329
326 330 code = []
327 for thread_id, stack in sys._current_frames().items():
331 for thread_id, stack in sys._current_frames().items(): # noqa
328 332 code.append(
329 333 "\n# Thread: %s(%d)" % (get_thread_id(thread_id), thread_id))
330 334 for fname, lineno, name, line in traceback.extract_stack(stack):
331 335 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
332 336 if line:
333 337 code.append(" %s" % (line.strip()))
334 338 worker.log.debug("\n".join(code))
335 339
336 340
337 341 def worker_abort(worker):
338 342 worker.log.info("pid=[%-10s] worker received SIGABRT signal", worker.pid)
339 343
340 344
341 345 def worker_exit(server, worker):
342 346 worker.log.info("pid=[%-10s] worker exit", worker.pid)
343 347
344 348
345 349 def child_exit(server, worker):
346 350 worker.log.info("pid=[%-10s] worker child exit", worker.pid)
347 351
348 352
349 353 def pre_request(worker, req):
350 354 worker.start_time = time.time()
351 355 worker.log.debug(
352 356 "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
353 357
354 358
355 359 def post_request(worker, req, environ, resp):
356 360 total_time = time.time() - worker.start_time
357 361 # Gunicorn sometimes has problems with reading the status_code
358 362 status_code = getattr(resp, 'status_code', '')
359 363 worker.log.debug(
360 364 "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
361 365 worker.nr, req.method, req.path, status_code, total_time)
362 366 _check_memory_usage(worker)
363 367
364 368
365 369 def _filter_proxy(ip):
366 370 """
367 371 IP addresses passed in headers can be in a special format of multiple
368 372 IPs. Those comma separated IPs are passed from various proxies in the
369 373 chain of request processing, the left-most being the original client.
370 374 We only care about the first IP, which came from the original client.
371 375
372 376 :param ip: ip string from headers
373 377 """
374 378 if ',' in ip:
375 379 _ips = ip.split(',')
376 380 _first_ip = _ips[0].strip()
377 381 return _first_ip
378 382 return ip
379 383
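Example behaviour with hypothetical header values; the left-most address wins:

# _filter_proxy('203.0.113.7, 10.0.0.2, 10.0.0.1')  -> '203.0.113.7'
# _filter_proxy('203.0.113.7')                      -> '203.0.113.7'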
380 384
381 385 def _filter_port(ip):
382 386 """
383 387 Removes a port from an ip; there are 4 main cases to handle here:
384 388 - ipv4 eg. 127.0.0.1
385 389 - ipv6 eg. ::1
386 390 - ipv4+port eg. 127.0.0.1:8080
387 391 - ipv6+port eg. [::1]:8080
388 392
389 393 :param ip:
390 394 """
391 395 def is_ipv6(ip_addr):
392 396 if hasattr(socket, 'inet_pton'):
393 397 try:
394 398 socket.inet_pton(socket.AF_INET6, ip_addr)
395 399 except socket.error:
396 400 return False
397 401 else:
398 402 return False
399 403 return True
400 404
401 405 if ':' not in ip: # must be ipv4 pure ip
402 406 return ip
403 407
404 408 if '[' in ip and ']' in ip: # ipv6 with port
405 409 return ip.split(']')[0][1:].lower()
406 410
407 411 # must be ipv6 or ipv4 with port
408 412 if is_ipv6(ip):
409 413 return ip
410 414 else:
411 415 ip, _port = ip.split(':')[:2] # means ipv4+port
412 416 return ip
413 417
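The four docstring cases, traced through the code above (hypothetical inputs):

# _filter_port('127.0.0.1')       -> '127.0.0.1'   (no ':', plain ipv4)
# _filter_port('::1')             -> '::1'         (passes the is_ipv6 check)
# _filter_port('127.0.0.1:8080')  -> '127.0.0.1'   (ipv4+port, split on ':')
# _filter_port('[::1]:8080')      -> '::1'         (bracketed ipv6+port)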
414 418
415 419 def get_ip_addr(environ):
416 420 proxy_key = 'HTTP_X_REAL_IP'
417 421 proxy_key2 = 'HTTP_X_FORWARDED_FOR'
418 422 def_key = 'REMOTE_ADDR'
419 423
420 424 def _filters(x):
421 425 return _filter_port(_filter_proxy(x))
422 426
423 427 ip = environ.get(proxy_key)
424 428 if ip:
425 429 return _filters(ip)
426 430
427 431 ip = environ.get(proxy_key2)
428 432 if ip:
429 433 return _filters(ip)
430 434
431 435 ip = environ.get(def_key, '0.0.0.0')
432 436 return _filters(ip)
433 437
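A sketch of the lookup order with a hypothetical WSGI environ:

# environ = {'HTTP_X_REAL_IP': '',                             # empty -> skipped
#            'HTTP_X_FORWARDED_FOR': '203.0.113.7, 10.0.0.1',  # proxy chain
#            'REMOTE_ADDR': '10.0.0.1'}
# get_ip_addr(environ)  -> '203.0.113.7'
# X-Real-IP wins when set, then X-Forwarded-For, then REMOTE_ADDR.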
434 438
435 439 class RhodeCodeLogger(Logger):
436 440 """
437 441 Custom Logger that allows some customizations that gunicorn doesn't support
438 442 """
439 443
440 444 datefmt = r"%Y-%m-%d %H:%M:%S"
441 445
442 446 def __init__(self, cfg):
443 447 Logger.__init__(self, cfg)
444 448
445 449 def now(self):
446 450 """ return date in RhodeCode Log format """
447 451 now = time.time()
448 452 msecs = int((now - int(now)) * 1000)
449 453 return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
450 454
451 455 def atoms(self, resp, req, environ, request_time):
452 456 """ Gets atoms for log formatting.
453 457 """
454 458 status = resp.status
455 459 if isinstance(status, str):
456 460 status = status.split(None, 1)[0]
457 461 atoms = {
458 462 'h': get_ip_addr(environ),
459 463 'l': '-',
460 464 'u': self._get_user(environ) or '-',
461 465 't': self.now(),
462 466 'r': "%s %s %s" % (environ['REQUEST_METHOD'],
463 467 environ['RAW_URI'],
464 468 environ["SERVER_PROTOCOL"]),
465 469 's': status,
466 470 'm': environ.get('REQUEST_METHOD'),
467 471 'U': environ.get('PATH_INFO'),
468 472 'q': environ.get('QUERY_STRING'),
469 473 'H': environ.get('SERVER_PROTOCOL'),
470 474 'b': getattr(resp, 'sent', None) is not None and str(resp.sent) or '-',
471 475 'B': getattr(resp, 'sent', None),
472 476 'f': environ.get('HTTP_REFERER', '-'),
473 477 'a': environ.get('HTTP_USER_AGENT', '-'),
474 478 'T': request_time.seconds,
475 479 'D': (request_time.seconds * 1000000) + request_time.microseconds,
476 480 'M': (request_time.seconds * 1000) + int(request_time.microseconds/1000),
477 481 'L': "%d.%06d" % (request_time.seconds, request_time.microseconds),
478 482 'p': "<%s>" % os.getpid()
479 483 }
480 484
481 485 # add request headers
482 486 if hasattr(req, 'headers'):
483 487 req_headers = req.headers
484 488 else:
485 489 req_headers = req
486 490
487 491 if hasattr(req_headers, "items"):
488 492 req_headers = req_headers.items()
489 493
490 494 atoms.update({"{%s}i" % k.lower(): v for k, v in req_headers})
491 495
492 496 resp_headers = resp.headers
493 497 if hasattr(resp_headers, "items"):
494 498 resp_headers = resp_headers.items()
495 499
496 500 # add response headers
497 501 atoms.update({"{%s}o" % k.lower(): v for k, v in resp_headers})
498 502
499 503 # add environ variables
500 504 environ_variables = environ.items()
501 505 atoms.update({"{%s}e" % k.lower(): v for k, v in environ_variables})
502 506
503 507 return atoms
504 508
505 509
506 510 logger_class = RhodeCodeLogger
@@ -1,814 +1,813 b''
1
2 1
3 2 ; #########################################
4 3 ; RHODECODE COMMUNITY EDITION CONFIGURATION
5 4 ; #########################################
6 5
7 6 [DEFAULT]
8 7 ; Debug flag sets all loggers to debug, and enables request tracking
9 8 debug = false
10 9
11 10 ; ########################################################################
12 11 ; EMAIL CONFIGURATION
13 12 ; These settings will be used by the RhodeCode mailing system
14 13 ; ########################################################################
15 14
16 15 ; prefix all emails subjects with given prefix, helps filtering out emails
17 16 #email_prefix = [RhodeCode]
18 17
19 18 ; email FROM address from which all mails will be sent
20 19 #app_email_from = rhodecode-noreply@localhost
21 20
22 21 #smtp_server = mail.server.com
23 22 #smtp_username =
24 23 #smtp_password =
25 24 #smtp_port =
26 25 #smtp_use_tls = false
27 26 #smtp_use_ssl = true
28 27
29 28 [server:main]
30 29 ; COMMON HOST/IP CONFIG. This applies mostly to the development setup;
31 30 ; host and port for gunicorn are controlled by gunicorn_conf.py
32 31 host = 127.0.0.1
33 32 port = 10020
34 33
35 34
36 35 ; ###########################
37 36 ; GUNICORN APPLICATION SERVER
38 37 ; ###########################
39 38
40 39 ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py
41 40
42 41 ; Module to use, this setting shouldn't be changed
43 42 use = egg:gunicorn#main
44 43
45 44 ; Prefix middleware for RhodeCode.
46 45 ; recommended when using a proxy setup.
47 46 ; allows setting RhodeCode under a prefix on the server.
48 47 ; eg https://server.com/custom_prefix. Enable the `filter-with =` option below as well.
49 48 ; And set your prefix like: `prefix = /custom_prefix`
50 49 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
51 50 ; to make your cookies only work on the prefix url
52 51 [filter:proxy-prefix]
53 52 use = egg:PasteDeploy#prefix
54 53 prefix = /
55 54
56 55 [app:main]
57 56 ; The %(here)s variable will be replaced with the absolute path of the parent directory
58 57 ; of this file
59 58 ; Each option in the app:main section can be overridden by an environment variable
60 59 ;
61 60 ;To override an option:
62 61 ;
63 62 ;RC_<KeyName>
64 63 ;Everything should be uppercase, . and - should be replaced by _.
65 64 ;For example, if you have these configuration settings:
66 65 ;rc_cache.repo_object.backend = foo
67 66 ;can be overridden by
68 67 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
69 68
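A hypothetical Python helper (not part of RhodeCode) illustrating the mapping described above; keeping a single RC_ prefix is inferred from the example:

def rc_env_var(option_key):
    # uppercase; '.' and '-' become '_'; keep a single 'RC_' prefix (assumption)
    name = option_key.upper().replace('.', '_').replace('-', '_')
    return name if name.startswith('RC_') else 'RC_' + name

# rc_env_var('rc_cache.repo_object.backend')  -> 'RC_CACHE_REPO_OBJECT_BACKEND'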
70 69 use = egg:rhodecode-enterprise-ce
71 70
72 71 ; enable proxy prefix middleware, defined above
73 72 #filter-with = proxy-prefix
74 73
75 74 ; encryption key used to encrypt social plugin tokens,
76 75 ; remote_urls with credentials etc, if not set it defaults to
77 76 ; `beaker.session.secret`
78 77 #rhodecode.encrypted_values.secret =
79 78
80 79 ; decryption strict mode (enabled by default). It controls if decryption raises
81 80 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
82 81 #rhodecode.encrypted_values.strict = false
83 82
84 83 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
85 84 ; fernet is safer, and we strongly recommend switching to it.
86 85 ; Due to backward compatibility aes is used as default.
87 86 #rhodecode.encrypted_values.algorithm = fernet
88 87
89 88 ; Return gzipped responses from RhodeCode (static files/application)
90 89 gzip_responses = false
91 90
92 91 ; Auto-generate javascript routes file on startup
93 92 generate_js_files = false
94 93
95 94 ; System global default language.
96 95 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
97 96 lang = en
98 97
99 98 ; Perform a full repository scan and import on each server start.
100 99 ; Setting this to true could lead to a very long startup time.
101 100 startup.import_repos = false
102 101
103 102 ; URL at which the application is running. This is used for Bootstrapping
104 103 ; requests in context when no web request is available. Used in ishell, or
105 104 ; SSH calls. Set this for events to receive proper url for SSH calls.
106 105 app.base_url = http://rhodecode.local
107 106
108 107 ; Unique application ID. Should be a random unique string for security.
109 108 app_instance_uuid = rc-production
110 109
111 110 ; Cut off limit for large diffs (size in bytes). If overall diff size on
112 111 ; commit, or pull request exceeds this limit this diff will be displayed
113 112 ; partially. E.g 512000 == 512Kb
114 113 cut_off_limit_diff = 512000
115 114
116 115 ; Cut off limit for large files inside diffs (size in bytes). Each individual
117 116 ; file inside diff which exceeds this limit will be displayed partially.
118 117 ; E.g 128000 == 128Kb
119 118 cut_off_limit_file = 128000
120 119
121 120 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
122 121 vcs_full_cache = true
123 122
124 123 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
125 124 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
126 125 force_https = false
127 126
128 127 ; use Strict-Transport-Security headers
129 128 use_htsts = false
130 129
131 130 ; Set to true if your repos are exposed using the dumb protocol
132 131 git_update_server_info = false
133 132
134 133 ; RSS/ATOM feed options
135 134 rss_cut_off_limit = 256000
136 135 rss_items_per_page = 10
137 136 rss_include_diff = false
138 137
139 138 ; gist URL alias, used to create nicer urls for gist. This should be a
140 139 ; url that rewrites to _admin/gists/{gistid}.
141 140 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
142 141 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
143 142 gist_alias_url =
144 143
145 144 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
146 145 ; used for access.
147 146 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
148 147 ; came from the logged in user who owns this authentication token.
149 148 ; Additionally the @TOKEN syntax can be used to bind the view to a specific
150 149 ; authentication token. Such a view would only be accessible when used together
151 150 ; with this authentication token
152 151 ; list of all views can be found under `/_admin/permissions/auth_token_access`
153 152 ; The list should be "," separated and on a single line.
154 153 ; Most common views to enable:
155 154
156 155 # RepoCommitsView:repo_commit_download
157 156 # RepoCommitsView:repo_commit_patch
158 157 # RepoCommitsView:repo_commit_raw
159 158 # RepoCommitsView:repo_commit_raw@TOKEN
160 159 # RepoFilesView:repo_files_diff
161 160 # RepoFilesView:repo_archivefile
162 161 # RepoFilesView:repo_file_raw
163 162 # GistView:*
164 163 api_access_controllers_whitelist =
165 164
166 165 ; Default encoding used to convert from and to unicode
167 166 ; can also be a comma separated list of encodings in case of mixed encodings
168 167 default_encoding = UTF-8
169 168
170 169 ; instance-id prefix
171 170 ; a prefix key for this instance used for cache invalidation when running
172 171 ; multiple instances of RhodeCode, make sure it's globally unique for
173 172 ; all running RhodeCode instances. Leave empty if you don't use it
174 173 instance_id =
175 174
176 175 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
177 176 ; of an authentication plugin even if it is disabled by its settings.
178 177 ; This could be useful if you are unable to log in to the system due to broken
179 178 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
180 179 ; module to log in again and fix the settings.
181 180 ; Available builtin plugin IDs (hash is part of the ID):
182 181 ; egg:rhodecode-enterprise-ce#rhodecode
183 182 ; egg:rhodecode-enterprise-ce#pam
184 183 ; egg:rhodecode-enterprise-ce#ldap
185 184 ; egg:rhodecode-enterprise-ce#jasig_cas
186 185 ; egg:rhodecode-enterprise-ce#headers
187 186 ; egg:rhodecode-enterprise-ce#crowd
188 187
189 188 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
190 189
191 190 ; Flag to control loading of legacy plugins in py:/path format
192 191 auth_plugin.import_legacy_plugins = true
193 192
194 193 ; alternative return HTTP header for failed authentication. Default HTTP
195 194 ; response is 401 HTTPUnauthorized. Currently HG clients have trouble
196 195 ; handling that, causing a series of failed authentication calls.
197 196 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
198 197 ; This will be served instead of default 401 on bad authentication
199 198 auth_ret_code =
200 199
201 200 ; use special detection method when serving auth_ret_code, instead of serving
202 201 ; ret_code directly, use 401 initially (which triggers a credentials prompt)
203 202 ; and then serve auth_ret_code to clients
204 203 auth_ret_code_detection = false
205 204
206 205 ; locking return code. When repository is locked return this HTTP code. 2XX
207 206 ; codes don't break the transactions while 4XX codes do
208 207 lock_ret_code = 423
209 208
210 209 ; allows changing the repository location in the settings page
211 210 allow_repo_location_change = true
212 211
213 212 ; allows setting up custom hooks in the settings page
214 213 allow_custom_hooks_settings = true
215 214
216 215 ; Generated license token required for EE edition license.
217 216 ; New generated token value can be found in Admin > settings > license page.
218 217 license_token =
219 218
220 219 ; This flag hides sensitive information on the license page such as token, and license data
221 220 license.hide_license_info = false
222 221
223 222 ; supervisor connection uri, for managing supervisor and logs.
224 223 supervisor.uri =
225 224
226 225 ; supervisord group name/id that this RC instance should handle
227 226 supervisor.group_id = prod
228 227
229 228 ; Display extended labs settings
230 229 labs_settings_active = true
231 230
232 231 ; Custom exception store path, defaults to TMPDIR
233 232 ; This is used to store exceptions from RhodeCode in a shared directory
234 233 #exception_tracker.store_path =
235 234
236 235 ; Send email with exception details when it happens
237 236 #exception_tracker.send_email = false
238 237
239 238 ; Comma separated list of recipients for exception emails,
240 239 ; e.g admin@rhodecode.com,devops@rhodecode.com
241 240 ; Can be left empty, then emails will be sent to ALL super-admins
242 241 #exception_tracker.send_email_recipients =
243 242
244 243 ; optional prefix to Add to email Subject
245 244 #exception_tracker.email_prefix = [RHODECODE ERROR]
246 245
247 246 ; File store configuration. This is used to store and serve uploaded files
248 247 file_store.enabled = true
249 248
250 249 ; Storage backend, available options are: local
251 250 file_store.backend = local
252 251
253 252 ; path to store the uploaded binaries
254 253 file_store.storage_path = %(here)s/data/file_store
255 254
256 255 ; Uncomment and set this path to control settings for archive download cache.
257 256 ; Generated repo archives will be cached at this location
258 257 ; and served from the cache during subsequent requests for the same archive of
259 258 ; the repository. This path is important to be shared across filesystems and with
260 259 ; RhodeCode and vcsserver
261 260
262 261 ; Default is $cache_dir/archive_cache if not set
263 262 archive_cache.store_dir = %(here)s/data/archive_cache
264 263
265 264 ; The limit in GB sets how much data we cache before recycling the least recently used entries, defaults to 10 gb
266 archive_cache.cache_size_gb = 10
265 archive_cache.cache_size_gb = 40
267 266
268 267 ; By default the cache uses a sharding technique; this specifies how many shards there are
269 archive_cache.cache_shards = 10
268 archive_cache.cache_shards = 4
270 269
271 270 ; #############
272 271 ; CELERY CONFIG
273 272 ; #############
274 273
275 274 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
276 275
277 276 use_celery = false
278 277
279 278 ; path to store schedule database
280 279 #celerybeat-schedule.path =
281 280
282 281 ; connection url to the message broker (default redis)
283 282 celery.broker_url = redis://localhost:6379/8
284 283
285 284 ; rabbitmq example
286 285 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
287 286
288 287 ; maximum tasks to execute before worker restart
289 288 celery.max_tasks_per_child = 20
290 289
291 290 ; tasks will never be sent to the queue, but executed locally instead.
292 291 celery.task_always_eager = false
293 292
294 293 ; #############
295 294 ; DOGPILE CACHE
296 295 ; #############
297 296
298 297 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
299 298 ; eg. /tmpfs/data_ramdisk, however this directory might require a large amount of space
300 299 cache_dir = %(here)s/data
301 300
302 301 ; *********************************************
303 302 ; `sql_cache_short` cache for heavy SQL queries
304 303 ; Only supported backend is `memory_lru`
305 304 ; *********************************************
306 305 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
307 306 rc_cache.sql_cache_short.expiration_time = 30
308 307
309 308
310 309 ; *****************************************************
311 310 ; `cache_repo_longterm` cache for repo object instances
312 311 ; Only supported backend is `memory_lru`
313 312 ; *****************************************************
314 313 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
315 314 ; by default we use 30 days; the cache is still invalidated on push
316 315 rc_cache.cache_repo_longterm.expiration_time = 2592000
317 316 ; max items in the LRU cache; set to a smaller number to save memory and expire the least used caches
318 317 rc_cache.cache_repo_longterm.max_size = 10000
319 318
320 319
321 320 ; *********************************************
322 321 ; `cache_general` cache for general purpose use
323 322 ; for simplicity use rc.file_namespace backend,
324 323 ; for performance and scale use rc.redis
325 324 ; *********************************************
326 325 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
327 326 rc_cache.cache_general.expiration_time = 43200
328 327 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
329 328 #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db
330 329
331 330 ; alternative `cache_general` redis backend with distributed lock
332 331 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
333 332 #rc_cache.cache_general.expiration_time = 300
334 333
335 334 ; redis_expiration_time needs to be greater than expiration_time
336 335 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
337 336
338 337 #rc_cache.cache_general.arguments.host = localhost
339 338 #rc_cache.cache_general.arguments.port = 6379
340 339 #rc_cache.cache_general.arguments.db = 0
341 340 #rc_cache.cache_general.arguments.socket_timeout = 30
342 341 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
343 342 #rc_cache.cache_general.arguments.distributed_lock = true
344 343
345 344 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
346 345 #rc_cache.cache_general.arguments.lock_auto_renewal = true
347 346
348 347 ; *************************************************
349 348 ; `cache_perms` cache for permission tree, auth TTL
350 349 ; for simplicity use rc.file_namespace backend,
351 350 ; for performance and scale use rc.redis
352 351 ; *************************************************
353 352 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
354 353 rc_cache.cache_perms.expiration_time = 3600
355 354 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
356 355 #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db
357 356
358 357 ; alternative `cache_perms` redis backend with distributed lock
359 358 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
360 359 #rc_cache.cache_perms.expiration_time = 300
361 360
362 361 ; redis_expiration_time needs to be greater than expiration_time
363 362 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
364 363
365 364 #rc_cache.cache_perms.arguments.host = localhost
366 365 #rc_cache.cache_perms.arguments.port = 6379
367 366 #rc_cache.cache_perms.arguments.db = 0
368 367 #rc_cache.cache_perms.arguments.socket_timeout = 30
369 368 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
370 369 #rc_cache.cache_perms.arguments.distributed_lock = true
371 370
372 371 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
373 372 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
374 373
375 374 ; ***************************************************
376 375 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
377 376 ; for simplicity use rc.file_namespace backend,
378 377 ; for performance and scale use rc.redis
379 378 ; ***************************************************
380 379 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
381 380 rc_cache.cache_repo.expiration_time = 2592000
382 381 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
383 382 #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db
384 383
385 384 ; alternative `cache_repo` redis backend with distributed lock
386 385 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
387 386 #rc_cache.cache_repo.expiration_time = 2592000
388 387
389 388 ; redis_expiration_time needs to be greater than expiration_time
390 389 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
391 390
392 391 #rc_cache.cache_repo.arguments.host = localhost
393 392 #rc_cache.cache_repo.arguments.port = 6379
394 393 #rc_cache.cache_repo.arguments.db = 1
395 394 #rc_cache.cache_repo.arguments.socket_timeout = 30
396 395 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
397 396 #rc_cache.cache_repo.arguments.distributed_lock = true
398 397
399 398 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
400 399 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
401 400
402 401 ; ##############
403 402 ; BEAKER SESSION
404 403 ; ##############
405 404
406 405 ; beaker.session.type is the type of storage used for logged-in users' sessions. Current allowed
407 406 ; types are file, ext:redis, ext:database, ext:memcached, and memory (default if not specified).
408 407 ; Fastest ones are Redis and ext:database
409 408 beaker.session.type = file
410 409 beaker.session.data_dir = %(here)s/data/sessions
411 410
412 411 ; Redis based sessions
413 412 #beaker.session.type = ext:redis
414 413 #beaker.session.url = redis://127.0.0.1:6379/2
415 414
416 415 ; DB based session, fast, and allows easy management of logged in users
417 416 #beaker.session.type = ext:database
418 417 #beaker.session.table_name = db_session
419 418 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
420 419 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
421 420 #beaker.session.sa.pool_recycle = 3600
422 421 #beaker.session.sa.echo = false
423 422
424 423 beaker.session.key = rhodecode
425 424 beaker.session.secret = production-rc-uytcxaz
426 425 beaker.session.lock_dir = %(here)s/data/sessions/lock
427 426
428 427 ; Secure encrypted cookie. Requires AES and AES python libraries
429 428 ; you must disable beaker.session.secret to use this
430 429 #beaker.session.encrypt_key = key_for_encryption
431 430 #beaker.session.validate_key = validation_key
432 431
433 432 ; Sets session as invalid (also logging out the user) if it has not been
434 433 ; accessed for a given amount of time in seconds
435 434 beaker.session.timeout = 2592000
436 435 beaker.session.httponly = true
437 436
438 437 ; Path to use for the cookie. Set to prefix if you use prefix middleware
439 438 #beaker.session.cookie_path = /custom_prefix
440 439
441 440 ; Set https secure cookie
442 441 beaker.session.secure = false
443 442
444 443 ; default cookie expiration time in seconds, set to `true` to expire
445 444 ; at browser close
446 445 #beaker.session.cookie_expires = 3600
447 446
448 447 ; #############################
449 448 ; SEARCH INDEXING CONFIGURATION
450 449 ; #############################
451 450
452 451 ; Full text search indexer is available in rhodecode-tools under
453 452 ; `rhodecode-tools index` command
454 453
455 454 ; WHOOSH Backend, doesn't require additional services to run
456 455 ; it works well with a few dozen repos
457 456 search.module = rhodecode.lib.index.whoosh
458 457 search.location = %(here)s/data/index
459 458
460 459 ; ####################
461 460 ; CHANNELSTREAM CONFIG
462 461 ; ####################
463 462
464 463 ; channelstream enables persistent connections and live notification
465 464 ; in the system. It's also used by the chat system
466 465
467 466 channelstream.enabled = false
468 467
469 468 ; server address for channelstream server on the backend
470 469 channelstream.server = 127.0.0.1:9800
471 470
472 471 ; location of the channelstream server from outside world
473 472 ; use ws:// for http or wss:// for https. This address needs to be handled
474 473 ; by external HTTP server such as Nginx or Apache
475 474 ; see Nginx/Apache configuration examples in our docs
476 475 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
477 476 channelstream.secret = secret
478 477 channelstream.history.location = %(here)s/channelstream_history
479 478
480 479 ; Internal application path that Javascript uses to connect to.
481 480 ; If you use proxy-prefix the prefix should be added before /_channelstream
482 481 channelstream.proxy_path = /_channelstream
483 482
484 483
485 484 ; ##############################
486 485 ; MAIN RHODECODE DATABASE CONFIG
487 486 ; ##############################
488 487
489 488 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
490 489 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
491 490 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
492 491 ; pymysql is an alternative driver for MySQL, use in case of problems with the default one
493 492 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
494 493
495 494 sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
496 495
497 496 ; see sqlalchemy docs for other advanced settings
498 497 ; print the sql statements to output
499 498 sqlalchemy.db1.echo = false
500 499
501 500 ; recycle the connections after this amount of seconds
502 501 sqlalchemy.db1.pool_recycle = 3600
503 502
504 503 ; the number of connections to keep open inside the connection pool.
505 504 ; 0 indicates no limit
506 505 ; the general calculation with gevent is:
507 506 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
508 507 ; then increase pool size + max overflow so that they add up to 500.
509 508 #sqlalchemy.db1.pool_size = 5
510 509
511 510 ; The number of connections to allow in connection pool "overflow", that is
512 511 ; connections that can be opened above and beyond the pool_size setting,
513 512 ; which defaults to five.
514 513 #sqlalchemy.db1.max_overflow = 10
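A worked example of the sizing rule above (illustrative numbers, not a recommendation): if gevent allows 500 concurrent greenlets that all do database access, then pool_size = 100 and max_overflow = 400 add up to the required 500.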
515 514
516 515 ; Connection check ping, used to detect broken database connections
517 516 ; can be enabled to better handle 'MySQL has gone away' errors
518 517 #sqlalchemy.db1.ping_connection = true
519 518
520 519 ; ##########
521 520 ; VCS CONFIG
522 521 ; ##########
523 522 vcs.server.enable = true
524 523 vcs.server = localhost:9900
525 524
526 525 ; Web server connectivity protocol, responsible for web based VCS operations
527 526 ; Available protocols are:
528 527 ; `http` - use http-rpc backend (default)
529 528 vcs.server.protocol = http
530 529
531 530 ; Push/Pull operations protocol, available options are:
532 531 ; `http` - use http-rpc backend (default)
533 532 vcs.scm_app_implementation = http
534 533
535 534 ; Push/Pull operations hooks protocol, available options are:
536 535 ; `http` - use http-rpc backend (default)
537 536 vcs.hooks.protocol = http
538 537
539 538 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
540 539 ; accessible via network.
541 540 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
542 541 vcs.hooks.host = *
543 542
544 543 ; Start VCSServer with this instance as a subprocess, useful for development
545 544 vcs.start_server = false
546 545
547 546 ; List of enabled VCS backends, available options are:
548 547 ; `hg` - mercurial
549 548 ; `git` - git
550 549 ; `svn` - subversion
551 550 vcs.backends = hg, git, svn
552 551
553 552 ; Wait this number of seconds before killing connection to the vcsserver
554 553 vcs.connection_timeout = 3600
555 554
556 555 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
557 556 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
558 557 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
559 558 #vcs.svn.compatible_version = 1.8
560 559
561 560 ; Cache flag to cache vcsserver remote calls locally
562 561 ; It uses cache_region `cache_repo`
563 562 vcs.methods.cache = true
564 563
565 564 ; ####################################################
566 565 ; Subversion proxy support (mod_dav_svn)
567 566 ; Maps RhodeCode repo groups into SVN paths for Apache
568 567 ; ####################################################
569 568
570 569 ; Enable or disable the config file generation.
571 570 svn.proxy.generate_config = false
572 571
573 572 ; Generate config file with `SVNListParentPath` set to `On`.
574 573 svn.proxy.list_parent_path = true
575 574
576 575 ; Set location and file name of generated config file.
577 576 svn.proxy.config_file_path = %(here)s/mod_dav_svn.conf
578 577
579 578 ; alternative mod_dav config template. This needs to be a valid mako template
580 579 ; Example template can be found in the source code:
581 580 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
582 581 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
583 582
584 583 ; Used as a prefix to the `Location` block in the generated config file.
585 584 ; In most cases it should be set to `/`.
586 585 svn.proxy.location_root = /
587 586
588 587 ; Command to reload the mod dav svn configuration on change.
589 588 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
590 589 ; Make sure user who runs RhodeCode process is allowed to reload Apache
591 590 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
592 591
593 592 ; If the timeout expires before the reload command finishes, the command will
594 593 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
595 594 #svn.proxy.reload_timeout = 10
596 595
597 596 ; ####################
598 597 ; SSH Support Settings
599 598 ; ####################
600 599
601 600 ; Defines if a custom authorized_keys file should be created and written on
602 601 ; any change of user ssh keys. Setting this to false also disables the possibility
603 602 ; of adding SSH keys by users from web interface. Super admins can still
604 603 ; manage SSH Keys.
605 604 ssh.generate_authorized_keyfile = false
606 605
607 606 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
608 607 # ssh.authorized_keys_ssh_opts =
609 608
610 609 ; Path to the authorized_keys file where the generated entries are placed.
611 610 ; It is possible to have multiple key files specified in `sshd_config` e.g.
612 611 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
613 612 ssh.authorized_keys_file_path = ~/.ssh/authorized_keys_rhodecode
614 613
615 614 ; Command to execute the SSH wrapper. The binary is available in the
616 615 ; RhodeCode installation directory.
617 616 ; e.g ~/.rccontrol/community-1/profile/bin/rc-ssh-wrapper
618 617 ssh.wrapper_cmd = ~/.rccontrol/community-1/rc-ssh-wrapper
619 618
620 619 ; Allow shell when executing the ssh-wrapper command
621 620 ssh.wrapper_cmd_allow_shell = false
622 621
623 622 ; Enables logging, and detailed output sent back to the client during SSH
624 623 ; operations. Useful for debugging, shouldn't be used in production.
625 624 ssh.enable_debug_logging = false
626 625
627 626 ; Paths to binary executables; by default they are just the names, but we can
628 627 ; override them if we want to use custom ones
629 628 ssh.executable.hg = ~/.rccontrol/vcsserver-1/profile/bin/hg
630 629 ssh.executable.git = ~/.rccontrol/vcsserver-1/profile/bin/git
631 630 ssh.executable.svn = ~/.rccontrol/vcsserver-1/profile/bin/svnserve
632 631
633 632 ; Enables SSH key generator web interface. Disabling this still allows users
634 633 ; to add their own keys.
635 634 ssh.enable_ui_key_generator = true
636 635
637 636
638 637 ; #################
639 638 ; APPENLIGHT CONFIG
640 639 ; #################
641 640
642 641 ; Appenlight is tailored to work with RhodeCode, see
643 642 ; http://appenlight.rhodecode.com for details how to obtain an account
644 643
645 644 ; Appenlight integration enabled
646 645 #appenlight = false
647 646
648 647 #appenlight.server_url = https://api.appenlight.com
649 648 #appenlight.api_key = YOUR_API_KEY
650 649 #appenlight.transport_config = https://api.appenlight.com?threaded=1&timeout=5
651 650
652 651 ; used for JS client
653 652 #appenlight.api_public_key = YOUR_API_PUBLIC_KEY
654 653
655 654 ; TWEAK AMOUNT OF INFO SENT HERE
656 655
657 656 ; enables 404 error logging (default False)
658 657 #appenlight.report_404 = false
659 658
660 659 ; time in seconds after request is considered being slow (default 1)
661 660 #appenlight.slow_request_time = 1
662 661
663 662 ; record slow requests in application
664 663 ; (needs to be enabled for slow datastore recording and time tracking)
665 664 #appenlight.slow_requests = true
666 665
667 666 ; enable hooking to application loggers
668 667 #appenlight.logging = true
669 668
670 669 ; minimum log level for log capture
671 670 #appenlight.logging.level = WARNING
672 671
673 672 ; send logs only from erroneous/slow requests
674 673 ; (saves API quota for intensive logging)
675 674 #appenlight.logging_on_error = false
676 675
677 676 ; list of additional keywords that should be grabbed from environ object
678 677 ; can be string with comma separated list of words in lowercase
679 678 ; (by default client will always send following info:
680 679 ; 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that
681 680 ; start with HTTP*); this list can be extended with additional keywords here
682 681 #appenlight.environ_keys_whitelist =
683 682
684 683 ; list of keywords that should be blanked from request object
685 684 ; can be string with comma separated list of words in lowercase
686 685 ; (by default client will always blank keys that contain following words
687 686 ; 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf'
688 687 ; this list can be extended with additional keywords set here)
689 688 #appenlight.request_keys_blacklist =
690 689
691 690 ; list of namespaces that should be ignored when gathering log entries
692 691 ; can be string with comma separated list of namespaces
693 692 ; (by default the client ignores own entries: appenlight_client.client)
694 693 #appenlight.log_namespace_blacklist =
695 694
696 695 ; Statsd client config, this is used to send metrics to statsd
697 696 ; We recommend setting up statsd_exporter and scraping the metrics using Prometheus
698 697 #statsd.enabled = false
699 698 #statsd.statsd_host = 0.0.0.0
700 699 #statsd.statsd_port = 8125
701 700 #statsd.statsd_prefix =
702 701 #statsd.statsd_ipv6 = false
703 702
704 703 ; configure logging automatically at server startup; set to false
705 704 ; to use the custom logging config below.
706 705 ; RC_LOGGING_FORMATTER
707 706 ; RC_LOGGING_LEVEL
708 707 ; env variables can control the settings for logging in case of autoconfigure
709 708
710 709 #logging.autoconfigure = true
711 710
712 711 ; specify your own custom logging config file to configure logging
713 712 #logging.logging_conf_file = /path/to/custom_logging.ini
714 713
715 714 ; Dummy marker to add new entries after.
716 715 ; Add any custom entries below. Please don't remove this marker.
717 716 custom.conf = 1
718 717
719 718
720 719 ; #####################
721 720 ; LOGGING CONFIGURATION
722 721 ; #####################
723 722
724 723 [loggers]
725 724 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper
726 725
727 726 [handlers]
728 727 keys = console, console_sql
729 728
730 729 [formatters]
731 730 keys = generic, json, color_formatter, color_formatter_sql
732 731
733 732 ; #######
734 733 ; LOGGERS
735 734 ; #######
736 735 [logger_root]
737 736 level = NOTSET
738 737 handlers = console
739 738
740 739 [logger_sqlalchemy]
741 740 level = INFO
742 741 handlers = console_sql
743 742 qualname = sqlalchemy.engine
744 743 propagate = 0
745 744
746 745 [logger_beaker]
747 746 level = DEBUG
748 747 handlers =
749 748 qualname = beaker.container
750 749 propagate = 1
751 750
752 751 [logger_rhodecode]
753 752 level = DEBUG
754 753 handlers =
755 754 qualname = rhodecode
756 755 propagate = 1
757 756
758 757 [logger_ssh_wrapper]
759 758 level = DEBUG
760 759 handlers =
761 760 qualname = ssh_wrapper
762 761 propagate = 1
763 762
764 763 [logger_celery]
765 764 level = DEBUG
766 765 handlers =
767 766 qualname = celery
768 767
769 768
770 769 ; ########
771 770 ; HANDLERS
772 771 ; ########
773 772
774 773 [handler_console]
775 774 class = StreamHandler
776 775 args = (sys.stderr, )
777 776 level = INFO
778 777 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
779 778 ; This allows sending properly formatted logs to grafana loki or elasticsearch
780 779 formatter = generic
781 780
782 781 [handler_console_sql]
783 782 ; "level = DEBUG" logs SQL queries and results.
784 783 ; "level = INFO" logs SQL queries.
785 784 ; "level = WARN" logs neither. (Recommended for production systems.)
786 785 class = StreamHandler
787 786 args = (sys.stderr, )
788 787 level = WARN
789 788 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
790 789 ; This allows sending properly formatted logs to grafana loki or elasticsearch
791 790 formatter = generic
792 791
793 792 ; ##########
794 793 ; FORMATTERS
795 794 ; ##########
796 795
797 796 [formatter_generic]
798 797 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
799 798 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
800 799 datefmt = %Y-%m-%d %H:%M:%S
801 800
802 801 [formatter_color_formatter]
803 802 class = rhodecode.lib.logging_formatter.ColorFormatter
804 803 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
805 804 datefmt = %Y-%m-%d %H:%M:%S
806 805
807 806 [formatter_color_formatter_sql]
808 807 class = rhodecode.lib.logging_formatter.ColorFormatterSql
809 808 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
810 809 datefmt = %Y-%m-%d %H:%M:%S
811 810
812 811 [formatter_json]
813 812 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
814 813 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter