configs: added changes required to track logging with loki or logstash
super-admin -
r1019:345b06ac default
@@ -1,247 +1,256 b''
1 1 ## -*- coding: utf-8 -*-
2 2
3 3 ; #################################
4 4 ; RHODECODE VCSSERVER CONFIGURATION
5 5 ; #################################
6 6
7 7 [server:main]
8 8 ; COMMON HOST/IP CONFIG
9 9 host = 0.0.0.0
10 10 port = 9900
11 11
12 12 ; ##################################################
13 13 ; WAITRESS WSGI SERVER - Recommended for Development
14 14 ; ##################################################
15 15
16 16 ; use server type
17 17 use = egg:waitress#main
18 18
19 19 ; number of worker threads
20 20 threads = 5
21 21
22 22 ; MAX BODY SIZE 100GB
23 23 max_request_body_size = 107374182400
24 24
25 25 ; Use poll instead of select; fixes file descriptor limit problems.
26 26 ; May not work on old Windows systems.
27 27 asyncore_use_poll = true
28 28
29 29
30 30 ; ###########################
31 31 ; GUNICORN APPLICATION SERVER
32 32 ; ###########################
33 33
34 34 ; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
35 35
36 36 ; Module to use, this setting shouldn't be changed
37 37 #use = egg:gunicorn#main
38 38
39 39 ; Sets the number of process workers. More workers means more concurrent connections
40 40 ; RhodeCode can handle at the same time. Each additional worker also increases
41 41 ; memory usage, as each has its own set of caches.
42 42 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
43 43 ; than 8-10, except for really big deployments, e.g. 700-1000 users.
44 44 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
45 45 ; when using more than 1 worker.
46 46 #workers = 2
47 47
48 48 ; Gunicorn access log level
49 49 #loglevel = info
50 50
51 51 ; Process name visible in process list
52 52 #proc_name = rhodecode_vcsserver
53 53
54 54 ; Type of worker class, one of `sync`, `gevent`
55 55 ; currently `sync` is the only option allowed.
56 56 #worker_class = sync
57 57
58 58 ; The maximum number of simultaneous clients. Valid only for gevent
59 59 #worker_connections = 10
60 60
61 61 ; Max number of requests that a worker will handle before being gracefully restarted.
62 62 ; Prevents memory leaks; jitter adds variability so not all workers are restarted at once.
63 63 #max_requests = 1000
64 64 #max_requests_jitter = 30
65 65
66 66 ; Amount of time a worker can spend handling a request before it
67 67 ; gets killed and restarted. By default set to 21600 (6hrs)
68 68 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
69 69 #timeout = 21600
70 70
71 71 ; The maximum size of HTTP request line in bytes.
72 72 ; 0 for unlimited
73 73 #limit_request_line = 0
74 74
75 75 ; Limit the number of HTTP headers fields in a request.
76 76 ; By default this value is 100 and can't be larger than 32768.
77 77 #limit_request_fields = 32768
78 78
79 79 ; Limit the allowed size of an HTTP request header field.
80 80 ; Value is a positive number or 0.
81 81 ; Setting it to 0 will allow unlimited header field sizes.
82 82 #limit_request_field_size = 0
83 83
84 84 ; Timeout for graceful workers restart.
85 85 ; After receiving a restart signal, workers have this much time to finish
86 86 ; serving requests. Workers still alive after the timeout (starting from the
87 87 ; receipt of the restart signal) are force killed.
88 88 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
89 89 #graceful_timeout = 3600
90 90
91 91 # The number of seconds to wait for requests on a Keep-Alive connection.
92 92 # Generally set in the 1-5 seconds range.
93 93 #keepalive = 2
94 94
95 95 ; Maximum memory usage that each worker can use before it will receive a
96 96 ; graceful restart signal. 0 = memory monitoring is disabled
97 97 ; Examples: 268435456 (256MB), 536870912 (512MB)
98 98 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
99 99 #memory_max_usage = 0
100 100
101 101 ; How often in seconds to check for memory usage for each gunicorn worker
102 102 #memory_usage_check_interval = 60
103 103
104 104 ; Threshold below which we don't recycle a worker if garbage collection
105 105 ; frees up enough resources. Before each restart we try to run GC on the worker;
106 106 ; if enough memory is freed after that, the restart will not happen.
107 107 #memory_usage_recovery_threshold = 0.8
108 108
109 109
110 110 [app:main]
111 111 ; The %(here)s variable will be replaced with the absolute path of parent directory
112 112 ; of this file
113 113 use = egg:rhodecode-vcsserver
114 114
115 115
116 116 ; #############
117 117 ; DEBUG OPTIONS
118 118 ; #############
119 119
120 120 # During development we want to have the debug toolbar enabled
121 121 pyramid.includes =
122 122 pyramid_debugtoolbar
123 123
124 124 debugtoolbar.hosts = 0.0.0.0/0
125 125 debugtoolbar.exclude_prefixes =
126 126 /css
127 127 /fonts
128 128 /images
129 129 /js
130 130
131 131 ; #################
132 132 ; END DEBUG OPTIONS
133 133 ; #################
134 134
135 135 ; Pyramid default locale; this needs to be set
136 136 pyramid.default_locale_name = en
137 137
138 138 ; default locale used by VCS systems
139 139 locale = en_US.UTF-8
140 140
141 141 ; path to binaries for vcsserver, it should be set by the installer
142 142 ; at installation time, e.g. /home/user/vcsserver-1/profile/bin
143 143 ; it can also be a path to nix-build output in case of development
144 144 core.binary_dir = ""
145 145
146 146 ; Custom exception store path, defaults to TMPDIR
147 147 ; This is used to store exceptions from RhodeCode in a shared directory
148 148 #exception_tracker.store_path =
149 149
150 150 ; #############
151 151 ; DOGPILE CACHE
152 152 ; #############
153 153
154 154 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
155 155 ; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space
156 156 cache_dir = %(here)s/data
157 157
158 158 ; ***************************************
159 159 ; `repo_object` cache, default file based
160 160 ; ***************************************
161 161
162 162 ; `repo_object` cache settings for vcs methods for repositories
163 163 rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
164 164
165 165 ; cache auto-expires after N seconds
166 166 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
167 167 rc_cache.repo_object.expiration_time = 2592000
168 168
169 169 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
170 170 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
171 171
172 172 ; ***********************************************************
173 173 ; `repo_object` cache with redis backend
174 174 ; recommended for larger instances and for better performance
175 175 ; ***********************************************************
176 176
177 177 ; `repo_object` cache settings for vcs methods for repositories
178 178 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
179 179
180 180 ; cache auto-expires after N seconds
181 181 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
182 182 #rc_cache.repo_object.expiration_time = 2592000
183 183
184 184 ; redis_expiration_time needs to be greater than expiration_time
185 185 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000
186 186
187 187 #rc_cache.repo_object.arguments.host = localhost
188 188 #rc_cache.repo_object.arguments.port = 6379
189 189 #rc_cache.repo_object.arguments.db = 5
190 190 #rc_cache.repo_object.arguments.socket_timeout = 30
191 191 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
192 192 #rc_cache.repo_object.arguments.distributed_lock = true
193 193
194 194 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
195 195 #rc_cache.repo_object.arguments.lock_auto_renewal = true
196 196
197 197 ; Statsd client config, this is used to send metrics to statsd
198 198 ; We recommend setting up statsd_exporter and scraping the metrics using Prometheus
199 199 #statsd.enabled = false
200 200 #statsd.statsd_host = 0.0.0.0
201 201 #statsd.statsd_port = 8125
202 202 #statsd.statsd_prefix =
203 203 #statsd.statsd_ipv6 = false
204 204
205 205 ; #####################
206 206 ; LOGGING CONFIGURATION
207 207 ; #####################
208 208 [loggers]
209 209 keys = root, vcsserver
210 210
211 211 [handlers]
212 212 keys = console
213 213
214 214 [formatters]
215 215 keys = generic
216 216
217 217 ; #######
218 218 ; LOGGERS
219 219 ; #######
220 220 [logger_root]
221 221 level = NOTSET
222 222 handlers = console
223 223
224 224 [logger_vcsserver]
225 225 level = DEBUG
226 226 handlers =
227 227 qualname = vcsserver
228 228 propagate = 1
229 229
230 230
231 231 ; ########
232 232 ; HANDLERS
233 233 ; ########
234 234
235 235 [handler_console]
236 236 class = StreamHandler
237 237 args = (sys.stderr, )
238 238 level = DEBUG
239 239 formatter = generic
240 ; To enable JSON-formatted logs, replace generic with json
241 ; This allows sending properly formatted logs to Grafana Loki or Elasticsearch
242 #formatter = json
243
240 244
241 245 ; ##########
242 246 ; FORMATTERS
243 247 ; ##########
244 248
245 249 [formatter_generic]
246 250 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
247 251 datefmt = %Y-%m-%d %H:%M:%S
252
253 [formatter_json]
254 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
255 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
256
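Note on the new formatter_json entry: it points at a vendored JSON formatter class. As a minimal sketch of what such a formatter is assumed to do (modeled on python-json-logger conventions; the real vcsserver.lib._vendor.jsonlogger.JsonFormatter may differ in detail), the %(...)s names in the ini format string become the keys of a JSON object emitted per log record:

# Sketch only: approximates what the vendored JsonFormatter is assumed to do,
# based on python-json-logger conventions; the real class may differ.
import json
import logging
import re
import time


class SketchJsonFormatter(logging.Formatter):
    # field names are taken from the ini format string, e.g.
    # "%(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s"
    FIELD_RE = re.compile(r'%\((\w+)\)')

    def __init__(self, fmt=None, datefmt=None):
        super().__init__(fmt, datefmt)
        self._fields = self.FIELD_RE.findall(fmt or '%(message)s')

    def format(self, record):
        data = {}
        for name in self._fields:
            if name == 'message':
                data[name] = record.getMessage()
            elif name == 'timestamp':
                # synthesized key, not a stock LogRecord attribute
                data[name] = time.strftime(
                    '%Y-%m-%dT%H:%M:%S', time.gmtime(record.created))
            else:
                # keys absent on the record (e.g. req_id outside a request)
                # fall back to None so the JSON document stays well-formed
                data[name] = getattr(record, name, None)
        return json.dumps(data)

A record logged through this would serialize to something like {"timestamp": "...", "levelname": "INFO", "name": "vcsserver", "message": "...", "req_id": null}, which Loki or Elasticsearch can index without extra parsing rules.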
@@ -1,265 +1,269 b''
1 1 """
2 2 Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
3 3 Gunicorn configuration should be managed by the .ini file entries of RhodeCode or VCSServer
4 4 """
5 5
6 6 import gc
7 7 import os
8 8 import sys
9 9 import math
10 10 import time
11 11 import threading
12 12 import traceback
13 13 import random
14 14 from gunicorn.glogging import Logger
15 15
16 16
17 17 def get_workers():
18 18 import multiprocessing
19 19 return multiprocessing.cpu_count() * 2 + 1
20 20
21 21 # GLOBAL
22 22 errorlog = '-'
23 23 accesslog = '-'
24 24
25 25
26 26 # SERVER MECHANICS
27 27 # None == system temp dir
28 28 # worker_tmp_dir is recommended to be set to some tmpfs
29 29 worker_tmp_dir = None
30 30 tmp_upload_dir = None
31 31
32 32 # Custom log format
33 33 access_log_format = (
34 34 '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
35 35
36 # Loki-friendly access log format for easier parsing in Grafana
37 #access_log_format = (
38 # 'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
39
36 40 # self-adjust workers based on CPU count
37 41 # workers = get_workers()
38 42
39 43
40 44 def _get_process_rss(pid=None):
41 45 try:
42 46 import psutil
43 47 if pid:
44 48 proc = psutil.Process(pid)
45 49 else:
46 50 proc = psutil.Process()
47 51 return proc.memory_info().rss
48 52 except Exception:
49 53 return None
50 54
51 55
52 56 def _get_config(ini_path):
53 57
54 58 try:
55 59 import configparser
56 60 except ImportError:
57 61 import ConfigParser as configparser
58 62 try:
59 63 config = configparser.RawConfigParser()
60 64 config.read(ini_path)
61 65 return config
62 66 except Exception:
63 67 return None
64 68
65 69
66 70 def _time_with_offset(memory_usage_check_interval):
67 71 return time.time() - random.randint(0, int(memory_usage_check_interval / 2))
68 72
69 73
70 74 def pre_fork(server, worker):
71 75 pass
72 76
73 77
74 78 def post_fork(server, worker):
75 79
76 80 # memory spec defaults
77 81 _memory_max_usage = 0
78 82 _memory_usage_check_interval = 60
79 83 _memory_usage_recovery_threshold = 0.8
80 84
81 85 ini_path = os.path.abspath(server.cfg.paste)
82 86 conf = _get_config(ini_path)
83 87
84 88 section = 'server:main'
85 89 if conf and conf.has_section(section):
86 90
87 91 if conf.has_option(section, 'memory_max_usage'):
88 92 _memory_max_usage = conf.getint(section, 'memory_max_usage')
89 93
90 94 if conf.has_option(section, 'memory_usage_check_interval'):
91 95 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
92 96
93 97 if conf.has_option(section, 'memory_usage_recovery_threshold'):
94 98 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
95 99
96 100 worker._memory_max_usage = _memory_max_usage
97 101 worker._memory_usage_check_interval = _memory_usage_check_interval
98 102 worker._memory_usage_recovery_threshold = _memory_usage_recovery_threshold
99 103
100 104 # register memory last check time, with some random offset so we don't recycle all
101 105 # at once
102 106 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
103 107
104 108 if _memory_max_usage:
105 109 server.log.info("[%-10s] WORKER spawned with max memory set at %s", worker.pid,
106 110 _format_data_size(_memory_max_usage))
107 111 else:
108 112 server.log.info("[%-10s] WORKER spawned", worker.pid)
109 113
110 114
111 115 def pre_exec(server):
112 116 server.log.info("Forked child, re-executing.")
113 117
114 118
115 119 def on_starting(server):
116 120 server_lbl = '{} {}'.format(server.proc_name, server.address)
117 121 server.log.info("Server %s is starting.", server_lbl)
118 122
119 123
120 124 def when_ready(server):
121 125 server.log.info("Server %s is ready. Spawning workers", server)
122 126
123 127
124 128 def on_reload(server):
125 129 pass
126 130
127 131
128 132 def _format_data_size(size, unit="B", precision=1, binary=True):
129 133 """Format a number using SI units (kilo, mega, etc.).
130 134
131 135 ``size``: The number as a float or int.
132 136
133 137 ``unit``: The unit name in plural form. Examples: "bytes", "B".
134 138
135 139 ``precision``: How many digits to the right of the decimal point. Default
136 140 is 1. 0 suppresses the decimal point.
137 141
138 142 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
139 143 If true, use base-2 binary prefixes (kibi = Ki = 1024).
140 144
144 148
145 149 """
146 150
147 151 if not binary:
148 152 base = 1000
149 153 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
150 154 else:
151 155 base = 1024
152 156 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
153 157
154 158 sign = ""
155 159 if size > 0:
156 160 m = int(math.log(size, base))
157 161 elif size < 0:
158 162 sign = "-"
159 163 size = -size
160 164 m = int(math.log(size, base))
161 165 else:
162 166 m = 0
163 167 if m > 8:
164 168 m = 8
165 169
166 170 if m == 0:
167 171 precision = '%.0f'
168 172 else:
169 173 precision = '%%.%df' % precision
170 174
171 175 size = precision % (size / math.pow(base, m))
172 176
173 177 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
174 178
175 179
176 180 def _check_memory_usage(worker):
177 181 memory_max_usage = worker._memory_max_usage
178 182 if not memory_max_usage:
179 183 return
180 184
181 185 memory_usage_check_interval = worker._memory_usage_check_interval
182 186 memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
183 187
184 188 elapsed = time.time() - worker._last_memory_check_time
185 189 if elapsed > memory_usage_check_interval:
186 190 mem_usage = _get_process_rss()
187 191 if mem_usage and mem_usage > memory_max_usage:
188 192 worker.log.info(
189 193 "memory usage %s > %s, forcing gc",
190 194 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
191 195 # Try to clean it up by forcing a full collection.
192 196 gc.collect()
193 197 mem_usage = _get_process_rss()
194 198 if mem_usage and mem_usage > memory_usage_recovery_threshold:
195 199 # Didn't clean up enough, we'll have to terminate.
196 200 worker.log.warning(
197 201 "memory usage %s > %s after gc, quitting",
198 202 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
199 203 # This will cause worker to auto-restart itself
200 204 worker.alive = False
201 205 worker._last_memory_check_time = time.time()
202 206
203 207
204 208 def worker_int(worker):
205 209 worker.log.info("[%-10s] worker received INT or QUIT signal", worker.pid)
206 210
207 211 # get traceback info, on worker crash
208 212 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
209 213 code = []
210 214 for thread_id, stack in sys._current_frames().items():
211 215 code.append(
212 216 "\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
213 217 for fname, lineno, name, line in traceback.extract_stack(stack):
214 218 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
215 219 if line:
216 220 code.append(" %s" % (line.strip()))
217 221 worker.log.debug("\n".join(code))
218 222
219 223
220 224 def worker_abort(worker):
221 225 worker.log.info("[%-10s] worker received SIGABRT signal", worker.pid)
222 226
223 227
224 228 def worker_exit(server, worker):
225 229 worker.log.info("[%-10s] worker exit", worker.pid)
226 230
227 231
228 232 def child_exit(server, worker):
229 233 worker.log.info("[%-10s] worker child exit", worker.pid)
230 234
231 235
232 236 def pre_request(worker, req):
233 237 worker.start_time = time.time()
234 238 worker.log.debug(
235 239 "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
236 240
237 241
238 242 def post_request(worker, req, environ, resp):
239 243 total_time = time.time() - worker.start_time
240 244 # Gunicorn sometimes has problems with reading the status_code
241 245 status_code = getattr(resp, 'status_code', '')
242 246 worker.log.debug(
243 247 "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
244 248 worker.nr, req.method, req.path, status_code, total_time)
245 249 _check_memory_usage(worker)
246 250
247 251
248 252 class RhodeCodeLogger(Logger):
249 253 """
250 254 Custom Logger that allows some customization that gunicorn doesn't allow
251 255 """
252 256
253 257 datefmt = r"%Y-%m-%d %H:%M:%S"
254 258
255 259 def __init__(self, cfg):
256 260 Logger.__init__(self, cfg)
257 261
258 262 def now(self):
259 263 """ return date in RhodeCode Log format """
260 264 now = time.time()
261 265 msecs = int((now - int(now)) * 1000)
262 266 return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
263 267
264 268
265 269 logger_class = RhodeCodeLogger
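The commented-out loki-style access_log_format above is the heart of this change: it emits key="value" pairs instead of the positional format. As a quick illustrative check (the sample log line below is made up) that such lines split cleanly under the kind of logfmt-style extraction Loki or Logstash applies:

# Illustrative only: shows that the loki-style access_log_format yields
# key="value" pairs a logfmt-aware collector can extract; the sample is made up.
import re

sample = ('time="2021-01-01 10:00:00" pid=12345 level="INFO" type="[GNCRN]" '
          'ip="127.0.0.1      " rqt="0.042" response_code="200" '
          'response_bytes="1024  " uri="GET:/repo " user=":admin" '
          'user_agent="git/2.30.0"')

PAIR_RE = re.compile(r'(\w+)=(?:"([^"]*)"|(\S+))')

fields = {key: (quoted or bare).strip()
          for key, quoted, bare in PAIR_RE.findall(sample)}

assert fields['response_code'] == '200'  # quoted value, padding stripped
assert fields['pid'] == '12345'          # bare (unquoted) value

Both bare values like pid=12345 and quoted, padded values come out usable after a strip(), which is why this format is easier to query in Grafana than the default positional line.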
@@ -1,210 +1,219 b''
1 1 ## -*- coding: utf-8 -*-
2 2
3 3 ; #################################
4 4 ; RHODECODE VCSSERVER CONFIGURATION
5 5 ; #################################
6 6
7 7 [server:main]
8 8 ; COMMON HOST/IP CONFIG
9 9 host = 127.0.0.1
10 10 port = 9900
11 11
12 12
13 13 ; ###########################
14 14 ; GUNICORN APPLICATION SERVER
15 15 ; ###########################
16 16
17 17 ; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
18 18
19 19 ; Module to use, this setting shouldn't be changed
20 20 use = egg:gunicorn#main
21 21
22 22 ; Sets the number of process workers. More workers means more concurrent connections
23 23 ; RhodeCode can handle at the same time. Each additional worker also increases
24 24 ; memory usage, as each has its own set of caches.
25 25 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
26 26 ; than 8-10, except for really big deployments, e.g. 700-1000 users.
27 27 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
28 28 ; when using more than 1 worker.
29 29 workers = 2
30 30
31 31 ; Gunicorn access log level
32 32 loglevel = info
33 33
34 34 ; Process name visible in process list
35 35 proc_name = rhodecode_vcsserver
36 36
37 37 ; Type of worker class, one of `sync`, `gevent`
38 38 ; currently `sync` is the only option allowed.
39 39 worker_class = sync
40 40
41 41 ; The maximum number of simultaneous clients. Valid only for gevent
42 42 worker_connections = 10
43 43
44 44 ; Max number of requests that a worker will handle before being gracefully restarted.
45 45 ; Prevents memory leaks; jitter adds variability so not all workers are restarted at once.
46 46 max_requests = 1000
47 47 max_requests_jitter = 30
48 48
49 49 ; Amount of time a worker can spend handling a request before it
50 50 ; gets killed and restarted. By default set to 21600 (6hrs)
51 51 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
52 52 timeout = 21600
53 53
54 54 ; The maximum size of HTTP request line in bytes.
55 55 ; 0 for unlimited
56 56 limit_request_line = 0
57 57
58 58 ; Limit the number of HTTP headers fields in a request.
59 59 ; By default this value is 100 and can't be larger than 32768.
60 60 limit_request_fields = 32768
61 61
62 62 ; Limit the allowed size of an HTTP request header field.
63 63 ; Value is a positive number or 0.
64 64 ; Setting it to 0 will allow unlimited header field sizes.
65 65 limit_request_field_size = 0
66 66
67 67 ; Timeout for graceful workers restart.
68 68 ; After receiving a restart signal, workers have this much time to finish
69 69 ; serving requests. Workers still alive after the timeout (starting from the
70 70 ; receipt of the restart signal) are force killed.
71 71 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
72 72 graceful_timeout = 3600
73 73
74 74 # The number of seconds to wait for requests on a Keep-Alive connection.
75 75 # Generally set in the 1-5 seconds range.
76 76 keepalive = 2
77 77
78 78 ; Maximum memory usage that each worker can use before it will receive a
79 79 ; graceful restart signal. 0 = memory monitoring is disabled
80 80 ; Examples: 268435456 (256MB), 536870912 (512MB)
81 81 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
82 82 memory_max_usage = 0
83 83
84 84 ; How often in seconds to check for memory usage for each gunicorn worker
85 85 memory_usage_check_interval = 60
86 86
87 87 ; Threshold below which we don't recycle a worker if garbage collection
88 88 ; frees up enough resources. Before each restart we try to run GC on the worker;
89 89 ; if enough memory is freed after that, the restart will not happen.
90 90 memory_usage_recovery_threshold = 0.8
91 91
92 92
93 93 [app:main]
94 94 ; The %(here)s variable will be replaced with the absolute path of parent directory
95 95 ; of this file
96 96 use = egg:rhodecode-vcsserver
97 97
98 98 ; Pyramid default locale; this needs to be set
99 99 pyramid.default_locale_name = en
100 100
101 101 ; default locale used by VCS systems
102 102 locale = en_US.UTF-8
103 103
104 104 ; path to binaries for vcsserver, it should be set by the installer
105 105 ; at installation time, e.g. /home/user/vcsserver-1/profile/bin
106 106 ; it can also be a path to nix-build output in case of development
107 107 core.binary_dir = ""
108 108
109 109 ; Custom exception store path, defaults to TMPDIR
110 110 ; This is used to store exceptions from RhodeCode in a shared directory
111 111 #exception_tracker.store_path =
112 112
113 113 ; #############
114 114 ; DOGPILE CACHE
115 115 ; #############
116 116
117 117 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
118 118 ; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space
119 119 cache_dir = %(here)s/data
120 120
121 121 ; ***************************************
122 122 ; `repo_object` cache, default file based
123 123 ; ***************************************
124 124
125 125 ; `repo_object` cache settings for vcs methods for repositories
126 126 rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
127 127
128 128 ; cache auto-expires after N seconds
129 129 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
130 130 rc_cache.repo_object.expiration_time = 2592000
131 131
132 132 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
133 133 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
134 134
135 135 ; ***********************************************************
136 136 ; `repo_object` cache with redis backend
137 137 ; recommended for larger instances and for better performance
138 138 ; ***********************************************************
139 139
140 140 ; `repo_object` cache settings for vcs methods for repositories
141 141 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
142 142
143 143 ; cache auto-expires after N seconds
144 144 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
145 145 #rc_cache.repo_object.expiration_time = 2592000
146 146
147 147 ; redis_expiration_time needs to be greater than expiration_time
148 148 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000
149 149
150 150 #rc_cache.repo_object.arguments.host = localhost
151 151 #rc_cache.repo_object.arguments.port = 6379
152 152 #rc_cache.repo_object.arguments.db = 5
153 153 #rc_cache.repo_object.arguments.socket_timeout = 30
154 154 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
155 155 #rc_cache.repo_object.arguments.distributed_lock = true
156 156
157 157 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
158 158 #rc_cache.repo_object.arguments.lock_auto_renewal = true
159 159
160 160 ; Statsd client config, this is used to send metrics to statsd
161 161 ; We recommend setting up statsd_exporter and scraping the metrics using Prometheus
162 162 #statsd.enabled = false
163 163 #statsd.statsd_host = 0.0.0.0
164 164 #statsd.statsd_port = 8125
165 165 #statsd.statsd_prefix =
166 166 #statsd.statsd_ipv6 = false
167 167
168 168 ; #####################
169 169 ; LOGGING CONFIGURATION
170 170 ; #####################
171 171 [loggers]
172 172 keys = root, vcsserver
173 173
174 174 [handlers]
175 175 keys = console
176 176
177 177 [formatters]
178 178 keys = generic
179 179
180 180 ; #######
181 181 ; LOGGERS
182 182 ; #######
183 183 [logger_root]
184 184 level = NOTSET
185 185 handlers = console
186 186
187 187 [logger_vcsserver]
188 188 level = DEBUG
189 189 handlers =
190 190 qualname = vcsserver
191 191 propagate = 1
192 192
193 193
194 194 ; ########
195 195 ; HANDLERS
196 196 ; ########
197 197
198 198 [handler_console]
199 199 class = StreamHandler
200 200 args = (sys.stderr, )
201 201 level = INFO
202 202 formatter = generic
203 ; To enable JSON-formatted logs, replace generic with json
204 ; This allows sending properly formatted logs to Grafana Loki or Elasticsearch
205 #formatter = json
206
203 207
204 208 ; ##########
205 209 ; FORMATTERS
206 210 ; ##########
207 211
208 212 [formatter_generic]
209 213 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
210 214 datefmt = %Y-%m-%d %H:%M:%S
215
216 [formatter_json]
217 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
218 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
219
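One way to sanity-check the logging sections above is to load the ini through the stdlib config loader. The file name below is a placeholder, and once formatter = json is uncommented the vcsserver package must be importable so the vendored JsonFormatter class resolves:

# Minimal check that the [loggers]/[handlers]/[formatters] sections parse;
# 'vcsserver.ini' is a placeholder path for the file shown above.
import logging
import logging.config

logging.config.fileConfig('vcsserver.ini', disable_existing_loggers=False)

log = logging.getLogger('vcsserver')
log.info('logging configured')  # goes to stderr via handler_console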