config: updated configs to no longer comment out the logging configuration....
super-admin -
r1024:8705056f default
@@ -1,276 +1,276 @@
1 1 ## -*- coding: utf-8 -*-
2 2
3 3 ; #################################
4 4 ; RHODECODE VCSSERVER CONFIGURATION
5 5 ; #################################
6 6
7 7 [server:main]
8 8 ; COMMON HOST/IP CONFIG
9 9 host = 0.0.0.0
10 10 port = 9900
11 11
12 12 ; ##################################################
13 13 ; WAITRESS WSGI SERVER - Recommended for Development
14 14 ; ##################################################
15 15
16 16 ; use server type
17 17 use = egg:waitress#main
18 18
19 19 ; number of worker threads
20 20 threads = 5
21 21
22 22 ; MAX BODY SIZE 100GB
23 23 max_request_body_size = 107374182400
24 24
25 25 ; Use poll instead of select; fixes file descriptor limit problems.
26 26 ; May not work on old Windows systems.
27 27 asyncore_use_poll = true
28 28
29 29
30 30 ; ###########################
31 31 ; GUNICORN APPLICATION SERVER
32 32 ; ###########################
33 33
34 34 ; run with gunicorn --paste rhodecode.ini
35 35
36 36 ; Module to use, this setting shouldn't be changed
37 37 #use = egg:gunicorn#main
38 38
39 39 ; Sets the number of process workers. More workers means more concurrent connections
40 40 ; RhodeCode can handle at the same time. Each additional worker also increases
41 41 ; memory usage, as each has its own set of caches.
42 42 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
43 43 ; than 8-10, unless it is a really big deployment, e.g. 700-1000 users.
44 44 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
45 45 ; when using more than 1 worker.
46 46 #workers = 2
47 47
48 48 ; Gunicorn access log level
49 49 #loglevel = info
50 50
51 51 ; Process name visible in process list
52 52 #proc_name = rhodecode_vcsserver
53 53
54 54 ; Type of worker class, one of `sync`, `gevent`
55 55 ; currently `sync` is the only option allowed.
56 56 #worker_class = sync
57 57
58 58 ; The maximum number of simultaneous clients. Valid only for gevent
59 59 #worker_connections = 10
60 60
61 61 ; Max number of requests that a worker will handle before being gracefully restarted.
62 62 ; Prevents memory leaks; jitter adds variability so not all workers are restarted at once.
63 63 #max_requests = 1000
64 64 #max_requests_jitter = 30
65 65
66 66 ; Amount of time a worker can spend handling a request before it
67 67 ; gets killed and restarted. By default set to 21600 (6hrs)
68 68 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
69 69 #timeout = 21600
70 70
71 71 ; The maximum size of HTTP request line in bytes.
72 72 ; 0 for unlimited
73 73 #limit_request_line = 0
74 74
75 75 ; Limit the number of HTTP headers fields in a request.
76 76 ; By default this value is 100 and can't be larger than 32768.
77 77 #limit_request_fields = 32768
78 78
79 79 ; Limit the allowed size of an HTTP request header field.
80 80 ; Value is a positive number or 0.
81 81 ; Setting it to 0 will allow unlimited header field sizes.
82 82 #limit_request_field_size = 0
83 83
84 84 ; Timeout for graceful workers restart.
85 85 ; After receiving a restart signal, workers have this much time to finish
86 86 ; serving requests. Workers still alive after the timeout (starting from the
87 87 ; receipt of the restart signal) are force killed.
88 88 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
89 89 #graceful_timeout = 21600
90 90
91 91 # The number of seconds to wait for requests on a Keep-Alive connection.
92 92 # Generally set in the 1-5 seconds range.
93 93 #keepalive = 2
94 94
95 95 ; Maximum memory usage that each worker can use before it will receive a
96 96 ; graceful restart signal. 0 = memory monitoring is disabled
97 97 ; Examples: 268435456 (256MB), 536870912 (512MB)
98 98 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
99 99 #memory_max_usage = 0
100 100
101 101 ; How often in seconds to check for memory usage for each gunicorn worker
102 102 #memory_usage_check_interval = 60
103 103
104 104 ; Threshold value below which we don't recycle a worker if garbage collection
105 105 ; frees up enough resources. Before each restart we try to run GC on the worker;
106 106 ; if we get enough free memory after that, the restart will not happen.
107 107 #memory_usage_recovery_threshold = 0.8
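; Illustrative arithmetic (not in the shipped config): with
; memory_max_usage = 536870912 (512MB) and the 0.8 threshold above, a worker
; whose RSS still exceeds 536870912 * 0.8 ≈ 429496730 bytes (~410MB) after a
; forced garbage collection receives the graceful restart signal.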
108 108
109 109
110 110 [app:main]
111 111 ; The %(here)s variable will be replaced with the absolute path of the parent
112 112 ; directory of this file.
113 113 ; Each option in app:main can be overridden by an environment variable
114 114 ;
115 115 ;To override an option:
116 116 ;
117 117 ;RC_<KeyName>
118 118 ;Everything should be uppercase, . and - should be replaced by _.
119 119 ;For example, if you have these configuration settings:
120 120 ;rc_cache.repo_object.backend = foo
121 121 ;can be overridden by
122 122 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
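;Another illustrative mapping, using a key that appears further down in this file:
;rc_cache.repo_object.arguments.socket_timeout = 30
;can be overridden by
;export RC_CACHE_REPO_OBJECT_ARGUMENTS_SOCKET_TIMEOUT=30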
123 123
124 124 use = egg:rhodecode-vcsserver
125 125
126 126
127 127 ; #############
128 128 ; DEBUG OPTIONS
129 129 ; #############
130 130
131 131 # During development we want to have the debug toolbar enabled
132 132 pyramid.includes =
133 133 pyramid_debugtoolbar
134 134
135 135 debugtoolbar.hosts = 0.0.0.0/0
136 136 debugtoolbar.exclude_prefixes =
137 137 /css
138 138 /fonts
139 139 /images
140 140 /js
141 141
142 142 ; #################
143 143 ; END DEBUG OPTIONS
144 144 ; #################
145 145
146 146 ; Pyramid default locales, we need this to be set
147 147 #pyramid.default_locale_name = en
148 148
149 149 ; default locale used by VCS systems
150 150 #locale = en_US.UTF-8
151 151
152 152 ; path to binaries for vcsserver, it should be set by the installer
153 153 ; at installation time, e.g /home/user/.rccontrol/vcsserver-1/profile/bin
154 154 ; it can also be a path to nix-build output in case of development
155 155 core.binary_dir = ""
156 156
157 157 ; Custom exception store path, defaults to TMPDIR
158 158 ; This is used to store exceptions from RhodeCode in a shared directory
159 159 #exception_tracker.store_path =
160 160
161 161 ; #############
162 162 ; DOGPILE CACHE
163 163 ; #############
164 164
165 165 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
166 166 ; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space
167 167 #cache_dir = %(here)s/data
168 168
169 169 ; ***************************************
170 170 ; `repo_object` cache, default file based
171 171 ; ***************************************
172 172
173 173 ; `repo_object` cache settings for vcs methods for repositories
174 174 #rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
175 175
176 176 ; cache auto-expires after N seconds
177 177 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
178 178 #rc_cache.repo_object.expiration_time = 2592000
179 179
180 180 ; file cache store path. Defaults to the `cache_dir =` value, or tempdir if neither is set
181 181 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache_repo_object.db
182 182
183 183 ; ***********************************************************
184 184 ; `repo_object` cache with redis backend
185 185 ; recommended for larger instances, and for better performance
186 186 ; ***********************************************************
187 187
188 188 ; `repo_object` cache settings for vcs methods for repositories
189 189 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
190 190
191 191 ; cache auto-expires after N seconds
192 192 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
193 193 #rc_cache.repo_object.expiration_time = 2592000
194 194
195 195 ; redis_expiration_time needs to be greater than expiration_time
196 196 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000
197 197
198 198 #rc_cache.repo_object.arguments.host = localhost
199 199 #rc_cache.repo_object.arguments.port = 6379
200 200 #rc_cache.repo_object.arguments.db = 5
201 201 #rc_cache.repo_object.arguments.socket_timeout = 30
202 202 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
203 203 #rc_cache.repo_object.arguments.distributed_lock = true
204 204
205 205 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
206 206 #rc_cache.repo_object.arguments.lock_auto_renewal = true
207 207
208 208 ; Statsd client config; this is used to send metrics to statsd
209 209 ; We recommend setting up statsd_exporter and scraping the metrics with Prometheus
210 210 #statsd.enabled = false
211 211 #statsd.statsd_host = 0.0.0.0
212 212 #statsd.statsd_port = 8125
213 213 #statsd.statsd_prefix =
214 214 #statsd.statsd_ipv6 = false
215 215
216 216 ; Configure logging automatically at server startup. Set to false
217 217 ; to use the custom logging config below.
218 218 ; RC_LOGGING_FORMATTER
219 219 ; RC_LOGGING_LEVEL
220 220 ; env variables can control the logging settings when autoconfigure is used
221 221
222 222 #logging.autoconfigure = true
223 223
224 224 ; specify your own custom logging config file to configure logging
225 225 #logging.logging_conf_file = /path/to/custom_logging.ini
226 226
227 227 ; #####################
228 228 ; LOGGING CONFIGURATION
229 229 ; #####################
230 230
231 #[loggers]
232 #keys = root, vcsserver
231 [loggers]
232 keys = root, vcsserver
233 233
234 #[handlers]
235 #keys = console
234 [handlers]
235 keys = console
236 236
237 #[formatters]
238 #keys = generic
237 [formatters]
238 keys = generic, json
239 239
240 240 ; #######
241 241 ; LOGGERS
242 242 ; #######
243 #[logger_root]
244 #level = NOTSET
245 #handlers = console
243 [logger_root]
244 level = NOTSET
245 handlers = console
246 246
247 #[logger_vcsserver]
248 #level = INFO
249 #handlers =
250 #qualname = vcsserver
251 #propagate = 1
247 [logger_vcsserver]
248 level = DEBUG
249 handlers =
250 qualname = vcsserver
251 propagate = 1
252 252
253 253 ; ########
254 254 ; HANDLERS
255 255 ; ########
256 256
257 #[handler_console]
258 #class = StreamHandler
259 #args = (sys.stderr, )
260 #level = INFO
257 [handler_console]
258 class = StreamHandler
259 args = (sys.stderr, )
260 level = DEBUG
261 formatter = generic
261 262 ; To enable JSON formatted logs replace generic with json
262 263 ; This allows sending properly formatted logs to Grafana Loki or Elasticsearch
263 264 #formatter = json
264 #formatter = generic
265 265
266 266 ; ##########
267 267 ; FORMATTERS
268 268 ; ##########
269 269
270 #[formatter_generic]
271 #format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
272 #datefmt = %Y-%m-%d %H:%M:%S
270 [formatter_generic]
271 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
272 datefmt = %Y-%m-%d %H:%M:%S
273 273
274 #[formatter_json]
275 #format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
276 #class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
274 [formatter_json]
275 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
276 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
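The logging section above uses the stdlib logging ini format, so a custom file referenced by logging.logging_conf_file can be applied with the standard library alone. A minimal sketch, assuming logging.autoconfigure is disabled and using a hypothetical path:

import logging
import logging.config

# Load an ini-style logging config such as the section above;
# disable_existing_loggers=False keeps loggers created before this call alive.
logging.config.fileConfig(
    '/path/to/custom_logging.ini',
    disable_existing_loggers=False,
)
logging.getLogger('vcsserver').info('custom logging config applied')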
@@ -1,272 +1,274 @@
1 1 """
2 2 Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
3 3 Gunicorn configuration should be managed by the .ini file entries of RhodeCode or VCSServer
4 4 """
5 5
6 6 import gc
7 7 import os
8 8 import sys
9 9 import math
10 10 import time
11 11 import threading
12 12 import traceback
13 13 import random
14 14 from gunicorn.glogging import Logger
15 15
16 16
17 17 def get_workers():
18 18 import multiprocessing
19 19 return multiprocessing.cpu_count() * 2 + 1
20 20
21 21 # GLOBAL
22 22 errorlog = '-'
23 23 accesslog = '-'
24 24
25 25
26 26 # SERVER MECHANICS
27 27 # None == system temp dir
28 28 # worker_tmp_dir is recommended to be set to some tmpfs
29 29 worker_tmp_dir = None
30 30 tmp_upload_dir = None
31 31
32 #reuse_port = True
33
32 34 # Custom log format
33 35 #access_log_format = (
34 36 # '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
35 37
36 38 # loki format for easier parsing in grafana
37 39 access_log_format = (
38 40 'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
39 41
40 42 # self adjust workers based on CPU count
41 43 # workers = get_workers()
42 44
43 45
44 46 def _get_process_rss(pid=None):
45 47 try:
46 48 import psutil
47 49 if pid:
48 50 proc = psutil.Process(pid)
49 51 else:
50 52 proc = psutil.Process()
51 53 return proc.memory_info().rss
52 54 except Exception:
53 55 return None
54 56
55 57
56 58 def _get_config(ini_path):
57 59
58 60 try:
59 61 import configparser
60 62 except ImportError:
61 63 import ConfigParser as configparser
62 64 try:
63 65 config = configparser.RawConfigParser()
64 66 config.read(ini_path)
65 67 return config
66 68 except Exception:
67 69 return None
68 70
69 71
70 72 def _time_with_offset(memory_usage_check_interval):
71 73 return time.time() - random.randint(0, int(memory_usage_check_interval / 2.0))
72 74
73 75
74 76 def pre_fork(server, worker):
75 77 pass
76 78
77 79
78 80 def post_fork(server, worker):
79 81
80 82 # memory spec defaults
81 83 _memory_max_usage = 0
82 84 _memory_usage_check_interval = 60
83 85 _memory_usage_recovery_threshold = 0.8
84 86
85 87 ini_path = os.path.abspath(server.cfg.paste)
86 88 conf = _get_config(ini_path)
87 89
88 90 section = 'server:main'
89 91 if conf and conf.has_section(section):
90 92
91 93 if conf.has_option(section, 'memory_max_usage'):
92 94 _memory_max_usage = conf.getint(section, 'memory_max_usage')
93 95
94 96 if conf.has_option(section, 'memory_usage_check_interval'):
95 97 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
96 98
97 99 if conf.has_option(section, 'memory_usage_recovery_threshold'):
98 100 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
99 101
100 102 worker._memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
101 103 or _memory_max_usage)
102 104 worker._memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
103 105 or _memory_usage_check_interval)
104 106 worker._memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
105 107 or _memory_usage_recovery_threshold)
106 108
107 109 # register memory last check time, with some random offset so we don't recycle all
108 110 # at once
109 111 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
110 112
111 113 if _memory_max_usage:
112 114 server.log.info("[%-10s] WORKER spawned with max memory set at %s", worker.pid,
113 115 _format_data_size(_memory_max_usage))
114 116 else:
115 117 server.log.info("[%-10s] WORKER spawned", worker.pid)
116 118
117 119
118 120 def pre_exec(server):
119 121 server.log.info("Forked child, re-executing.")
120 122
121 123
122 124 def on_starting(server):
123 125 server_lbl = '{} {}'.format(server.proc_name, server.address)
124 126 server.log.info("Server %s is starting.", server_lbl)
125 127
126 128
127 129 def when_ready(server):
128 130 server.log.info("Server %s is ready. Spawning workers", server)
129 131
130 132
131 133 def on_reload(server):
132 134 pass
133 135
134 136
135 137 def _format_data_size(size, unit="B", precision=1, binary=True):
136 138 """Format a number using SI units (kilo, mega, etc.).
137 139
138 140 ``size``: The number as a float or int.
139 141
140 142 ``unit``: The unit name in plural form. Examples: "bytes", "B".
141 143
142 144 ``precision``: How many digits to the right of the decimal point. Default
143 145 is 1. 0 suppresses the decimal point.
144 146
145 147 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
146 148 If true, use base-2 binary prefixes (kibi = Ki = 1024).
147 149
151 153
152 154 """
153 155
154 156 if not binary:
155 157 base = 1000
156 158 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
157 159 else:
158 160 base = 1024
159 161 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
160 162
161 163 sign = ""
162 164 if size > 0:
163 165 m = int(math.log(size, base))
164 166 elif size < 0:
165 167 sign = "-"
166 168 size = -size
167 169 m = int(math.log(size, base))
168 170 else:
169 171 m = 0
170 172 if m > 8:
171 173 m = 8
172 174
173 175 if m == 0:
174 176 precision = '%.0f'
175 177 else:
176 178 precision = '%%.%df' % precision
177 179
178 180 size = precision % (size / math.pow(base, m))
179 181
180 182 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
181 183
182 184
183 185 def _check_memory_usage(worker):
184 186 memory_max_usage = worker._memory_max_usage
185 187 if not memory_max_usage:
186 188 return
187 189
188 190 memory_usage_check_interval = worker._memory_usage_check_interval
189 191 memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
190 192
191 193 elapsed = time.time() - worker._last_memory_check_time
192 194 if elapsed > memory_usage_check_interval:
193 195 mem_usage = _get_process_rss()
194 196 if mem_usage and mem_usage > memory_max_usage:
195 197 worker.log.info(
196 198 "memory usage %s > %s, forcing gc",
197 199 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
198 200 # Try to clean it up by forcing a full collection.
199 201 gc.collect()
200 202 mem_usage = _get_process_rss()
201 203 if mem_usage and mem_usage > memory_usage_recovery_threshold:
202 204 # Didn't clean up enough, we'll have to terminate.
203 205 worker.log.warning(
204 206 "memory usage %s > %s after gc, quitting",
205 207 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
206 208 # This will cause worker to auto-restart itself
207 209 worker.alive = False
208 210 worker._last_memory_check_time = time.time()
209 211
210 212
211 213 def worker_int(worker):
212 214 worker.log.info("[%-10s] worker received INT or QUIT signal", worker.pid)
213 215
214 216 # get traceback info, on worker crash
215 217 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
216 218 code = []
217 219 for thread_id, stack in sys._current_frames().items():
218 220 code.append(
219 221 "\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
220 222 for fname, lineno, name, line in traceback.extract_stack(stack):
221 223 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
222 224 if line:
223 225 code.append(" %s" % (line.strip()))
224 226 worker.log.debug("\n".join(code))
225 227
226 228
227 229 def worker_abort(worker):
228 230 worker.log.info("[%-10s] worker received SIGABRT signal", worker.pid)
229 231
230 232
231 233 def worker_exit(server, worker):
232 234 worker.log.info("[%-10s] worker exit", worker.pid)
233 235
234 236
235 237 def child_exit(server, worker):
236 238 worker.log.info("[%-10s] worker child exit", worker.pid)
237 239
238 240
239 241 def pre_request(worker, req):
240 242 worker.start_time = time.time()
241 243 worker.log.debug(
242 244 "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
243 245
244 246
245 247 def post_request(worker, req, environ, resp):
246 248 total_time = time.time() - worker.start_time
247 249 # Gunicorn sometimes has problems with reading the status_code
248 250 status_code = getattr(resp, 'status_code', '')
249 251 worker.log.debug(
250 252 "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
251 253 worker.nr, req.method, req.path, status_code, total_time)
252 254 _check_memory_usage(worker)
253 255
254 256
255 257 class RhodeCodeLogger(Logger):
256 258 """
257 259 Custom Logger that adds customization beyond what gunicorn allows
258 260 """
259 261
260 262 datefmt = r"%Y-%m-%d %H:%M:%S"
261 263
262 264 def __init__(self, cfg):
263 265 Logger.__init__(self, cfg)
264 266
265 267 def now(self):
266 268 """ return date in RhodeCode Log format """
267 269 now = time.time()
268 270 msecs = int((now - int(now)) * 1000)
269 271 return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
270 272
271 273
272 274 logger_class = RhodeCodeLogger
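For reference, a typical way to combine this file with one of the .ini files above is: gunicorn --paste vcsserver.ini -c gunicorn_config.py (file names assumed here). The post_fork hook then reads the memory settings from the ini's [server:main] section, and each value can still be overridden at launch via the RC_GUNICORN_MEMORY_MAX_USAGE, RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL and RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD environment variables.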
@@ -1,239 +1,239 @@
1 1 ## -*- coding: utf-8 -*-
2 2
3 3 ; #################################
4 4 ; RHODECODE VCSSERVER CONFIGURATION
5 5 ; #################################
6 6
7 7 [server:main]
8 8 ; COMMON HOST/IP CONFIG
9 9 host = 127.0.0.1
10 10 port = 9900
11 11
12 12
13 13 ; ###########################
14 14 ; GUNICORN APPLICATION SERVER
15 15 ; ###########################
16 16
17 17 ; run with gunicorn --paste rhodecode.ini
18 18
19 19 ; Module to use, this setting shouldn't be changed
20 20 use = egg:gunicorn#main
21 21
22 22 ; Sets the number of process workers. More workers means more concurrent connections
23 23 ; RhodeCode can handle at the same time. Each additional worker also increases
24 24 ; memory usage, as each has its own set of caches.
25 25 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
26 26 ; than 8-10, unless it is a really big deployment, e.g. 700-1000 users.
27 27 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
28 28 ; when using more than 1 worker.
29 29 workers = 2
30 30
31 31 ; Gunicorn access log level
32 32 loglevel = info
33 33
34 34 ; Process name visible in process list
35 35 proc_name = rhodecode_vcsserver
36 36
37 37 ; Type of worker class, one of `sync`, `gevent`
38 38 ; currently `sync` is the only option allowed.
39 39 worker_class = sync
40 40
41 41 ; The maximum number of simultaneous clients. Valid only for gevent
42 42 worker_connections = 10
43 43
44 44 ; Max number of requests that a worker will handle before being gracefully restarted.
45 45 ; Prevents memory leaks; jitter adds variability so not all workers are restarted at once.
46 46 max_requests = 1000
47 47 max_requests_jitter = 30
48 48
49 49 ; Amount of time a worker can spend handling a request before it
50 50 ; gets killed and restarted. By default set to 21600 (6hrs)
51 51 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
52 52 timeout = 21600
53 53
54 54 ; The maximum size of HTTP request line in bytes.
55 55 ; 0 for unlimited
56 56 limit_request_line = 0
57 57
58 58 ; Limit the number of HTTP headers fields in a request.
59 59 ; By default this value is 100 and can't be larger than 32768.
60 60 limit_request_fields = 32768
61 61
62 62 ; Limit the allowed size of an HTTP request header field.
63 63 ; Value is a positive number or 0.
64 64 ; Setting it to 0 will allow unlimited header field sizes.
65 65 limit_request_field_size = 0
66 66
67 67 ; Timeout for graceful workers restart.
68 68 ; After receiving a restart signal, workers have this much time to finish
69 69 ; serving requests. Workers still alive after the timeout (starting from the
70 70 ; receipt of the restart signal) are force killed.
71 71 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
72 72 graceful_timeout = 21600
73 73
74 74 # The number of seconds to wait for requests on a Keep-Alive connection.
75 75 # Generally set in the 1-5 seconds range.
76 76 keepalive = 2
77 77
78 78 ; Maximum memory usage that each worker can use before it will receive a
79 79 ; graceful restart signal. 0 = memory monitoring is disabled
80 80 ; Examples: 268435456 (256MB), 536870912 (512MB)
81 81 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
82 82 memory_max_usage = 0
83 83
84 84 ; How often in seconds to check for memory usage for each gunicorn worker
85 85 memory_usage_check_interval = 60
86 86
87 87 ; Threshold value below which we don't recycle a worker if garbage collection
88 88 ; frees up enough resources. Before each restart we try to run GC on the worker;
89 89 ; if we get enough free memory after that, the restart will not happen.
90 90 memory_usage_recovery_threshold = 0.8
91 91
92 92
93 93 [app:main]
94 94 ; The %(here)s variable will be replaced with the absolute path of the parent
95 95 ; directory of this file.
96 96 ; Each option in app:main can be overridden by an environment variable
97 97 ;
98 98 ;To override an option:
99 99 ;
100 100 ;RC_<KeyName>
101 101 ;Everything should be uppercase, . and - should be replaced by _.
102 102 ;For example, if you have these configuration settings:
103 103 ;rc_cache.repo_object.backend = foo
104 104 ;can be overridden by
105 105 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
106 106
107 107 use = egg:rhodecode-vcsserver
108 108
109 109 ; Pyramid default locales, we need this to be set
110 110 #pyramid.default_locale_name = en
111 111
112 112 ; default locale used by VCS systems
113 113 #locale = en_US.UTF-8
114 114
115 115 ; path to binaries for vcsserver, it should be set by the installer
116 116 ; at installation time, e.g /home/user/.rccontrol/vcsserver-1/profile/bin
117 117 ; it can also be a path to nix-build output in case of development
118 118 core.binary_dir = ""
119 119
120 120 ; Custom exception store path, defaults to TMPDIR
121 121 ; This is used to store exceptions from RhodeCode in a shared directory
122 122 #exception_tracker.store_path =
123 123
124 124 ; #############
125 125 ; DOGPILE CACHE
126 126 ; #############
127 127
128 128 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
129 129 ; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space
130 130 #cache_dir = %(here)s/data
131 131
132 132 ; ***************************************
133 133 ; `repo_object` cache, default file based
134 134 ; ***************************************
135 135
136 136 ; `repo_object` cache settings for vcs methods for repositories
137 137 #rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
138 138
139 139 ; cache auto-expires after N seconds
140 140 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
141 141 #rc_cache.repo_object.expiration_time = 2592000
142 142
143 143 ; file cache store path. Defaults to the `cache_dir =` value, or tempdir if neither is set
144 144 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache_repo_object.db
145 145
146 146 ; ***********************************************************
147 147 ; `repo_object` cache with redis backend
148 148 ; recommended for larger instances, and for better performance
149 149 ; ***********************************************************
150 150
151 151 ; `repo_object` cache settings for vcs methods for repositories
152 152 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
153 153
154 154 ; cache auto-expires after N seconds
155 155 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
156 156 #rc_cache.repo_object.expiration_time = 2592000
157 157
158 158 ; redis_expiration_time needs to be greater than expiration_time
159 159 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000
160 160
161 161 #rc_cache.repo_object.arguments.host = localhost
162 162 #rc_cache.repo_object.arguments.port = 6379
163 163 #rc_cache.repo_object.arguments.db = 5
164 164 #rc_cache.repo_object.arguments.socket_timeout = 30
165 165 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
166 166 #rc_cache.repo_object.arguments.distributed_lock = true
167 167
168 168 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
169 169 #rc_cache.repo_object.arguments.lock_auto_renewal = true
170 170
171 171 ; Statsd client config; this is used to send metrics to statsd
172 172 ; We recommend setting up statsd_exporter and scraping the metrics with Prometheus
173 173 #statsd.enabled = false
174 174 #statsd.statsd_host = 0.0.0.0
175 175 #statsd.statsd_port = 8125
176 176 #statsd.statsd_prefix =
177 177 #statsd.statsd_ipv6 = false
178 178
179 179 ; Configure logging automatically at server startup. Set to false
180 180 ; to use the custom logging config below.
181 181 ; RC_LOGGING_FORMATTER
182 182 ; RC_LOGGING_LEVEL
183 183 ; env variables can control the logging settings when autoconfigure is used
184 184
185 185 #logging.autoconfigure = true
186 186
187 187 ; specify your own custom logging config file to configure logging
188 188 #logging.logging_conf_file = /path/to/custom_logging.ini
189 189
190 190 ; #####################
191 191 ; LOGGING CONFIGURATION
192 192 ; #####################
193 193
194 #[loggers]
195 #keys = root, vcsserver
194 [loggers]
195 keys = root, vcsserver
196 196
197 #[handlers]
198 #keys = console
197 [handlers]
198 keys = console
199 199
200 #[formatters]
201 #keys = generic
200 [formatters]
201 keys = generic, json
202 202
203 203 ; #######
204 204 ; LOGGERS
205 205 ; #######
206 #[logger_root]
207 #level = NOTSET
208 #handlers = console
206 [logger_root]
207 level = NOTSET
208 handlers = console
209 209
210 #[logger_vcsserver]
211 #level = INFO
212 #handlers =
213 #qualname = vcsserver
214 #propagate = 1
210 [logger_vcsserver]
211 level = INFO
212 handlers =
213 qualname = vcsserver
214 propagate = 1
215 215
216 216 ; ########
217 217 ; HANDLERS
218 218 ; ########
219 219
220 #[handler_console]
221 #class = StreamHandler
222 #args = (sys.stderr, )
223 #level = INFO
220 [handler_console]
221 class = StreamHandler
222 args = (sys.stderr, )
223 level = INFO
224 formatter = generic
224 225 ; To enable JSON formatted logs replace generic with json
225 226 ; This allows sending properly formatted logs to Grafana Loki or Elasticsearch
226 227 #formatter = json
227 #formatter = generic
228 228
229 229 ; ##########
230 230 ; FORMATTERS
231 231 ; ##########
232 232
233 #[formatter_generic]
234 #format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
235 #datefmt = %Y-%m-%d %H:%M:%S
233 [formatter_generic]
234 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
235 datefmt = %Y-%m-%d %H:%M:%S
236 236
237 #[formatter_json]
238 #format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
239 #class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
237 [formatter_json]
238 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
239 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter