##// END OF EJS Templates
configs: added changed require to track logging with loki or logstash
super-admin -
r1019:345b06ac default
parent child Browse files
Show More
@@ -1,247 +1,256 b''
1 ## -*- coding: utf-8 -*-
1 ## -*- coding: utf-8 -*-
2
2
3 ; #################################
3 ; #################################
4 ; RHODECODE VCSSERVER CONFIGURATION
4 ; RHODECODE VCSSERVER CONFIGURATION
5 ; #################################
5 ; #################################
6
6
7 [server:main]
7 [server:main]
8 ; COMMON HOST/IP CONFIG
8 ; COMMON HOST/IP CONFIG
9 host = 0.0.0.0
9 host = 0.0.0.0
10 port = 9900
10 port = 9900
11
11
12 ; ##################################################
12 ; ##################################################
13 ; WAITRESS WSGI SERVER - Recommended for Development
13 ; WAITRESS WSGI SERVER - Recommended for Development
14 ; ##################################################
14 ; ##################################################
15
15
16 ; use server type
16 ; use server type
17 use = egg:waitress#main
17 use = egg:waitress#main
18
18
19 ; number of worker threads
19 ; number of worker threads
20 threads = 5
20 threads = 5
21
21
22 ; MAX BODY SIZE 100GB
22 ; MAX BODY SIZE 100GB
23 max_request_body_size = 107374182400
23 max_request_body_size = 107374182400
24
24
25 ; Use poll instead of select, fixes file descriptors limits problems.
25 ; Use poll instead of select, fixes file descriptors limits problems.
26 ; May not work on old windows systems.
26 ; May not work on old windows systems.
27 asyncore_use_poll = true
27 asyncore_use_poll = true
28
28
29
29
30 ; ###########################
30 ; ###########################
31 ; GUNICORN APPLICATION SERVER
31 ; GUNICORN APPLICATION SERVER
32 ; ###########################
32 ; ###########################
33
33
34 ; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
34 ; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
35
35
36 ; Module to use, this setting shouldn't be changed
36 ; Module to use, this setting shouldn't be changed
37 #use = egg:gunicorn#main
37 #use = egg:gunicorn#main
38
38
39 ; Sets the number of process workers. More workers means more concurrent connections
39 ; Sets the number of process workers. More workers means more concurrent connections
40 ; RhodeCode can handle at the same time. Each additional worker also it increases
40 ; RhodeCode can handle at the same time. Each additional worker also it increases
41 ; memory usage as each has it's own set of caches.
41 ; memory usage as each has it's own set of caches.
42 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
42 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
43 ; than 8-10 unless for really big deployments .e.g 700-1000 users.
43 ; than 8-10 unless for really big deployments .e.g 700-1000 users.
44 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
44 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
45 ; when using more than 1 worker.
45 ; when using more than 1 worker.
46 #workers = 2
46 #workers = 2
47
47
48 ; Gunicorn access log level
48 ; Gunicorn access log level
49 #loglevel = info
49 #loglevel = info
50
50
51 ; Process name visible in process list
51 ; Process name visible in process list
52 #proc_name = rhodecode_vcsserver
52 #proc_name = rhodecode_vcsserver
53
53
54 ; Type of worker class, one of `sync`, `gevent`
54 ; Type of worker class, one of `sync`, `gevent`
55 ; currently `sync` is the only option allowed.
55 ; currently `sync` is the only option allowed.
56 #worker_class = sync
56 #worker_class = sync
57
57
58 ; The maximum number of simultaneous clients. Valid only for gevent
58 ; The maximum number of simultaneous clients. Valid only for gevent
59 #worker_connections = 10
59 #worker_connections = 10
60
60
61 ; Max number of requests that worker will handle before being gracefully restarted.
61 ; Max number of requests that worker will handle before being gracefully restarted.
62 ; Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
62 ; Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
63 #max_requests = 1000
63 #max_requests = 1000
64 #max_requests_jitter = 30
64 #max_requests_jitter = 30
65
65
66 ; Amount of time a worker can spend with handling a request before it
66 ; Amount of time a worker can spend with handling a request before it
67 ; gets killed and restarted. By default set to 21600 (6hrs)
67 ; gets killed and restarted. By default set to 21600 (6hrs)
68 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
68 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
69 #timeout = 21600
69 #timeout = 21600
70
70
71 ; The maximum size of HTTP request line in bytes.
71 ; The maximum size of HTTP request line in bytes.
72 ; 0 for unlimited
72 ; 0 for unlimited
73 #limit_request_line = 0
73 #limit_request_line = 0
74
74
75 ; Limit the number of HTTP headers fields in a request.
75 ; Limit the number of HTTP headers fields in a request.
76 ; By default this value is 100 and can't be larger than 32768.
76 ; By default this value is 100 and can't be larger than 32768.
77 #limit_request_fields = 32768
77 #limit_request_fields = 32768
78
78
79 ; Limit the allowed size of an HTTP request header field.
79 ; Limit the allowed size of an HTTP request header field.
80 ; Value is a positive number or 0.
80 ; Value is a positive number or 0.
81 ; Setting it to 0 will allow unlimited header field sizes.
81 ; Setting it to 0 will allow unlimited header field sizes.
82 #limit_request_field_size = 0
82 #limit_request_field_size = 0
83
83
84 ; Timeout for graceful workers restart.
84 ; Timeout for graceful workers restart.
85 ; After receiving a restart signal, workers have this much time to finish
85 ; After receiving a restart signal, workers have this much time to finish
86 ; serving requests. Workers still alive after the timeout (starting from the
86 ; serving requests. Workers still alive after the timeout (starting from the
87 ; receipt of the restart signal) are force killed.
87 ; receipt of the restart signal) are force killed.
88 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
88 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
89 #graceful_timeout = 3600
89 #graceful_timeout = 3600
90
90
91 # The number of seconds to wait for requests on a Keep-Alive connection.
91 # The number of seconds to wait for requests on a Keep-Alive connection.
92 # Generally set in the 1-5 seconds range.
92 # Generally set in the 1-5 seconds range.
93 #keepalive = 2
93 #keepalive = 2
94
94
95 ; Maximum memory usage that each worker can use before it will receive a
95 ; Maximum memory usage that each worker can use before it will receive a
96 ; graceful restart signal 0 = memory monitoring is disabled
96 ; graceful restart signal 0 = memory monitoring is disabled
97 ; Examples: 268435456 (256MB), 536870912 (512MB)
97 ; Examples: 268435456 (256MB), 536870912 (512MB)
98 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
98 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
99 #memory_max_usage = 0
99 #memory_max_usage = 0
100
100
101 ; How often in seconds to check for memory usage for each gunicorn worker
101 ; How often in seconds to check for memory usage for each gunicorn worker
102 #memory_usage_check_interval = 60
102 #memory_usage_check_interval = 60
103
103
104 ; Threshold value for which we don't recycle worker if GarbageCollection
104 ; Threshold value for which we don't recycle worker if GarbageCollection
105 ; frees up enough resources. Before each restart we try to run GC on worker
105 ; frees up enough resources. Before each restart we try to run GC on worker
106 ; in case we get enough free memory after that, restart will not happen.
106 ; in case we get enough free memory after that, restart will not happen.
107 #memory_usage_recovery_threshold = 0.8
107 #memory_usage_recovery_threshold = 0.8
108
108
109
109
110 [app:main]
110 [app:main]
111 ; The %(here)s variable will be replaced with the absolute path of parent directory
111 ; The %(here)s variable will be replaced with the absolute path of parent directory
112 ; of this file
112 ; of this file
113 use = egg:rhodecode-vcsserver
113 use = egg:rhodecode-vcsserver
114
114
115
115
116 ; #############
116 ; #############
117 ; DEBUG OPTIONS
117 ; DEBUG OPTIONS
118 ; #############
118 ; #############
119
119
120 # During development the we want to have the debug toolbar enabled
120 # During development the we want to have the debug toolbar enabled
121 pyramid.includes =
121 pyramid.includes =
122 pyramid_debugtoolbar
122 pyramid_debugtoolbar
123
123
124 debugtoolbar.hosts = 0.0.0.0/0
124 debugtoolbar.hosts = 0.0.0.0/0
125 debugtoolbar.exclude_prefixes =
125 debugtoolbar.exclude_prefixes =
126 /css
126 /css
127 /fonts
127 /fonts
128 /images
128 /images
129 /js
129 /js
130
130
131 ; #################
131 ; #################
132 ; END DEBUG OPTIONS
132 ; END DEBUG OPTIONS
133 ; #################
133 ; #################
134
134
135 ; Pyramid default locales, we need this to be set
135 ; Pyramid default locales, we need this to be set
136 pyramid.default_locale_name = en
136 pyramid.default_locale_name = en
137
137
138 ; default locale used by VCS systems
138 ; default locale used by VCS systems
139 locale = en_US.UTF-8
139 locale = en_US.UTF-8
140
140
141 ; path to binaries for vcsserver, it should be set by the installer
141 ; path to binaries for vcsserver, it should be set by the installer
142 ; at installation time, e.g /home/user/vcsserver-1/profile/bin
142 ; at installation time, e.g /home/user/vcsserver-1/profile/bin
143 ; it can also be a path to nix-build output in case of development
143 ; it can also be a path to nix-build output in case of development
144 core.binary_dir = ""
144 core.binary_dir = ""
145
145
146 ; Custom exception store path, defaults to TMPDIR
146 ; Custom exception store path, defaults to TMPDIR
147 ; This is used to store exception from RhodeCode in shared directory
147 ; This is used to store exception from RhodeCode in shared directory
148 #exception_tracker.store_path =
148 #exception_tracker.store_path =
149
149
150 ; #############
150 ; #############
151 ; DOGPILE CACHE
151 ; DOGPILE CACHE
152 ; #############
152 ; #############
153
153
154 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
154 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
155 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
155 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
156 cache_dir = %(here)s/data
156 cache_dir = %(here)s/data
157
157
158 ; ***************************************
158 ; ***************************************
159 ; `repo_object` cache, default file based
159 ; `repo_object` cache, default file based
160 ; ***************************************
160 ; ***************************************
161
161
162 ; `repo_object` cache settings for vcs methods for repositories
162 ; `repo_object` cache settings for vcs methods for repositories
163 rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
163 rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
164
164
165 ; cache auto-expires after N seconds
165 ; cache auto-expires after N seconds
166 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
166 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
167 rc_cache.repo_object.expiration_time = 2592000
167 rc_cache.repo_object.expiration_time = 2592000
168
168
169 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
169 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
170 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
170 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
171
171
172 ; ***********************************************************
172 ; ***********************************************************
173 ; `repo_object` cache with redis backend
173 ; `repo_object` cache with redis backend
174 ; recommended for larger instance, and for better performance
174 ; recommended for larger instance, and for better performance
175 ; ***********************************************************
175 ; ***********************************************************
176
176
177 ; `repo_object` cache settings for vcs methods for repositories
177 ; `repo_object` cache settings for vcs methods for repositories
178 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
178 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
179
179
180 ; cache auto-expires after N seconds
180 ; cache auto-expires after N seconds
181 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
181 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
182 #rc_cache.repo_object.expiration_time = 2592000
182 #rc_cache.repo_object.expiration_time = 2592000
183
183
184 ; redis_expiration_time needs to be greater then expiration_time
184 ; redis_expiration_time needs to be greater then expiration_time
185 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000
185 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000
186
186
187 #rc_cache.repo_object.arguments.host = localhost
187 #rc_cache.repo_object.arguments.host = localhost
188 #rc_cache.repo_object.arguments.port = 6379
188 #rc_cache.repo_object.arguments.port = 6379
189 #rc_cache.repo_object.arguments.db = 5
189 #rc_cache.repo_object.arguments.db = 5
190 #rc_cache.repo_object.arguments.socket_timeout = 30
190 #rc_cache.repo_object.arguments.socket_timeout = 30
191 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
191 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
192 #rc_cache.repo_object.arguments.distributed_lock = true
192 #rc_cache.repo_object.arguments.distributed_lock = true
193
193
194 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
194 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
195 #rc_cache.repo_object.arguments.lock_auto_renewal = true
195 #rc_cache.repo_object.arguments.lock_auto_renewal = true
196
196
197 ; Statsd client config, this is used to send metrics to statsd
197 ; Statsd client config, this is used to send metrics to statsd
198 ; We recommend setting statsd_exported and scrape them using Promethues
198 ; We recommend setting statsd_exported and scrape them using Promethues
199 #statsd.enabled = false
199 #statsd.enabled = false
200 #statsd.statsd_host = 0.0.0.0
200 #statsd.statsd_host = 0.0.0.0
201 #statsd.statsd_port = 8125
201 #statsd.statsd_port = 8125
202 #statsd.statsd_prefix =
202 #statsd.statsd_prefix =
203 #statsd.statsd_ipv6 = false
203 #statsd.statsd_ipv6 = false
204
204
205 ; #####################
205 ; #####################
206 ; LOGGING CONFIGURATION
206 ; LOGGING CONFIGURATION
207 ; #####################
207 ; #####################
208 [loggers]
208 [loggers]
209 keys = root, vcsserver
209 keys = root, vcsserver
210
210
211 [handlers]
211 [handlers]
212 keys = console
212 keys = console
213
213
214 [formatters]
214 [formatters]
215 keys = generic
215 keys = generic
216
216
217 ; #######
217 ; #######
218 ; LOGGERS
218 ; LOGGERS
219 ; #######
219 ; #######
220 [logger_root]
220 [logger_root]
221 level = NOTSET
221 level = NOTSET
222 handlers = console
222 handlers = console
223
223
224 [logger_vcsserver]
224 [logger_vcsserver]
225 level = DEBUG
225 level = DEBUG
226 handlers =
226 handlers =
227 qualname = vcsserver
227 qualname = vcsserver
228 propagate = 1
228 propagate = 1
229
229
230
230
231 ; ########
231 ; ########
232 ; HANDLERS
232 ; HANDLERS
233 ; ########
233 ; ########
234
234
235 [handler_console]
235 [handler_console]
236 class = StreamHandler
236 class = StreamHandler
237 args = (sys.stderr, )
237 args = (sys.stderr, )
238 level = DEBUG
238 level = DEBUG
239 formatter = generic
239 formatter = generic
240 ; To enable JSON formatted logs replace generic with json
241 ; This allows sending properly formatted logs to grafana loki or elasticsearch
242 #formatter = json
243
240
244
241 ; ##########
245 ; ##########
242 ; FORMATTERS
246 ; FORMATTERS
243 ; ##########
247 ; ##########
244
248
245 [formatter_generic]
249 [formatter_generic]
246 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
250 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
247 datefmt = %Y-%m-%d %H:%M:%S
251 datefmt = %Y-%m-%d %H:%M:%S
252
253 [formatter_json]
254 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
255 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
256
@@ -1,265 +1,269 b''
1 """
1 """
2 Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
2 Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
3 Gunicorn configuration should be managed by .ini files entries of RhodeCode or VCSServer
3 Gunicorn configuration should be managed by .ini files entries of RhodeCode or VCSServer
4 """
4 """
5
5
6 import gc
6 import gc
7 import os
7 import os
8 import sys
8 import sys
9 import math
9 import math
10 import time
10 import time
11 import threading
11 import threading
12 import traceback
12 import traceback
13 import random
13 import random
14 from gunicorn.glogging import Logger
14 from gunicorn.glogging import Logger
15
15
16
16
17 def get_workers():
17 def get_workers():
18 import multiprocessing
18 import multiprocessing
19 return multiprocessing.cpu_count() * 2 + 1
19 return multiprocessing.cpu_count() * 2 + 1
20
20
21 # GLOBAL
21 # GLOBAL
22 errorlog = '-'
22 errorlog = '-'
23 accesslog = '-'
23 accesslog = '-'
24
24
25
25
26 # SERVER MECHANICS
26 # SERVER MECHANICS
27 # None == system temp dir
27 # None == system temp dir
28 # worker_tmp_dir is recommended to be set to some tmpfs
28 # worker_tmp_dir is recommended to be set to some tmpfs
29 worker_tmp_dir = None
29 worker_tmp_dir = None
30 tmp_upload_dir = None
30 tmp_upload_dir = None
31
31
32 # Custom log format
32 # Custom log format
33 access_log_format = (
33 access_log_format = (
34 '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
34 '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
35
35
36 # loki format for easier parsing in grafana
37 #access_log_format = (
38 # 'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
39
36 # self adjust workers based on CPU count
40 # self adjust workers based on CPU count
37 # workers = get_workers()
41 # workers = get_workers()
38
42
39
43
40 def _get_process_rss(pid=None):
44 def _get_process_rss(pid=None):
41 try:
45 try:
42 import psutil
46 import psutil
43 if pid:
47 if pid:
44 proc = psutil.Process(pid)
48 proc = psutil.Process(pid)
45 else:
49 else:
46 proc = psutil.Process()
50 proc = psutil.Process()
47 return proc.memory_info().rss
51 return proc.memory_info().rss
48 except Exception:
52 except Exception:
49 return None
53 return None
50
54
51
55
52 def _get_config(ini_path):
56 def _get_config(ini_path):
53
57
54 try:
58 try:
55 import configparser
59 import configparser
56 except ImportError:
60 except ImportError:
57 import ConfigParser as configparser
61 import ConfigParser as configparser
58 try:
62 try:
59 config = configparser.RawConfigParser()
63 config = configparser.RawConfigParser()
60 config.read(ini_path)
64 config.read(ini_path)
61 return config
65 return config
62 except Exception:
66 except Exception:
63 return None
67 return None
64
68
65
69
66 def _time_with_offset(memory_usage_check_interval):
70 def _time_with_offset(memory_usage_check_interval):
67 return time.time() - random.randint(0, memory_usage_check_interval/2.0)
71 return time.time() - random.randint(0, memory_usage_check_interval/2.0)
68
72
69
73
70 def pre_fork(server, worker):
74 def pre_fork(server, worker):
71 pass
75 pass
72
76
73
77
74 def post_fork(server, worker):
78 def post_fork(server, worker):
75
79
76 # memory spec defaults
80 # memory spec defaults
77 _memory_max_usage = 0
81 _memory_max_usage = 0
78 _memory_usage_check_interval = 60
82 _memory_usage_check_interval = 60
79 _memory_usage_recovery_threshold = 0.8
83 _memory_usage_recovery_threshold = 0.8
80
84
81 ini_path = os.path.abspath(server.cfg.paste)
85 ini_path = os.path.abspath(server.cfg.paste)
82 conf = _get_config(ini_path)
86 conf = _get_config(ini_path)
83
87
84 section = 'server:main'
88 section = 'server:main'
85 if conf and conf.has_section(section):
89 if conf and conf.has_section(section):
86
90
87 if conf.has_option(section, 'memory_max_usage'):
91 if conf.has_option(section, 'memory_max_usage'):
88 _memory_max_usage = conf.getint(section, 'memory_max_usage')
92 _memory_max_usage = conf.getint(section, 'memory_max_usage')
89
93
90 if conf.has_option(section, 'memory_usage_check_interval'):
94 if conf.has_option(section, 'memory_usage_check_interval'):
91 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
95 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
92
96
93 if conf.has_option(section, 'memory_usage_recovery_threshold'):
97 if conf.has_option(section, 'memory_usage_recovery_threshold'):
94 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
98 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
95
99
96 worker._memory_max_usage = _memory_max_usage
100 worker._memory_max_usage = _memory_max_usage
97 worker._memory_usage_check_interval = _memory_usage_check_interval
101 worker._memory_usage_check_interval = _memory_usage_check_interval
98 worker._memory_usage_recovery_threshold = _memory_usage_recovery_threshold
102 worker._memory_usage_recovery_threshold = _memory_usage_recovery_threshold
99
103
100 # register memory last check time, with some random offset so we don't recycle all
104 # register memory last check time, with some random offset so we don't recycle all
101 # at once
105 # at once
102 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
106 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
103
107
104 if _memory_max_usage:
108 if _memory_max_usage:
105 server.log.info("[%-10s] WORKER spawned with max memory set at %s", worker.pid,
109 server.log.info("[%-10s] WORKER spawned with max memory set at %s", worker.pid,
106 _format_data_size(_memory_max_usage))
110 _format_data_size(_memory_max_usage))
107 else:
111 else:
108 server.log.info("[%-10s] WORKER spawned", worker.pid)
112 server.log.info("[%-10s] WORKER spawned", worker.pid)
109
113
110
114
111 def pre_exec(server):
115 def pre_exec(server):
112 server.log.info("Forked child, re-executing.")
116 server.log.info("Forked child, re-executing.")
113
117
114
118
115 def on_starting(server):
119 def on_starting(server):
116 server_lbl = '{} {}'.format(server.proc_name, server.address)
120 server_lbl = '{} {}'.format(server.proc_name, server.address)
117 server.log.info("Server %s is starting.", server_lbl)
121 server.log.info("Server %s is starting.", server_lbl)
118
122
119
123
120 def when_ready(server):
124 def when_ready(server):
121 server.log.info("Server %s is ready. Spawning workers", server)
125 server.log.info("Server %s is ready. Spawning workers", server)
122
126
123
127
124 def on_reload(server):
128 def on_reload(server):
125 pass
129 pass
126
130
127
131
128 def _format_data_size(size, unit="B", precision=1, binary=True):
132 def _format_data_size(size, unit="B", precision=1, binary=True):
129 """Format a number using SI units (kilo, mega, etc.).
133 """Format a number using SI units (kilo, mega, etc.).
130
134
131 ``size``: The number as a float or int.
135 ``size``: The number as a float or int.
132
136
133 ``unit``: The unit name in plural form. Examples: "bytes", "B".
137 ``unit``: The unit name in plural form. Examples: "bytes", "B".
134
138
135 ``precision``: How many digits to the right of the decimal point. Default
139 ``precision``: How many digits to the right of the decimal point. Default
136 is 1. 0 suppresses the decimal point.
140 is 1. 0 suppresses the decimal point.
137
141
138 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
142 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
139 If true, use base-2 binary prefixes (kibi = Ki = 1024).
143 If true, use base-2 binary prefixes (kibi = Ki = 1024).
140
144
141 ``full_name``: If false (default), use the prefix abbreviation ("k" or
145 ``full_name``: If false (default), use the prefix abbreviation ("k" or
142 "Ki"). If true, use the full prefix ("kilo" or "kibi"). If false,
146 "Ki"). If true, use the full prefix ("kilo" or "kibi"). If false,
143 use abbreviation ("k" or "Ki").
147 use abbreviation ("k" or "Ki").
144
148
145 """
149 """
146
150
147 if not binary:
151 if not binary:
148 base = 1000
152 base = 1000
149 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
153 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
150 else:
154 else:
151 base = 1024
155 base = 1024
152 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
156 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
153
157
154 sign = ""
158 sign = ""
155 if size > 0:
159 if size > 0:
156 m = int(math.log(size, base))
160 m = int(math.log(size, base))
157 elif size < 0:
161 elif size < 0:
158 sign = "-"
162 sign = "-"
159 size = -size
163 size = -size
160 m = int(math.log(size, base))
164 m = int(math.log(size, base))
161 else:
165 else:
162 m = 0
166 m = 0
163 if m > 8:
167 if m > 8:
164 m = 8
168 m = 8
165
169
166 if m == 0:
170 if m == 0:
167 precision = '%.0f'
171 precision = '%.0f'
168 else:
172 else:
169 precision = '%%.%df' % precision
173 precision = '%%.%df' % precision
170
174
171 size = precision % (size / math.pow(base, m))
175 size = precision % (size / math.pow(base, m))
172
176
173 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
177 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
174
178
175
179
176 def _check_memory_usage(worker):
180 def _check_memory_usage(worker):
177 memory_max_usage = worker._memory_max_usage
181 memory_max_usage = worker._memory_max_usage
178 if not memory_max_usage:
182 if not memory_max_usage:
179 return
183 return
180
184
181 memory_usage_check_interval = worker._memory_usage_check_interval
185 memory_usage_check_interval = worker._memory_usage_check_interval
182 memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
186 memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
183
187
184 elapsed = time.time() - worker._last_memory_check_time
188 elapsed = time.time() - worker._last_memory_check_time
185 if elapsed > memory_usage_check_interval:
189 if elapsed > memory_usage_check_interval:
186 mem_usage = _get_process_rss()
190 mem_usage = _get_process_rss()
187 if mem_usage and mem_usage > memory_max_usage:
191 if mem_usage and mem_usage > memory_max_usage:
188 worker.log.info(
192 worker.log.info(
189 "memory usage %s > %s, forcing gc",
193 "memory usage %s > %s, forcing gc",
190 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
194 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
191 # Try to clean it up by forcing a full collection.
195 # Try to clean it up by forcing a full collection.
192 gc.collect()
196 gc.collect()
193 mem_usage = _get_process_rss()
197 mem_usage = _get_process_rss()
194 if mem_usage > memory_usage_recovery_threshold:
198 if mem_usage > memory_usage_recovery_threshold:
195 # Didn't clean up enough, we'll have to terminate.
199 # Didn't clean up enough, we'll have to terminate.
196 worker.log.warning(
200 worker.log.warning(
197 "memory usage %s > %s after gc, quitting",
201 "memory usage %s > %s after gc, quitting",
198 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
202 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
199 # This will cause worker to auto-restart itself
203 # This will cause worker to auto-restart itself
200 worker.alive = False
204 worker.alive = False
201 worker._last_memory_check_time = time.time()
205 worker._last_memory_check_time = time.time()
202
206
203
207
204 def worker_int(worker):
208 def worker_int(worker):
205 worker.log.info("[%-10s] worker received INT or QUIT signal", worker.pid)
209 worker.log.info("[%-10s] worker received INT or QUIT signal", worker.pid)
206
210
207 # get traceback info, on worker crash
211 # get traceback info, on worker crash
208 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
212 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
209 code = []
213 code = []
210 for thread_id, stack in sys._current_frames().items():
214 for thread_id, stack in sys._current_frames().items():
211 code.append(
215 code.append(
212 "\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
216 "\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
213 for fname, lineno, name, line in traceback.extract_stack(stack):
217 for fname, lineno, name, line in traceback.extract_stack(stack):
214 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
218 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
215 if line:
219 if line:
216 code.append(" %s" % (line.strip()))
220 code.append(" %s" % (line.strip()))
217 worker.log.debug("\n".join(code))
221 worker.log.debug("\n".join(code))
218
222
219
223
220 def worker_abort(worker):
224 def worker_abort(worker):
221 worker.log.info("[%-10s] worker received SIGABRT signal", worker.pid)
225 worker.log.info("[%-10s] worker received SIGABRT signal", worker.pid)
222
226
223
227
224 def worker_exit(server, worker):
228 def worker_exit(server, worker):
225 worker.log.info("[%-10s] worker exit", worker.pid)
229 worker.log.info("[%-10s] worker exit", worker.pid)
226
230
227
231
228 def child_exit(server, worker):
232 def child_exit(server, worker):
229 worker.log.info("[%-10s] worker child exit", worker.pid)
233 worker.log.info("[%-10s] worker child exit", worker.pid)
230
234
231
235
232 def pre_request(worker, req):
236 def pre_request(worker, req):
233 worker.start_time = time.time()
237 worker.start_time = time.time()
234 worker.log.debug(
238 worker.log.debug(
235 "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
239 "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
236
240
237
241
238 def post_request(worker, req, environ, resp):
242 def post_request(worker, req, environ, resp):
239 total_time = time.time() - worker.start_time
243 total_time = time.time() - worker.start_time
240 # Gunicorn sometimes has problems with reading the status_code
244 # Gunicorn sometimes has problems with reading the status_code
241 status_code = getattr(resp, 'status_code', '')
245 status_code = getattr(resp, 'status_code', '')
242 worker.log.debug(
246 worker.log.debug(
243 "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
247 "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
244 worker.nr, req.method, req.path, status_code, total_time)
248 worker.nr, req.method, req.path, status_code, total_time)
245 _check_memory_usage(worker)
249 _check_memory_usage(worker)
246
250
247
251
248 class RhodeCodeLogger(Logger):
252 class RhodeCodeLogger(Logger):
249 """
253 """
250 Custom Logger that allows some customization that gunicorn doesn't allow
254 Custom Logger that allows some customization that gunicorn doesn't allow
251 """
255 """
252
256
253 datefmt = r"%Y-%m-%d %H:%M:%S"
257 datefmt = r"%Y-%m-%d %H:%M:%S"
254
258
255 def __init__(self, cfg):
259 def __init__(self, cfg):
256 Logger.__init__(self, cfg)
260 Logger.__init__(self, cfg)
257
261
258 def now(self):
262 def now(self):
259 """ return date in RhodeCode Log format """
263 """ return date in RhodeCode Log format """
260 now = time.time()
264 now = time.time()
261 msecs = int((now - long(now)) * 1000)
265 msecs = int((now - long(now)) * 1000)
262 return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
266 return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
263
267
264
268
265 logger_class = RhodeCodeLogger
269 logger_class = RhodeCodeLogger
@@ -1,210 +1,219 b''
1 ## -*- coding: utf-8 -*-
1 ## -*- coding: utf-8 -*-
2
2
3 ; #################################
3 ; #################################
4 ; RHODECODE VCSSERVER CONFIGURATION
4 ; RHODECODE VCSSERVER CONFIGURATION
5 ; #################################
5 ; #################################
6
6
7 [server:main]
7 [server:main]
8 ; COMMON HOST/IP CONFIG
8 ; COMMON HOST/IP CONFIG
9 host = 127.0.0.1
9 host = 127.0.0.1
10 port = 9900
10 port = 9900
11
11
12
12
13 ; ###########################
13 ; ###########################
14 ; GUNICORN APPLICATION SERVER
14 ; GUNICORN APPLICATION SERVER
15 ; ###########################
15 ; ###########################
16
16
17 ; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
17 ; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
18
18
19 ; Module to use, this setting shouldn't be changed
19 ; Module to use, this setting shouldn't be changed
20 use = egg:gunicorn#main
20 use = egg:gunicorn#main
21
21
22 ; Sets the number of process workers. More workers means more concurrent connections
22 ; Sets the number of process workers. More workers means more concurrent connections
23 ; RhodeCode can handle at the same time. Each additional worker also increases
23 ; RhodeCode can handle at the same time. Each additional worker also increases
24 ; memory usage as each has its own set of caches.
24 ; memory usage as each has its own set of caches.
25 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
25 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
26 ; than 8-10 unless for really big deployments, e.g. 700-1000 users.
26 ; than 8-10 unless for really big deployments, e.g. 700-1000 users.
27 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
27 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
28 ; when using more than 1 worker.
28 ; when using more than 1 worker.
29 workers = 2
29 workers = 2
30
30
31 ; Gunicorn access log level
31 ; Gunicorn access log level
32 loglevel = info
32 loglevel = info
33
33
34 ; Process name visible in process list
34 ; Process name visible in process list
35 proc_name = rhodecode_vcsserver
35 proc_name = rhodecode_vcsserver
36
36
37 ; Type of worker class, one of `sync`, `gevent`
37 ; Type of worker class, one of `sync`, `gevent`
38 ; currently `sync` is the only option allowed.
38 ; currently `sync` is the only option allowed.
39 worker_class = sync
39 worker_class = sync
40
40
41 ; The maximum number of simultaneous clients. Valid only for gevent
41 ; The maximum number of simultaneous clients. Valid only for gevent
42 worker_connections = 10
42 worker_connections = 10
43
43
44 ; Max number of requests that worker will handle before being gracefully restarted.
44 ; Max number of requests that worker will handle before being gracefully restarted.
45 ; Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
45 ; Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
46 max_requests = 1000
46 max_requests = 1000
47 max_requests_jitter = 30
47 max_requests_jitter = 30
48
48
49 ; Amount of time a worker can spend with handling a request before it
49 ; Amount of time a worker can spend with handling a request before it
50 ; gets killed and restarted. By default set to 21600 (6hrs)
50 ; gets killed and restarted. By default set to 21600 (6hrs)
51 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
51 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
52 timeout = 21600
52 timeout = 21600
53
53
54 ; The maximum size of HTTP request line in bytes.
54 ; The maximum size of HTTP request line in bytes.
55 ; 0 for unlimited
55 ; 0 for unlimited
56 limit_request_line = 0
56 limit_request_line = 0
57
57
58 ; Limit the number of HTTP headers fields in a request.
58 ; Limit the number of HTTP headers fields in a request.
59 ; By default this value is 100 and can't be larger than 32768.
59 ; By default this value is 100 and can't be larger than 32768.
60 limit_request_fields = 32768
60 limit_request_fields = 32768
61
61
62 ; Limit the allowed size of an HTTP request header field.
62 ; Limit the allowed size of an HTTP request header field.
63 ; Value is a positive number or 0.
63 ; Value is a positive number or 0.
64 ; Setting it to 0 will allow unlimited header field sizes.
64 ; Setting it to 0 will allow unlimited header field sizes.
65 limit_request_field_size = 0
65 limit_request_field_size = 0
66
66
67 ; Timeout for graceful workers restart.
67 ; Timeout for graceful workers restart.
68 ; After receiving a restart signal, workers have this much time to finish
68 ; After receiving a restart signal, workers have this much time to finish
69 ; serving requests. Workers still alive after the timeout (starting from the
69 ; serving requests. Workers still alive after the timeout (starting from the
70 ; receipt of the restart signal) are force killed.
70 ; receipt of the restart signal) are force killed.
71 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
71 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
72 graceful_timeout = 3600
72 graceful_timeout = 3600
73
73
74 # The number of seconds to wait for requests on a Keep-Alive connection.
74 # The number of seconds to wait for requests on a Keep-Alive connection.
75 # Generally set in the 1-5 seconds range.
75 # Generally set in the 1-5 seconds range.
76 keepalive = 2
76 keepalive = 2
77
77
78 ; Maximum memory usage that each worker can use before it will receive a
78 ; Maximum memory usage that each worker can use before it will receive a
79 ; graceful restart signal 0 = memory monitoring is disabled
79 ; graceful restart signal 0 = memory monitoring is disabled
80 ; Examples: 268435456 (256MB), 536870912 (512MB)
80 ; Examples: 268435456 (256MB), 536870912 (512MB)
81 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
81 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
82 memory_max_usage = 0
82 memory_max_usage = 0
83
83
84 ; How often in seconds to check for memory usage for each gunicorn worker
84 ; How often in seconds to check for memory usage for each gunicorn worker
85 memory_usage_check_interval = 60
85 memory_usage_check_interval = 60
86
86
87 ; Threshold value for which we don't recycle worker if GarbageCollection
87 ; Threshold value for which we don't recycle worker if GarbageCollection
88 ; frees up enough resources. Before each restart we try to run GC on worker
88 ; frees up enough resources. Before each restart we try to run GC on worker
89 ; in case we get enough free memory after that, restart will not happen.
89 ; in case we get enough free memory after that, restart will not happen.
90 memory_usage_recovery_threshold = 0.8
90 memory_usage_recovery_threshold = 0.8
91
91
92
92
93 [app:main]
93 [app:main]
94 ; The %(here)s variable will be replaced with the absolute path of parent directory
94 ; The %(here)s variable will be replaced with the absolute path of parent directory
95 ; of this file
95 ; of this file
96 use = egg:rhodecode-vcsserver
96 use = egg:rhodecode-vcsserver
97
97
98 ; Pyramid default locales, we need this to be set
98 ; Pyramid default locales, we need this to be set
99 pyramid.default_locale_name = en
99 pyramid.default_locale_name = en
100
100
101 ; default locale used by VCS systems
101 ; default locale used by VCS systems
102 locale = en_US.UTF-8
102 locale = en_US.UTF-8
103
103
104 ; path to binaries for vcsserver, it should be set by the installer
104 ; path to binaries for vcsserver, it should be set by the installer
105 ; at installation time, e.g /home/user/vcsserver-1/profile/bin
105 ; at installation time, e.g /home/user/vcsserver-1/profile/bin
106 ; it can also be a path to nix-build output in case of development
106 ; it can also be a path to nix-build output in case of development
107 core.binary_dir = ""
107 core.binary_dir = ""
108
108
109 ; Custom exception store path, defaults to TMPDIR
109 ; Custom exception store path, defaults to TMPDIR
110 ; This is used to store exception from RhodeCode in shared directory
110 ; This is used to store exception from RhodeCode in shared directory
111 #exception_tracker.store_path =
111 #exception_tracker.store_path =
112
112
113 ; #############
113 ; #############
114 ; DOGPILE CACHE
114 ; DOGPILE CACHE
115 ; #############
115 ; #############
116
116
117 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
117 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
118 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
118 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
119 cache_dir = %(here)s/data
119 cache_dir = %(here)s/data
120
120
121 ; ***************************************
121 ; ***************************************
122 ; `repo_object` cache, default file based
122 ; `repo_object` cache, default file based
123 ; ***************************************
123 ; ***************************************
124
124
125 ; `repo_object` cache settings for vcs methods for repositories
125 ; `repo_object` cache settings for vcs methods for repositories
126 rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
126 rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
127
127
128 ; cache auto-expires after N seconds
128 ; cache auto-expires after N seconds
129 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
129 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
130 rc_cache.repo_object.expiration_time = 2592000
130 rc_cache.repo_object.expiration_time = 2592000
131
131
132 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
132 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
133 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
133 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
134
134
135 ; ***********************************************************
135 ; ***********************************************************
136 ; `repo_object` cache with redis backend
136 ; `repo_object` cache with redis backend
137 ; recommended for larger instance, and for better performance
137 ; recommended for larger instance, and for better performance
138 ; ***********************************************************
138 ; ***********************************************************
139
139
140 ; `repo_object` cache settings for vcs methods for repositories
140 ; `repo_object` cache settings for vcs methods for repositories
141 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
141 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
142
142
143 ; cache auto-expires after N seconds
143 ; cache auto-expires after N seconds
144 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
144 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
145 #rc_cache.repo_object.expiration_time = 2592000
145 #rc_cache.repo_object.expiration_time = 2592000
146
146
147 ; redis_expiration_time needs to be greater than expiration_time
147 ; redis_expiration_time needs to be greater than expiration_time
148 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000
148 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000
149
149
150 #rc_cache.repo_object.arguments.host = localhost
150 #rc_cache.repo_object.arguments.host = localhost
151 #rc_cache.repo_object.arguments.port = 6379
151 #rc_cache.repo_object.arguments.port = 6379
152 #rc_cache.repo_object.arguments.db = 5
152 #rc_cache.repo_object.arguments.db = 5
153 #rc_cache.repo_object.arguments.socket_timeout = 30
153 #rc_cache.repo_object.arguments.socket_timeout = 30
154 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
154 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
155 #rc_cache.repo_object.arguments.distributed_lock = true
155 #rc_cache.repo_object.arguments.distributed_lock = true
156
156
157 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
157 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
158 #rc_cache.repo_object.arguments.lock_auto_renewal = true
158 #rc_cache.repo_object.arguments.lock_auto_renewal = true
159
159
160 ; Statsd client config, this is used to send metrics to statsd
160 ; Statsd client config, this is used to send metrics to statsd
161 ; We recommend setting statsd_exported and scraping them using Prometheus
161 ; We recommend setting statsd_exported and scraping them using Prometheus
162 #statsd.enabled = false
162 #statsd.enabled = false
163 #statsd.statsd_host = 0.0.0.0
163 #statsd.statsd_host = 0.0.0.0
164 #statsd.statsd_port = 8125
164 #statsd.statsd_port = 8125
165 #statsd.statsd_prefix =
165 #statsd.statsd_prefix =
166 #statsd.statsd_ipv6 = false
166 #statsd.statsd_ipv6 = false
167
167
168 ; #####################
168 ; #####################
169 ; LOGGING CONFIGURATION
169 ; LOGGING CONFIGURATION
170 ; #####################
170 ; #####################
171 [loggers]
171 [loggers]
172 keys = root, vcsserver
172 keys = root, vcsserver
173
173
174 [handlers]
174 [handlers]
175 keys = console
175 keys = console
176
176
177 [formatters]
177 [formatters]
178 keys = generic
178 keys = generic
179
179
180 ; #######
180 ; #######
181 ; LOGGERS
181 ; LOGGERS
182 ; #######
182 ; #######
183 [logger_root]
183 [logger_root]
184 level = NOTSET
184 level = NOTSET
185 handlers = console
185 handlers = console
186
186
187 [logger_vcsserver]
187 [logger_vcsserver]
188 level = DEBUG
188 level = DEBUG
189 handlers =
189 handlers =
190 qualname = vcsserver
190 qualname = vcsserver
191 propagate = 1
191 propagate = 1
192
192
193
193
194 ; ########
194 ; ########
195 ; HANDLERS
195 ; HANDLERS
196 ; ########
196 ; ########
197
197
198 [handler_console]
198 [handler_console]
199 class = StreamHandler
199 class = StreamHandler
200 args = (sys.stderr, )
200 args = (sys.stderr, )
201 level = INFO
201 level = INFO
202 formatter = generic
202 formatter = generic
203 ; To enable JSON formatted logs replace generic with json
204 ; This allows sending properly formatted logs to grafana loki or elasticsearch
205 #formatter = json
206
203
207
204 ; ##########
208 ; ##########
205 ; FORMATTERS
209 ; FORMATTERS
206 ; ##########
210 ; ##########
207
211
208 [formatter_generic]
212 [formatter_generic]
209 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
213 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
210 datefmt = %Y-%m-%d %H:%M:%S
214 datefmt = %Y-%m-%d %H:%M:%S
215
216 [formatter_json]
217 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
218 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
219
General Comments 0
You need to be logged in to leave comments. Login now