config: updated configs to still not comment out logging configuration....
super-admin -
r1024:8705056f default
@@ -1,276 +1,276 @@
## -*- coding: utf-8 -*-

; #################################
; RHODECODE VCSSERVER CONFIGURATION
; #################################

[server:main]
; COMMON HOST/IP CONFIG
host = 0.0.0.0
port = 9900

; ##################################################
; WAITRESS WSGI SERVER - Recommended for Development
; ##################################################

; use server type
use = egg:waitress#main

; number of worker threads
threads = 5

; MAX BODY SIZE 100GB
max_request_body_size = 107374182400

; Use poll instead of select, fixes file descriptor limit problems.
; May not work on old Windows systems.
asyncore_use_poll = true

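For context, the [server:main] waitress options above map onto keyword arguments of waitress.serve; a minimal sketch of the equivalent programmatic setup, assuming a placeholder WSGI callable named `app` (not part of this commit):

    # minimal sketch only; `app` is a hypothetical WSGI callable
    from waitress import serve

    serve(
        app,
        host='0.0.0.0',
        port=9900,
        threads=5,
        max_request_body_size=107374182400,
        asyncore_use_poll=True,
    )
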
; ###########################
; GUNICORN APPLICATION SERVER
; ###########################

; run with gunicorn --paste rhodecode.ini

; Module to use, this setting shouldn't be changed
#use = egg:gunicorn#main

; Sets the number of process workers. More workers means more concurrent connections
; RhodeCode can handle at the same time. Each additional worker also increases
; memory usage, as each has its own set of caches.
; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
; than 8-10 unless for really big deployments, e.g. 700-1000 users.
; `instance_id = *` must be set in the [app:main] section below (which is the default)
; when using more than 1 worker.
#workers = 2

; Gunicorn access log level
#loglevel = info

; Process name visible in process list
#proc_name = rhodecode_vcsserver

; Type of worker class, one of `sync`, `gevent`
; currently `sync` is the only option allowed.
#worker_class = sync

; The maximum number of simultaneous clients. Valid only for gevent
#worker_connections = 10

; Max number of requests that worker will handle before being gracefully restarted.
; Prevents memory leaks; jitter adds variability so not all workers are restarted at once.
#max_requests = 1000
#max_requests_jitter = 30

; Amount of time a worker can spend handling a request before it
; gets killed and restarted. By default set to 21600 (6hrs)
; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
#timeout = 21600

; The maximum size of HTTP request line in bytes.
; 0 for unlimited
#limit_request_line = 0

; Limit the number of HTTP header fields in a request.
; By default this value is 100 and can't be larger than 32768.
#limit_request_fields = 32768

; Limit the allowed size of an HTTP request header field.
; Value is a positive number or 0.
; Setting it to 0 will allow unlimited header field sizes.
#limit_request_field_size = 0

; Timeout for graceful workers restart.
; After receiving a restart signal, workers have this much time to finish
; serving requests. Workers still alive after the timeout (starting from the
; receipt of the restart signal) are force killed.
; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
#graceful_timeout = 21600

# The number of seconds to wait for requests on a Keep-Alive connection.
# Generally set in the 1-5 seconds range.
#keepalive = 2

; Maximum memory usage that each worker can use before it will receive a
; graceful restart signal. 0 = memory monitoring is disabled
; Examples: 268435456 (256MB), 536870912 (512MB)
; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
#memory_max_usage = 0

; How often in seconds to check for memory usage for each gunicorn worker
#memory_usage_check_interval = 60

; Threshold value for which we don't recycle worker if GarbageCollection
; frees up enough resources. Before each restart we try to run GC on the worker;
; if we get enough free memory after that, the restart will not happen.
#memory_usage_recovery_threshold = 0.8


[app:main]
; The %(here)s variable will be replaced with the absolute path of the parent directory
; of this file
; Each option in app:main can be overridden by an environment variable
;
;To override an option:
;
;RC_<KeyName>
;Everything should be uppercase, . and - should be replaced by _.
;For example, if you have these configuration settings:
;rc_cache.repo_object.backend = foo
;can be overridden by
;export RC_CACHE_REPO_OBJECT_BACKEND=foo

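To make the override rule above concrete, a small sketch of the key-to-variable mapping described in the comments; the helper name `ini_key_to_env_var` is illustrative only and is not part of vcsserver:

    # minimal sketch of the mapping described in the comments above
    def ini_key_to_env_var(key):
        # uppercase, replace '.' and '-' with '_', ensure the RC_ prefix
        name = key.upper().replace('.', '_').replace('-', '_')
        return name if name.startswith('RC_') else 'RC_' + name

    # matches the example from the comments:
    # rc_cache.repo_object.backend -> RC_CACHE_REPO_OBJECT_BACKEND
    print(ini_key_to_env_var('rc_cache.repo_object.backend'))
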
use = egg:rhodecode-vcsserver


; #############
; DEBUG OPTIONS
; #############

# During development we want to have the debug toolbar enabled
pyramid.includes =
    pyramid_debugtoolbar

debugtoolbar.hosts = 0.0.0.0/0
debugtoolbar.exclude_prefixes =
    /css
    /fonts
    /images
    /js

; #################
; END DEBUG OPTIONS
; #################

; Pyramid default locales, we need this to be set
#pyramid.default_locale_name = en

; default locale used by VCS systems
#locale = en_US.UTF-8

; path to binaries for vcsserver, it should be set by the installer
; at installation time, e.g. /home/user/.rccontrol/vcsserver-1/profile/bin
; it can also be a path to nix-build output in case of development
core.binary_dir = ""

; Custom exception store path, defaults to TMPDIR
; This is used to store exceptions from RhodeCode in a shared directory
#exception_tracker.store_path =

; #############
; DOGPILE CACHE
; #############

; Default cache dir for caches. Putting this into a ramdisk can boost performance.
; eg. /tmpfs/data_ramdisk, however this directory might require a large amount of space
#cache_dir = %(here)s/data

; ***************************************
; `repo_object` cache, default file based
; ***************************************

; `repo_object` cache settings for vcs methods for repositories
#rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace

; cache auto-expires after N seconds
; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
#rc_cache.repo_object.expiration_time = 2592000

; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
#rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache_repo_object.db

; ***********************************************************
; `repo_object` cache with redis backend
; recommended for larger instances, and for better performance
; ***********************************************************

; `repo_object` cache settings for vcs methods for repositories
#rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack

; cache auto-expires after N seconds
; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
#rc_cache.repo_object.expiration_time = 2592000

; redis_expiration_time needs to be greater than expiration_time
#rc_cache.repo_object.arguments.redis_expiration_time = 3592000

#rc_cache.repo_object.arguments.host = localhost
#rc_cache.repo_object.arguments.port = 6379
#rc_cache.repo_object.arguments.db = 5
#rc_cache.repo_object.arguments.socket_timeout = 30
; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
#rc_cache.repo_object.arguments.distributed_lock = true

; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
#rc_cache.repo_object.arguments.lock_auto_renewal = true

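To illustrate how the rc_cache.repo_object.* keys above correspond to a dogpile.cache region, here is a hedged sketch using the stock dogpile.cache.redis backend; RhodeCode's dogpile.cache.rc.redis_msgpack is a custom backend and may wire these options differently, so the region name below is illustrative only:

    # minimal sketch, assuming the stock redis backend rather than rc.redis_msgpack
    from dogpile.cache import make_region

    repo_object_region = make_region().configure(
        'dogpile.cache.redis',
        expiration_time=2592000,             # rc_cache.repo_object.expiration_time
        arguments={
            'host': 'localhost',             # rc_cache.repo_object.arguments.host
            'port': 6379,
            'db': 5,
            'socket_timeout': 30,
            'redis_expiration_time': 3592000,
            'distributed_lock': True,
        },
    )
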
; Statsd client config, this is used to send metrics to statsd
; We recommend setting statsd_exported and scraping them using Prometheus
#statsd.enabled = false
#statsd.statsd_host = 0.0.0.0
#statsd.statsd_port = 8125
#statsd.statsd_prefix =
#statsd.statsd_ipv6 = false

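For reference, the statsd.* options above mirror the parameters of a standard StatsD client; a minimal sketch using the `statsd` PyPI package (an assumption here, since vcsserver ships its own client wrapper, which may differ):

    # minimal sketch, assuming the `statsd` PyPI package
    import statsd

    client = statsd.StatsClient(host='0.0.0.0', port=8125, prefix=None, ipv6=False)
    client.incr('vcsserver.requests')   # metric name is illustrative only
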
; Configure logging automatically at server startup. Set to false
; to use the custom logging config below.
; RC_LOGGING_FORMATTER
; RC_LOGGING_LEVEL
; env variables can control the settings for logging in case of autoconfigure

#logging.autoconfigure = true

; specify your own custom logging config file to configure logging
#logging.logging_conf_file = /path/to/custom_logging.ini

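The [loggers]/[handlers]/[formatters] sections that follow use the standard library fileConfig format, so a custom file passed via logging.logging_conf_file can be loaded the usual way; a minimal sketch (the path is just the placeholder from the option above):

    # minimal sketch of loading such a config with the stdlib
    import logging.config

    logging.config.fileConfig(
        '/path/to/custom_logging.ini',
        disable_existing_loggers=False,
    )
    logging.getLogger('vcsserver').debug('logging configured')
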
; #####################
; LOGGING CONFIGURATION
; #####################

-#[loggers]
+[loggers]
-#keys = root, vcsserver
+keys = root, vcsserver

-#[handlers]
+[handlers]
-#keys = console
+keys = console

-#[formatters]
+[formatters]
-#keys = generic
+keys = generic, json

; #######
; LOGGERS
; #######
-#[logger_root]
+[logger_root]
-#level = NOTSET
+level = NOTSET
-#handlers = console
+handlers = console

-#[logger_vcsserver]
+[logger_vcsserver]
-#level = INFO
+level = DEBUG
-#handlers =
+handlers =
-#qualname = vcsserver
+qualname = vcsserver
-#propagate = 1
+propagate = 1

; ########
; HANDLERS
; ########

-#[handler_console]
+[handler_console]
-#class = StreamHandler
+class = StreamHandler
-#args = (sys.stderr, )
+args = (sys.stderr, )
-#level = INFO
+level = DEBUG
-formatter = generic
; To enable JSON formatted logs replace generic with json
; This allows sending properly formatted logs to grafana loki or elasticsearch
#formatter = json
+#formatter = generic

; ##########
; FORMATTERS
; ##########

-#[formatter_generic]
+[formatter_generic]
-#format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
+format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
-#datefmt = %Y-%m-%d %H:%M:%S
+datefmt = %Y-%m-%d %H:%M:%S

-#[formatter_json]
+[formatter_json]
-#format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
+format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
-#class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
+class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
@@ -1,272 +1,274 @@
1 """
1 """
2 Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
2 Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
3 Gunicorn configuration should be managed by .ini files entries of RhodeCode or VCSServer
3 Gunicorn configuration should be managed by .ini files entries of RhodeCode or VCSServer
4 """
4 """
5
5
6 import gc
6 import gc
7 import os
7 import os
8 import sys
8 import sys
9 import math
9 import math
10 import time
10 import time
11 import threading
11 import threading
12 import traceback
12 import traceback
13 import random
13 import random
14 from gunicorn.glogging import Logger
14 from gunicorn.glogging import Logger
15
15
16
16
17 def get_workers():
17 def get_workers():
18 import multiprocessing
18 import multiprocessing
19 return multiprocessing.cpu_count() * 2 + 1
19 return multiprocessing.cpu_count() * 2 + 1
20
20
21 # GLOBAL
21 # GLOBAL
22 errorlog = '-'
22 errorlog = '-'
23 accesslog = '-'
23 accesslog = '-'
24
24
25
25
26 # SERVER MECHANICS
26 # SERVER MECHANICS
27 # None == system temp dir
27 # None == system temp dir
28 # worker_tmp_dir is recommended to be set to some tmpfs
28 # worker_tmp_dir is recommended to be set to some tmpfs
29 worker_tmp_dir = None
29 worker_tmp_dir = None
30 tmp_upload_dir = None
30 tmp_upload_dir = None
31
31
32 #reuse_port = True
33
32 # Custom log format
34 # Custom log format
33 #access_log_format = (
35 #access_log_format = (
34 # '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
36 # '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
35
37
36 # loki format for easier parsing in grafana
38 # loki format for easier parsing in grafana
37 access_log_format = (
39 access_log_format = (
38 'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
40 'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
39
41
40 # self adjust workers based on CPU count
42 # self adjust workers based on CPU count
41 # workers = get_workers()
43 # workers = get_workers()
42
44
43
45
44 def _get_process_rss(pid=None):
46 def _get_process_rss(pid=None):
45 try:
47 try:
46 import psutil
48 import psutil
47 if pid:
49 if pid:
48 proc = psutil.Process(pid)
50 proc = psutil.Process(pid)
49 else:
51 else:
50 proc = psutil.Process()
52 proc = psutil.Process()
51 return proc.memory_info().rss
53 return proc.memory_info().rss
52 except Exception:
54 except Exception:
53 return None
55 return None
54
56
55
57
56 def _get_config(ini_path):
58 def _get_config(ini_path):
57
59
58 try:
60 try:
59 import configparser
61 import configparser
60 except ImportError:
62 except ImportError:
61 import ConfigParser as configparser
63 import ConfigParser as configparser
62 try:
64 try:
63 config = configparser.RawConfigParser()
65 config = configparser.RawConfigParser()
64 config.read(ini_path)
66 config.read(ini_path)
65 return config
67 return config
66 except Exception:
68 except Exception:
67 return None
69 return None
68
70
69
71
70 def _time_with_offset(memory_usage_check_interval):
72 def _time_with_offset(memory_usage_check_interval):
71 return time.time() - random.randint(0, memory_usage_check_interval/2.0)
73 return time.time() - random.randint(0, memory_usage_check_interval/2.0)
72
74
73
75
74 def pre_fork(server, worker):
76 def pre_fork(server, worker):
75 pass
77 pass
76
78
77
79
78 def post_fork(server, worker):
80 def post_fork(server, worker):
79
81
80 # memory spec defaults
82 # memory spec defaults
81 _memory_max_usage = 0
83 _memory_max_usage = 0
82 _memory_usage_check_interval = 60
84 _memory_usage_check_interval = 60
83 _memory_usage_recovery_threshold = 0.8
85 _memory_usage_recovery_threshold = 0.8
84
86
85 ini_path = os.path.abspath(server.cfg.paste)
87 ini_path = os.path.abspath(server.cfg.paste)
86 conf = _get_config(ini_path)
88 conf = _get_config(ini_path)
87
89
88 section = 'server:main'
90 section = 'server:main'
89 if conf and conf.has_section(section):
91 if conf and conf.has_section(section):
90
92
91 if conf.has_option(section, 'memory_max_usage'):
93 if conf.has_option(section, 'memory_max_usage'):
92 _memory_max_usage = conf.getint(section, 'memory_max_usage')
94 _memory_max_usage = conf.getint(section, 'memory_max_usage')
93
95
94 if conf.has_option(section, 'memory_usage_check_interval'):
96 if conf.has_option(section, 'memory_usage_check_interval'):
95 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
97 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
96
98
97 if conf.has_option(section, 'memory_usage_recovery_threshold'):
99 if conf.has_option(section, 'memory_usage_recovery_threshold'):
98 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
100 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
99
101
100 worker._memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
102 worker._memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
101 or _memory_max_usage)
103 or _memory_max_usage)
102 worker._memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
104 worker._memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
103 or _memory_usage_check_interval)
105 or _memory_usage_check_interval)
104 worker._memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
106 worker._memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
105 or _memory_usage_recovery_threshold)
107 or _memory_usage_recovery_threshold)
106
108
107 # register memory last check time, with some random offset so we don't recycle all
109 # register memory last check time, with some random offset so we don't recycle all
108 # at once
110 # at once
109 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
111 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
110
112
111 if _memory_max_usage:
113 if _memory_max_usage:
112 server.log.info("[%-10s] WORKER spawned with max memory set at %s", worker.pid,
114 server.log.info("[%-10s] WORKER spawned with max memory set at %s", worker.pid,
113 _format_data_size(_memory_max_usage))
115 _format_data_size(_memory_max_usage))
114 else:
116 else:
115 server.log.info("[%-10s] WORKER spawned", worker.pid)
117 server.log.info("[%-10s] WORKER spawned", worker.pid)
116
118
117
119
118 def pre_exec(server):
120 def pre_exec(server):
119 server.log.info("Forked child, re-executing.")
121 server.log.info("Forked child, re-executing.")
120
122
121
123
122 def on_starting(server):
124 def on_starting(server):
123 server_lbl = '{} {}'.format(server.proc_name, server.address)
125 server_lbl = '{} {}'.format(server.proc_name, server.address)
124 server.log.info("Server %s is starting.", server_lbl)
126 server.log.info("Server %s is starting.", server_lbl)
125
127
126
128
127 def when_ready(server):
129 def when_ready(server):
128 server.log.info("Server %s is ready. Spawning workers", server)
130 server.log.info("Server %s is ready. Spawning workers", server)
129
131
130
132
131 def on_reload(server):
133 def on_reload(server):
132 pass
134 pass
133
135
134
136
135 def _format_data_size(size, unit="B", precision=1, binary=True):
137 def _format_data_size(size, unit="B", precision=1, binary=True):
136 """Format a number using SI units (kilo, mega, etc.).
138 """Format a number using SI units (kilo, mega, etc.).
137
139
138 ``size``: The number as a float or int.
140 ``size``: The number as a float or int.
139
141
140 ``unit``: The unit name in plural form. Examples: "bytes", "B".
142 ``unit``: The unit name in plural form. Examples: "bytes", "B".
141
143
142 ``precision``: How many digits to the right of the decimal point. Default
144 ``precision``: How many digits to the right of the decimal point. Default
143 is 1. 0 suppresses the decimal point.
145 is 1. 0 suppresses the decimal point.
144
146
145 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
147 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
146 If true, use base-2 binary prefixes (kibi = Ki = 1024).
148 If true, use base-2 binary prefixes (kibi = Ki = 1024).
147
149
148 ``full_name``: If false (default), use the prefix abbreviation ("k" or
150 ``full_name``: If false (default), use the prefix abbreviation ("k" or
149 "Ki"). If true, use the full prefix ("kilo" or "kibi"). If false,
151 "Ki"). If true, use the full prefix ("kilo" or "kibi"). If false,
150 use abbreviation ("k" or "Ki").
152 use abbreviation ("k" or "Ki").
151
153
152 """
154 """
153
155
154 if not binary:
156 if not binary:
155 base = 1000
157 base = 1000
156 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
158 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
157 else:
159 else:
158 base = 1024
160 base = 1024
159 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
161 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
160
162
161 sign = ""
163 sign = ""
162 if size > 0:
164 if size > 0:
163 m = int(math.log(size, base))
165 m = int(math.log(size, base))
164 elif size < 0:
166 elif size < 0:
165 sign = "-"
167 sign = "-"
166 size = -size
168 size = -size
167 m = int(math.log(size, base))
169 m = int(math.log(size, base))
168 else:
170 else:
169 m = 0
171 m = 0
170 if m > 8:
172 if m > 8:
171 m = 8
173 m = 8
172
174
173 if m == 0:
175 if m == 0:
174 precision = '%.0f'
176 precision = '%.0f'
175 else:
177 else:
176 precision = '%%.%df' % precision
178 precision = '%%.%df' % precision
177
179
178 size = precision % (size / math.pow(base, m))
180 size = precision % (size / math.pow(base, m))
179
181
180 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
182 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
181
183
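# Illustrative usage of the helper above (an editorial note, not part of the original file):
# with the binary default, _format_data_size(536870912) returns '512.0 MiB' and
# _format_data_size(0) returns '0 B'; post_fork() and _check_memory_usage() below use it
# purely for human-readable log messages.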

def _check_memory_usage(worker):
    memory_max_usage = worker._memory_max_usage
    if not memory_max_usage:
        return

    memory_usage_check_interval = worker._memory_usage_check_interval
    memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold

    elapsed = time.time() - worker._last_memory_check_time
    if elapsed > memory_usage_check_interval:
        mem_usage = _get_process_rss()
        if mem_usage and mem_usage > memory_max_usage:
            worker.log.info(
                "memory usage %s > %s, forcing gc",
                _format_data_size(mem_usage), _format_data_size(memory_max_usage))
            # Try to clean it up by forcing a full collection.
            gc.collect()
            mem_usage = _get_process_rss()
            if mem_usage > memory_usage_recovery_threshold:
                # Didn't clean up enough, we'll have to terminate.
                worker.log.warning(
                    "memory usage %s > %s after gc, quitting",
                    _format_data_size(mem_usage), _format_data_size(memory_max_usage))
                # This will cause worker to auto-restart itself
                worker.alive = False
        worker._last_memory_check_time = time.time()


def worker_int(worker):
    worker.log.info("[%-10s] worker received INT or QUIT signal", worker.pid)

    # get traceback info, on worker crash
    id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
    code = []
    for thread_id, stack in sys._current_frames().items():
        code.append(
            "\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
        for fname, lineno, name, line in traceback.extract_stack(stack):
            code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
            if line:
                code.append(" %s" % (line.strip()))
    worker.log.debug("\n".join(code))


def worker_abort(worker):
    worker.log.info("[%-10s] worker received SIGABRT signal", worker.pid)


def worker_exit(server, worker):
    worker.log.info("[%-10s] worker exit", worker.pid)


def child_exit(server, worker):
    worker.log.info("[%-10s] worker child exit", worker.pid)


def pre_request(worker, req):
    worker.start_time = time.time()
    worker.log.debug(
        "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)


def post_request(worker, req, environ, resp):
    total_time = time.time() - worker.start_time
    # Gunicorn sometimes has problems with reading the status_code
    status_code = getattr(resp, 'status_code', '')
    worker.log.debug(
        "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
        worker.nr, req.method, req.path, status_code, total_time)
    _check_memory_usage(worker)


class RhodeCodeLogger(Logger):
    """
    Custom Logger that allows some customization that gunicorn doesn't allow
    """

    datefmt = r"%Y-%m-%d %H:%M:%S"

    def __init__(self, cfg):
        Logger.__init__(self, cfg)

    def now(self):
        """ return date in RhodeCode Log format """
        now = time.time()
        msecs = int((now - long(now)) * 1000)
        return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)


logger_class = RhodeCodeLogger
@@ -1,239 +1,239 @@
## -*- coding: utf-8 -*-

; #################################
; RHODECODE VCSSERVER CONFIGURATION
; #################################

[server:main]
; COMMON HOST/IP CONFIG
host = 127.0.0.1
port = 9900


; ###########################
; GUNICORN APPLICATION SERVER
; ###########################

; run with gunicorn --paste rhodecode.ini

; Module to use, this setting shouldn't be changed
use = egg:gunicorn#main

; Sets the number of process workers. More workers means more concurrent connections
; RhodeCode can handle at the same time. Each additional worker also increases
; memory usage, as each has its own set of caches.
; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
; than 8-10 unless for really big deployments, e.g. 700-1000 users.
; `instance_id = *` must be set in the [app:main] section below (which is the default)
; when using more than 1 worker.
workers = 2

; Gunicorn access log level
loglevel = info

; Process name visible in process list
proc_name = rhodecode_vcsserver

; Type of worker class, one of `sync`, `gevent`
; currently `sync` is the only option allowed.
worker_class = sync

; The maximum number of simultaneous clients. Valid only for gevent
worker_connections = 10

; Max number of requests that worker will handle before being gracefully restarted.
; Prevents memory leaks; jitter adds variability so not all workers are restarted at once.
max_requests = 1000
max_requests_jitter = 30

; Amount of time a worker can spend handling a request before it
; gets killed and restarted. By default set to 21600 (6hrs)
; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
timeout = 21600

; The maximum size of HTTP request line in bytes.
; 0 for unlimited
limit_request_line = 0

; Limit the number of HTTP header fields in a request.
; By default this value is 100 and can't be larger than 32768.
limit_request_fields = 32768

; Limit the allowed size of an HTTP request header field.
; Value is a positive number or 0.
; Setting it to 0 will allow unlimited header field sizes.
limit_request_field_size = 0

; Timeout for graceful workers restart.
; After receiving a restart signal, workers have this much time to finish
; serving requests. Workers still alive after the timeout (starting from the
; receipt of the restart signal) are force killed.
; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
graceful_timeout = 21600

# The number of seconds to wait for requests on a Keep-Alive connection.
# Generally set in the 1-5 seconds range.
keepalive = 2

; Maximum memory usage that each worker can use before it will receive a
; graceful restart signal. 0 = memory monitoring is disabled
; Examples: 268435456 (256MB), 536870912 (512MB)
; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
memory_max_usage = 0

; How often in seconds to check for memory usage for each gunicorn worker
memory_usage_check_interval = 60

; Threshold value for which we don't recycle worker if GarbageCollection
; frees up enough resources. Before each restart we try to run GC on the worker;
; if we get enough free memory after that, the restart will not happen.
memory_usage_recovery_threshold = 0.8


[app:main]
; The %(here)s variable will be replaced with the absolute path of the parent directory
; of this file
; Each option in app:main can be overridden by an environment variable
;
;To override an option:
;
;RC_<KeyName>
;Everything should be uppercase, . and - should be replaced by _.
;For example, if you have these configuration settings:
;rc_cache.repo_object.backend = foo
;can be overridden by
;export RC_CACHE_REPO_OBJECT_BACKEND=foo

use = egg:rhodecode-vcsserver

; Pyramid default locales, we need this to be set
#pyramid.default_locale_name = en

; default locale used by VCS systems
#locale = en_US.UTF-8

; path to binaries for vcsserver, it should be set by the installer
; at installation time, e.g. /home/user/.rccontrol/vcsserver-1/profile/bin
; it can also be a path to nix-build output in case of development
core.binary_dir = ""

; Custom exception store path, defaults to TMPDIR
; This is used to store exceptions from RhodeCode in a shared directory
#exception_tracker.store_path =

; #############
; DOGPILE CACHE
; #############

; Default cache dir for caches. Putting this into a ramdisk can boost performance.
; eg. /tmpfs/data_ramdisk, however this directory might require a large amount of space
#cache_dir = %(here)s/data

; ***************************************
; `repo_object` cache, default file based
; ***************************************

; `repo_object` cache settings for vcs methods for repositories
#rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace

; cache auto-expires after N seconds
; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
#rc_cache.repo_object.expiration_time = 2592000

; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
#rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache_repo_object.db

; ***********************************************************
; `repo_object` cache with redis backend
; recommended for larger instances, and for better performance
; ***********************************************************

; `repo_object` cache settings for vcs methods for repositories
#rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack

; cache auto-expires after N seconds
; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
#rc_cache.repo_object.expiration_time = 2592000

; redis_expiration_time needs to be greater than expiration_time
#rc_cache.repo_object.arguments.redis_expiration_time = 3592000

#rc_cache.repo_object.arguments.host = localhost
#rc_cache.repo_object.arguments.port = 6379
#rc_cache.repo_object.arguments.db = 5
#rc_cache.repo_object.arguments.socket_timeout = 30
; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
#rc_cache.repo_object.arguments.distributed_lock = true

; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
#rc_cache.repo_object.arguments.lock_auto_renewal = true

; Statsd client config, this is used to send metrics to statsd
; We recommend setting statsd_exported and scraping them using Prometheus
#statsd.enabled = false
#statsd.statsd_host = 0.0.0.0
#statsd.statsd_port = 8125
#statsd.statsd_prefix =
#statsd.statsd_ipv6 = false

; Configure logging automatically at server startup. Set to false
; to use the custom logging config below.
; RC_LOGGING_FORMATTER
; RC_LOGGING_LEVEL
; env variables can control the settings for logging in case of autoconfigure

#logging.autoconfigure = true

; specify your own custom logging config file to configure logging
#logging.logging_conf_file = /path/to/custom_logging.ini

; #####################
; LOGGING CONFIGURATION
; #####################

-#[loggers]
+[loggers]
-#keys = root, vcsserver
+keys = root, vcsserver

-#[handlers]
+[handlers]
-#keys = console
+keys = console

-#[formatters]
+[formatters]
-#keys = generic
+keys = generic, json

; #######
; LOGGERS
; #######
-#[logger_root]
+[logger_root]
-#level = NOTSET
+level = NOTSET
-#handlers = console
+handlers = console

-#[logger_vcsserver]
+[logger_vcsserver]
-#level = INFO
+level = INFO
-#handlers =
+handlers =
-#qualname = vcsserver
+qualname = vcsserver
-#propagate = 1
+propagate = 1

; ########
; HANDLERS
; ########

-#[handler_console]
+[handler_console]
-#class = StreamHandler
+class = StreamHandler
-#args = (sys.stderr, )
+args = (sys.stderr, )
-#level = INFO
+level = INFO
-formatter = generic
; To enable JSON formatted logs replace generic with json
; This allows sending properly formatted logs to grafana loki or elasticsearch
#formatter = json
+#formatter = generic

; ##########
; FORMATTERS
; ##########

-#[formatter_generic]
+[formatter_generic]
-#format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
+format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
-#datefmt = %Y-%m-%d %H:%M:%S
+datefmt = %Y-%m-%d %H:%M:%S

-#[formatter_json]
+[formatter_json]
-#format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
+format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
-#class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
+class = vcsserver.lib._vendor.jsonlogger.JsonFormatter