configs: removed utf8 not needed on python3
super-admin
r1119:dcf51ded python3
@@ -1,275 +1,275 @@
1 ## -*- coding: utf-8 -*-
1 #
2 2
3 3 ; #################################
4 4 ; RHODECODE VCSSERVER CONFIGURATION
5 5 ; #################################
6 6
7 7 [server:main]
8 8 ; COMMON HOST/IP CONFIG
9 9 host = 0.0.0.0
10 10 port = 9900
11 11
12 12 ; ##################################################
13 13 ; WAITRESS WSGI SERVER - Recommended for Development
14 14 ; ##################################################
15 15
16 16 ; use server type
17 17 use = egg:waitress#main
18 18
19 19 ; number of worker threads
20 20 threads = 5
21 21
22 22 ; MAX BODY SIZE 100GB
23 23 max_request_body_size = 107374182400
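; For reference: 107374182400 = 100 * 1024**3 bytes, i.e. 100 GiB, matching the
; MAX BODY SIZE note above.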
24 24
25 25 ; Use poll instead of select; fixes file descriptor limit problems.
26 26 ; May not work on old Windows systems.
27 27 asyncore_use_poll = true
28 28
29 29
30 30 ; ###########################
31 31 ; GUNICORN APPLICATION SERVER
32 32 ; ###########################
33 33
34 34 ; run with gunicorn --paste rhodecode.ini
35 35
36 36 ; Module to use, this setting shouldn't be changed
37 37 #use = egg:gunicorn#main
38 38
39 39 ; Sets the number of process workers. More workers means more concurrent connections
40 40 ; RhodeCode can handle at the same time. Each additional worker also increases
41 41 ; memory usage, as each has its own set of caches.
42 42 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
43 43 ; than 8-10 except for really big deployments, e.g. 700-1000 users.
44 44 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
45 45 ; when using more than 1 worker.
46 46 #workers = 2
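; Worked example of the formula above (illustrative, not a shipped default):
; a 4-CPU host gives (2 * 4) + 1 = 9 workers, which stays within the suggested
; 8-10 ceiling.
#workers = 9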
47 47
48 48 ; Gunicorn access log level
49 49 #loglevel = info
50 50
51 51 ; Process name visible in process list
52 52 #proc_name = rhodecode_vcsserver
53 53
54 54 ; Type of worker class, one of `sync`, `gevent`
55 55 ; currently `sync` is the only option allowed.
56 56 #worker_class = sync
57 57
58 58 ; The maximum number of simultaneous clients. Valid only for gevent
59 59 #worker_connections = 10
60 60
61 61 ; Max number of requests that a worker will handle before being gracefully restarted.
62 62 ; Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
63 63 #max_requests = 1000
64 64 #max_requests_jitter = 30
65 65
66 66 ; Amount of time a worker can spend handling a request before it
67 67 ; gets killed and restarted. By default set to 21600 (6hrs)
68 68 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
69 69 #timeout = 21600
70 70
71 71 ; The maximum size of HTTP request line in bytes.
72 72 ; 0 for unlimited
73 73 #limit_request_line = 0
74 74
75 75 ; Limit the number of HTTP header fields in a request.
76 76 ; By default this value is 100 and can't be larger than 32768.
77 77 #limit_request_fields = 32768
78 78
79 79 ; Limit the allowed size of an HTTP request header field.
80 80 ; Value is a positive number or 0.
81 81 ; Setting it to 0 will allow unlimited header field sizes.
82 82 #limit_request_field_size = 0
83 83
84 84 ; Timeout for graceful workers restart.
85 85 ; After receiving a restart signal, workers have this much time to finish
86 86 ; serving requests. Workers still alive after the timeout (starting from the
87 87 ; receipt of the restart signal) are force killed.
88 88 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
89 89 #graceful_timeout = 21600
90 90
91 91 # The number of seconds to wait for requests on a Keep-Alive connection.
92 92 # Generally set in the 1-5 seconds range.
93 93 #keepalive = 2
94 94
95 95 ; Maximum memory usage that each worker can use before it will receive a
96 96 ; graceful restart signal. 0 = memory monitoring is disabled.
97 97 ; Examples: 268435456 (256MB), 536870912 (512MB)
98 98 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
99 99 #memory_max_usage = 0
100 100
101 101 ; How often in seconds to check for memory usage for each gunicorn worker
102 102 #memory_usage_check_interval = 60
103 103
104 104 ; Threshold value for skipping the worker recycle when GarbageCollection
105 105 ; frees up enough resources. Before each restart we try to run GC on the worker;
106 106 ; if we get enough free memory after that, the restart will not happen.
107 107 #memory_usage_recovery_threshold = 0.8
108 108
109 109
110 110 [app:main]
111 111 ; The %(here)s variable will be replaced with the absolute path of the parent directory
112 112 ; of this file
113 113 ; Each option in the app:main section can be overridden by an environment variable
114 114 ;
115 115 ;To override an option:
116 116 ;
117 117 ;RC_<KeyName>
118 118 ;Everything should be uppercase, . and - should be replaced by _.
119 119 ;For example, if you have these configuration settings:
120 120 ;rc_cache.repo_object.backend = foo
121 121 ;can be overridden by
122 122 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
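;Another illustration of the same rule applied to a key defined later in this
;file (value shown is the example installer path, purely illustrative):
;export RC_CORE_BINARY_DIR=/home/user/.rccontrol/vcsserver-1/profile/bin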
123 123
124 124 use = egg:rhodecode-vcsserver
125 125
126 126
127 127 ; #############
128 128 ; DEBUG OPTIONS
129 129 ; #############
130 130
131 131 # During development we want to have the debug toolbar enabled
132 132 pyramid.includes =
133 133 pyramid_debugtoolbar
134 134
135 135 debugtoolbar.hosts = 0.0.0.0/0
136 136 debugtoolbar.exclude_prefixes =
137 137 /css
138 138 /fonts
139 139 /images
140 140 /js
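
; Illustrative, not a shipped default: debugtoolbar.hosts takes a CIDR range,
; so the toolbar can be limited to a trusted network instead of 0.0.0.0/0, e.g.
#debugtoolbar.hosts = 192.168.0.0/16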
141 141
142 142 ; #################
143 143 ; END DEBUG OPTIONS
144 144 ; #################
145 145
146 146 ; Pyramid default locales, we need this to be set
147 147 #pyramid.default_locale_name = en
148 148
149 149 ; default locale used by VCS systems
150 150 #locale = en_US.UTF-8
151 151
152 152 ; path to binaries for vcsserver; it should be set by the installer
153 153 ; at installation time, e.g. /home/user/.rccontrol/vcsserver-1/profile/bin
154 154 ; it can also be a path to nix-build output in case of development
155 155 core.binary_dir = ""
156 156
157 157 ; Custom exception store path, defaults to TMPDIR
158 158 ; This is used to store exceptions from RhodeCode in a shared directory
159 159 #exception_tracker.store_path =
160 160
161 161 ; #############
162 162 ; DOGPILE CACHE
163 163 ; #############
164 164
165 165 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
166 166 ; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space
167 167 #cache_dir = %(here)s/data
168 168
169 169 ; ***************************************
170 170 ; `repo_object` cache, default file based
171 171 ; ***************************************
172 172
173 173 ; `repo_object` cache settings for vcs methods for repositories
174 174 #rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
175 175
176 176 ; cache auto-expires after N seconds
177 177 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
178 178 #rc_cache.repo_object.expiration_time = 2592000
179 179
180 180 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
181 181 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache_repo_object.db
182 182
183 183 ; ***********************************************************
184 184 ; `repo_object` cache with redis backend
185 185 ; recommended for larger instances, and for better performance
186 186 ; ***********************************************************
187 187
188 188 ; `repo_object` cache settings for vcs methods for repositories
189 189 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
190 190
191 191 ; cache auto-expires after N seconds
192 192 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
193 193 #rc_cache.repo_object.expiration_time = 2592000
194 194
195 195 ; redis_expiration_time needs to be greater than expiration_time
196 196 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000
197 197
198 198 #rc_cache.repo_object.arguments.host = localhost
199 199 #rc_cache.repo_object.arguments.port = 6379
200 200 #rc_cache.repo_object.arguments.db = 5
201 201 #rc_cache.repo_object.arguments.socket_timeout = 30
202 202 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
203 203 #rc_cache.repo_object.arguments.distributed_lock = true
204 204
205 205 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
206 206 #rc_cache.repo_object.arguments.lock_auto_renewal = true
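; A minimal sketch of the lines to uncomment when switching to the Redis
; backend, assuming a Redis server on 127.0.0.1:6379; the remaining arguments
; keep the documented values above:
#rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
#rc_cache.repo_object.arguments.host = 127.0.0.1
#rc_cache.repo_object.arguments.port = 6379
#rc_cache.repo_object.arguments.distributed_lock = true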
207 207
208 208 ; Statsd client config, this is used to send metrics to statsd
209 209 ; We recommend setting up statsd_exporter and scraping the metrics with Prometheus
210 210 #statsd.enabled = false
211 211 #statsd.statsd_host = 0.0.0.0
212 212 #statsd.statsd_port = 8125
213 213 #statsd.statsd_prefix =
214 214 #statsd.statsd_ipv6 = false
215 215
216 216 ; Configure logging automatically at server startup; set to false
217 217 ; to use the custom logging config below.
218 218 ; RC_LOGGING_FORMATTER
219 219 ; RC_LOGGING_LEVEL
220 220 ; env variables can control the logging settings when autoconfigure is used
221 221
222 222 #logging.autoconfigure = true
223 223
224 224 ; specify your own custom logging config file to configure logging
225 225 #logging.logging_conf_file = /path/to/custom_logging.ini
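; Illustrative use of the env variables named above (assumes 'json' and 'DEBUG'
; are accepted values, matching the formatter and level names used in this file):
;export RC_LOGGING_FORMATTER=json
;export RC_LOGGING_LEVEL=DEBUG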
226 226
227 227 ; #####################
228 228 ; LOGGING CONFIGURATION
229 229 ; #####################
230 230
231 231 [loggers]
232 232 keys = root, vcsserver
233 233
234 234 [handlers]
235 235 keys = console
236 236
237 237 [formatters]
238 238 keys = generic, json
239 239
240 240 ; #######
241 241 ; LOGGERS
242 242 ; #######
243 243 [logger_root]
244 244 level = NOTSET
245 245 handlers = console
246 246
247 247 [logger_vcsserver]
248 248 level = DEBUG
249 249 handlers =
250 250 qualname = vcsserver
251 251 propagate = 1
252 252
253 253 ; ########
254 254 ; HANDLERS
255 255 ; ########
256 256
257 257 [handler_console]
258 258 class = StreamHandler
259 259 args = (sys.stderr, )
260 260 level = DEBUG
261 261 ; To enable JSON-formatted logs, replace 'generic' with 'json'
262 262 ; This allows sending properly formatted logs to Grafana Loki or Elasticsearch
263 263 formatter = generic
264 264
265 265 ; ##########
266 266 ; FORMATTERS
267 267 ; ##########
268 268
269 269 [formatter_generic]
270 270 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
271 271 datefmt = %Y-%m-%d %H:%M:%S
272 272
273 273 [formatter_json]
274 274 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
275 275 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
@@ -1,238 +1,238 @@
1 ## -*- coding: utf-8 -*-
1 #
2 2
3 3 ; #################################
4 4 ; RHODECODE VCSSERVER CONFIGURATION
5 5 ; #################################
6 6
7 7 [server:main]
8 8 ; COMMON HOST/IP CONFIG
9 9 host = 127.0.0.1
10 10 port = 9900
11 11
12 12
13 13 ; ###########################
14 14 ; GUNICORN APPLICATION SERVER
15 15 ; ###########################
16 16
17 17 ; run with gunicorn --paste rhodecode.ini
18 18
19 19 ; Module to use, this setting shouldn't be changed
20 20 use = egg:gunicorn#main
21 21
22 22 ; Sets the number of process workers. More workers means more concurrent connections
23 23 ; RhodeCode can handle at the same time. Each additional worker also increases
24 24 ; memory usage, as each has its own set of caches.
25 25 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
26 26 ; than 8-10 except for really big deployments, e.g. 700-1000 users.
27 27 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
28 28 ; when using more than 1 worker.
29 29 workers = 2
30 30
31 31 ; Gunicorn access log level
32 32 loglevel = info
33 33
34 34 ; Process name visible in process list
35 35 proc_name = rhodecode_vcsserver
36 36
37 37 ; Type of worker class, one of `sync`, `gevent`
38 38 ; currently `sync` is the only option allowed.
39 39 worker_class = sync
40 40
41 41 ; The maximum number of simultaneous clients. Valid only for gevent
42 42 worker_connections = 10
43 43
44 44 ; Max number of requests that a worker will handle before being gracefully restarted.
45 45 ; Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
46 46 max_requests = 1000
47 47 max_requests_jitter = 30
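; With the values above, gunicorn restarts each worker after it has handled
; roughly 1000-1030 requests (1000 plus a random jitter of up to 30), so the
; workers do not all recycle at the same moment.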
48 48
49 49 ; Amount of time a worker can spend handling a request before it
50 50 ; gets killed and restarted. By default set to 21600 (6hrs)
51 51 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
52 52 timeout = 21600
53 53
54 54 ; The maximum size of HTTP request line in bytes.
55 55 ; 0 for unlimited
56 56 limit_request_line = 0
57 57
58 58 ; Limit the number of HTTP header fields in a request.
59 59 ; By default this value is 100 and can't be larger than 32768.
60 60 limit_request_fields = 32768
61 61
62 62 ; Limit the allowed size of an HTTP request header field.
63 63 ; Value is a positive number or 0.
64 64 ; Setting it to 0 will allow unlimited header field sizes.
65 65 limit_request_field_size = 0
66 66
67 67 ; Timeout for graceful workers restart.
68 68 ; After receiving a restart signal, workers have this much time to finish
69 69 ; serving requests. Workers still alive after the timeout (starting from the
70 70 ; receipt of the restart signal) are force killed.
71 71 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
72 72 graceful_timeout = 21600
73 73
74 74 # The number of seconds to wait for requests on a Keep-Alive connection.
75 75 # Generally set in the 1-5 seconds range.
76 76 keepalive = 2
77 77
78 78 ; Maximum memory usage that each worker can use before it will receive a
79 79 ; graceful restart signal. 0 = memory monitoring is disabled.
80 80 ; Examples: 268435456 (256MB), 536870912 (512MB)
81 81 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
82 82 memory_max_usage = 0
83 83
84 84 ; How often in seconds to check for memory usage for each gunicorn worker
85 85 memory_usage_check_interval = 60
86 86
87 87 ; Threshold value for skipping the worker recycle when GarbageCollection
88 88 ; frees up enough resources. Before each restart we try to run GC on the worker;
89 89 ; if we get enough free memory after that, the restart will not happen.
90 90 memory_usage_recovery_threshold = 0.8
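; Illustrative alternative (not the shipped values): a 1 GB cap, checked every
; 60 seconds; with the 0.8 threshold a worker is recycled only if GC cannot
; bring its usage back under 0.8 * 1073741824 ≈ 858993459 bytes.
#memory_max_usage = 1073741824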
91 91
92 92
93 93 [app:main]
94 94 ; The %(here)s variable will be replaced with the absolute path of the parent directory
95 95 ; of this file
96 96 ; Each option in the app:main section can be overridden by an environment variable
97 97 ;
98 98 ;To override an option:
99 99 ;
100 100 ;RC_<KeyName>
101 101 ;Everything should be uppercase, . and - should be replaced by _.
102 102 ;For example, if you have these configuration settings:
103 103 ;rc_cache.repo_object.backend = foo
104 104 ;can be overridden by
105 105 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
106 106
107 107 use = egg:rhodecode-vcsserver
108 108
109 109 ; Pyramid default locales, we need this to be set
110 110 #pyramid.default_locale_name = en
111 111
112 112 ; default locale used by VCS systems
113 113 #locale = en_US.UTF-8
114 114
115 115 ; path to binaries for vcsserver; it should be set by the installer
116 116 ; at installation time, e.g. /home/user/.rccontrol/vcsserver-1/profile/bin
117 117 ; it can also be a path to nix-build output in case of development
118 118 core.binary_dir = ""
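; Illustrative value only, reusing the installer path mentioned above:
#core.binary_dir = /home/user/.rccontrol/vcsserver-1/profile/bin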
119 119
120 120 ; Custom exception store path, defaults to TMPDIR
121 121 ; This is used to store exceptions from RhodeCode in a shared directory
122 122 #exception_tracker.store_path =
123 123
124 124 ; #############
125 125 ; DOGPILE CACHE
126 126 ; #############
127 127
128 128 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
129 129 ; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space
130 130 #cache_dir = %(here)s/data
131 131
132 132 ; ***************************************
133 133 ; `repo_object` cache, default file based
134 134 ; ***************************************
135 135
136 136 ; `repo_object` cache settings for vcs methods for repositories
137 137 #rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
138 138
139 139 ; cache auto-expires after N seconds
140 140 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
141 141 #rc_cache.repo_object.expiration_time = 2592000
142 142
143 143 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
144 144 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache_repo_object.db
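; Illustrative ramdisk setup following the cache_dir note above (assumes
; /tmpfs/data_ramdisk exists and is writable):
#cache_dir = /tmpfs/data_ramdisk
#rc_cache.repo_object.arguments.filename = /tmpfs/data_ramdisk/vcsserver_cache_repo_object.db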
145 145
146 146 ; ***********************************************************
147 147 ; `repo_object` cache with redis backend
148 148 ; recommended for larger instances, and for better performance
149 149 ; ***********************************************************
150 150
151 151 ; `repo_object` cache settings for vcs methods for repositories
152 152 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
153 153
154 154 ; cache auto-expires after N seconds
155 155 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
156 156 #rc_cache.repo_object.expiration_time = 2592000
157 157
158 158 ; redis_expiration_time needs to be greater than expiration_time
159 159 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000
160 160
161 161 #rc_cache.repo_object.arguments.host = localhost
162 162 #rc_cache.repo_object.arguments.port = 6379
163 163 #rc_cache.repo_object.arguments.db = 5
164 164 #rc_cache.repo_object.arguments.socket_timeout = 30
165 165 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
166 166 #rc_cache.repo_object.arguments.distributed_lock = true
167 167
168 168 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
169 169 #rc_cache.repo_object.arguments.lock_auto_renewal = true
170 170
171 171 ; Statsd client config, this is used to send metrics to statsd
172 172 ; We recommend setting up statsd_exporter and scraping the metrics with Prometheus
173 173 #statsd.enabled = false
174 174 #statsd.statsd_host = 0.0.0.0
175 175 #statsd.statsd_port = 8125
176 176 #statsd.statsd_prefix =
177 177 #statsd.statsd_ipv6 = false
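; Minimal sketch for enabling metrics, assuming a statsd daemon listening on
; the default UDP port on the same host:
#statsd.enabled = true
#statsd.statsd_host = 127.0.0.1
#statsd.statsd_port = 8125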
178 178
179 179 ; Configure logging automatically at server startup; set to false
180 180 ; to use the custom logging config below.
181 181 ; RC_LOGGING_FORMATTER
182 182 ; RC_LOGGING_LEVEL
183 183 ; env variables can control the logging settings when autoconfigure is used
184 184
185 185 #logging.autoconfigure = true
186 186
187 187 ; specify your own custom logging config file to configure logging
188 188 #logging.logging_conf_file = /path/to/custom_logging.ini
189 189
190 190 ; #####################
191 191 ; LOGGING CONFIGURATION
192 192 ; #####################
193 193
194 194 [loggers]
195 195 keys = root, vcsserver
196 196
197 197 [handlers]
198 198 keys = console
199 199
200 200 [formatters]
201 201 keys = generic, json
202 202
203 203 ; #######
204 204 ; LOGGERS
205 205 ; #######
206 206 [logger_root]
207 207 level = NOTSET
208 208 handlers = console
209 209
210 210 [logger_vcsserver]
211 211 level = INFO
212 212 handlers =
213 213 qualname = vcsserver
214 214 propagate = 1
215 215
216 216 ; ########
217 217 ; HANDLERS
218 218 ; ########
219 219
220 220 [handler_console]
221 221 class = StreamHandler
222 222 args = (sys.stderr, )
223 223 level = INFO
224 224 ; To enable JSON-formatted logs, replace 'generic' with 'json'
225 225 ; This allows sending properly formatted logs to Grafana Loki or Elasticsearch
226 226 formatter = generic
227 227
228 228 ; ##########
229 229 ; FORMATTERS
230 230 ; ##########
231 231
232 232 [formatter_generic]
233 233 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
234 234 datefmt = %Y-%m-%d %H:%M:%S
235 235
236 236 [formatter_json]
237 237 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
238 238 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter