##// END OF EJS Templates
fix(configs): optimize configs for 5.0.0 release defaults...
super-admin -
r5295:7d8e7465 default
parent child Browse files
Show More
@@ -1,863 +1,865 b''
1
2
1
3 ; #########################################
2 ; #########################################
4 ; RHODECODE COMMUNITY EDITION CONFIGURATION
3 ; RHODECODE COMMUNITY EDITION CONFIGURATION
5 ; #########################################
4 ; #########################################
6
5
7 [DEFAULT]
6 [DEFAULT]
8 ; Debug flag sets all loggers to debug, and enables request tracking
7 ; Debug flag sets all loggers to debug, and enables request tracking
9 debug = true
8 debug = true
10
9
11 ; ########################################################################
10 ; ########################################################################
12 ; EMAIL CONFIGURATION
11 ; EMAIL CONFIGURATION
13 ; These settings will be used by the RhodeCode mailing system
12 ; These settings will be used by the RhodeCode mailing system
14 ; ########################################################################
13 ; ########################################################################
15
14
16 ; prefix all emails subjects with given prefix, helps filtering out emails
15 ; prefix all emails subjects with given prefix, helps filtering out emails
17 #email_prefix = [RhodeCode]
16 #email_prefix = [RhodeCode]
18
17
19 ; email FROM address all mails will be sent
18 ; email FROM address all mails will be sent
20 #app_email_from = rhodecode-noreply@localhost
19 #app_email_from = rhodecode-noreply@localhost
21
20
22 #smtp_server = mail.server.com
21 #smtp_server = mail.server.com
23 #smtp_username =
22 #smtp_username =
24 #smtp_password =
23 #smtp_password =
25 #smtp_port =
24 #smtp_port =
26 #smtp_use_tls = false
25 #smtp_use_tls = false
27 #smtp_use_ssl = true
26 #smtp_use_ssl = true
28
27
29 [server:main]
28 [server:main]
30 ; COMMON HOST/IP CONFIG, This applies mostly to develop setup,
29 ; COMMON HOST/IP CONFIG, This applies mostly to develop setup,
31 ; Host port for gunicorn are controlled by gunicorn_conf.py
30 ; Host port for gunicorn are controlled by gunicorn_conf.py
32 host = 127.0.0.1
31 host = 127.0.0.1
33 port = 10020
32 port = 10020
34
33
35 ; ##################################################
34 ; ##################################################
36 ; WAITRESS WSGI SERVER - Recommended for Development
35 ; WAITRESS WSGI SERVER - Recommended for Development
37 ; ##################################################
36 ; ##################################################
38
37
39 ; use server type
38 ; use server type
40 use = egg:waitress#main
39 use = egg:waitress#main
41
40
42 ; number of worker threads
41 ; number of worker threads
43 threads = 5
42 threads = 5
44
43
45 ; MAX BODY SIZE 100GB
44 ; MAX BODY SIZE 100GB
46 max_request_body_size = 107374182400
45 max_request_body_size = 107374182400
47
46
48 ; Use poll instead of select, fixes file descriptors limits problems.
47 ; Use poll instead of select, fixes file descriptors limits problems.
49 ; May not work on old windows systems.
48 ; May not work on old windows systems.
50 asyncore_use_poll = true
49 asyncore_use_poll = true
51
50
52
51
53 ; ###########################
52 ; ###########################
54 ; GUNICORN APPLICATION SERVER
53 ; GUNICORN APPLICATION SERVER
55 ; ###########################
54 ; ###########################
56
55
57 ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py
56 ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py
58
57
59 ; Module to use, this setting shouldn't be changed
58 ; Module to use, this setting shouldn't be changed
60 #use = egg:gunicorn#main
59 #use = egg:gunicorn#main
61
60
62 ; Prefix middleware for RhodeCode.
61 ; Prefix middleware for RhodeCode.
63 ; recommended when using proxy setup.
62 ; recommended when using proxy setup.
64 ; allows to set RhodeCode under a prefix in server.
63 ; allows to set RhodeCode under a prefix in server.
65 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
64 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
66 ; And set your prefix like: `prefix = /custom_prefix`
65 ; And set your prefix like: `prefix = /custom_prefix`
67 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
66 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
68 ; to make your cookies only work on prefix url
67 ; to make your cookies only work on prefix url
69 [filter:proxy-prefix]
68 [filter:proxy-prefix]
70 use = egg:PasteDeploy#prefix
69 use = egg:PasteDeploy#prefix
71 prefix = /
70 prefix = /
72
71
73 [app:main]
72 [app:main]
74 ; The %(here)s variable will be replaced with the absolute path of parent directory
73 ; The %(here)s variable will be replaced with the absolute path of parent directory
75 ; of this file
74 ; of this file
76 ; Each option in the app:main can be overridden by an environment variable
75 ; Each option in the app:main can be overridden by an environment variable
77 ;
76 ;
78 ;To override an option:
77 ;To override an option:
79 ;
78 ;
80 ;RC_<KeyName>
79 ;RC_<KeyName>
81 ;Everything should be uppercase, . and - should be replaced by _.
80 ;Everything should be uppercase, . and - should be replaced by _.
82 ;For example, if you have these configuration settings:
81 ;For example, if you have these configuration settings:
83 ;rc_cache.repo_object.backend = foo
82 ;rc_cache.repo_object.backend = foo
84 ;can be overridden by
83 ;can be overridden by
85 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
84 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
86
85
87 use = egg:rhodecode-enterprise-ce
86 use = egg:rhodecode-enterprise-ce
88
87
89 ; enable proxy prefix middleware, defined above
88 ; enable proxy prefix middleware, defined above
90 #filter-with = proxy-prefix
89 #filter-with = proxy-prefix
91
90
92 ; #############
91 ; #############
93 ; DEBUG OPTIONS
92 ; DEBUG OPTIONS
94 ; #############
93 ; #############
95
94
96 pyramid.reload_templates = true
95 pyramid.reload_templates = true
97
96
98 # During development we want to have the debug toolbar enabled
97 # During development we want to have the debug toolbar enabled
99 pyramid.includes =
98 pyramid.includes =
100 pyramid_debugtoolbar
99 pyramid_debugtoolbar
101
100
102 debugtoolbar.hosts = 0.0.0.0/0
101 debugtoolbar.hosts = 0.0.0.0/0
103 debugtoolbar.exclude_prefixes =
102 debugtoolbar.exclude_prefixes =
104 /css
103 /css
105 /fonts
104 /fonts
106 /images
105 /images
107 /js
106 /js
108
107
109 ## RHODECODE PLUGINS ##
108 ## RHODECODE PLUGINS ##
110 rhodecode.includes =
109 rhodecode.includes =
111 rhodecode.api
110 rhodecode.api
112
111
113
112
114 # api prefix url
113 # api prefix url
115 rhodecode.api.url = /_admin/api
114 rhodecode.api.url = /_admin/api
116
115
117 ; enable debug style page
116 ; enable debug style page
118 debug_style = true
117 debug_style = true
119
118
120 ; #################
119 ; #################
121 ; END DEBUG OPTIONS
120 ; END DEBUG OPTIONS
122 ; #################
121 ; #################
123
122
124 ; encryption key used to encrypt social plugin tokens,
123 ; encryption key used to encrypt social plugin tokens,
125 ; remote_urls with credentials etc, if not set it defaults to
124 ; remote_urls with credentials etc, if not set it defaults to
126 ; `beaker.session.secret`
125 ; `beaker.session.secret`
127 #rhodecode.encrypted_values.secret =
126 #rhodecode.encrypted_values.secret =
128
127
129 ; decryption strict mode (enabled by default). It controls if decryption raises
128 ; decryption strict mode (enabled by default). It controls if decryption raises
130 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
129 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
131 #rhodecode.encrypted_values.strict = false
130 #rhodecode.encrypted_values.strict = false
132
131
133 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
132 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
134 ; fernet is safer, and we strongly recommend switching to it.
133 ; fernet is safer, and we strongly recommend switching to it.
135 ; Due to backward compatibility aes is used as default.
134 ; Due to backward compatibility aes is used as default.
136 #rhodecode.encrypted_values.algorithm = fernet
135 #rhodecode.encrypted_values.algorithm = fernet
137
136
138 ; Return gzipped responses from RhodeCode (static files/application)
137 ; Return gzipped responses from RhodeCode (static files/application)
139 gzip_responses = false
138 gzip_responses = false
140
139
141 ; Auto-generate javascript routes file on startup
140 ; Auto-generate javascript routes file on startup
142 generate_js_files = false
141 generate_js_files = false
143
142
144 ; System global default language.
143 ; System global default language.
145 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
144 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
146 lang = en
145 lang = en
147
146
148 ; Perform a full repository scan and import on each server start.
147 ; Perform a full repository scan and import on each server start.
149 ; Setting this to true could lead to very long startup time.
148 ; Setting this to true could lead to very long startup time.
150 startup.import_repos = false
149 startup.import_repos = false
151
150
152 ; URL at which the application is running. This is used for Bootstrapping
151 ; URL at which the application is running. This is used for Bootstrapping
153 ; requests in context when no web request is available. Used in ishell, or
152 ; requests in context when no web request is available. Used in ishell, or
154 ; SSH calls. Set this for events to receive proper url for SSH calls.
153 ; SSH calls. Set this for events to receive proper url for SSH calls.
155 app.base_url = http://rhodecode.local
154 app.base_url = http://rhodecode.local
156
155
157 ; Unique application ID. Should be a random unique string for security.
156 ; Unique application ID. Should be a random unique string for security.
158 app_instance_uuid = rc-production
157 app_instance_uuid = rc-production
159
158
160 ; Cut off limit for large diffs (size in bytes). If overall diff size on
159 ; Cut off limit for large diffs (size in bytes). If overall diff size on
161 ; commit, or pull request exceeds this limit this diff will be displayed
160 ; commit, or pull request exceeds this limit this diff will be displayed
162 ; partially. E.g 512000 == 512Kb
161 ; partially. E.g 512000 == 512Kb
163 cut_off_limit_diff = 512000
162 cut_off_limit_diff = 512000
164
163
165 ; Cut off limit for large files inside diffs (size in bytes). Each individual
164 ; Cut off limit for large files inside diffs (size in bytes). Each individual
166 ; file inside diff which exceeds this limit will be displayed partially.
165 ; file inside diff which exceeds this limit will be displayed partially.
167 ; E.g 128000 == 128Kb
166 ; E.g 128000 == 128Kb
168 cut_off_limit_file = 128000
167 cut_off_limit_file = 128000
169
168
170 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
169 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
171 vcs_full_cache = true
170 vcs_full_cache = true
172
171
173 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
172 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
174 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
173 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
175 force_https = false
174 force_https = false
176
175
177 ; use Strict-Transport-Security headers
176 ; use Strict-Transport-Security headers
178 use_htsts = false
177 use_htsts = false
179
178
180 ; Set to true if your repos are exposed using the dumb protocol
179 ; Set to true if your repos are exposed using the dumb protocol
181 git_update_server_info = false
180 git_update_server_info = false
182
181
183 ; RSS/ATOM feed options
182 ; RSS/ATOM feed options
184 rss_cut_off_limit = 256000
183 rss_cut_off_limit = 256000
185 rss_items_per_page = 10
184 rss_items_per_page = 10
186 rss_include_diff = false
185 rss_include_diff = false
187
186
188 ; gist URL alias, used to create nicer urls for gist. This should be an
187 ; gist URL alias, used to create nicer urls for gist. This should be an
189 ; url that does rewrites to _admin/gists/{gistid}.
188 ; url that does rewrites to _admin/gists/{gistid}.
190 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
189 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
191 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
190 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
192 gist_alias_url =
191 gist_alias_url =
193
192
194 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
193 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
195 ; used for access.
194 ; used for access.
196 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
195 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
197 ; came from the logged in user who owns this authentication token.
196 ; came from the logged in user who owns this authentication token.
198 ; Additionally @TOKEN syntax can be used to bind the view to specific
197 ; Additionally @TOKEN syntax can be used to bind the view to specific
199 ; authentication token. Such view would be only accessible when used together
198 ; authentication token. Such view would be only accessible when used together
200 ; with this authentication token
199 ; with this authentication token
201 ; list of all views can be found under `/_admin/permissions/auth_token_access`
200 ; list of all views can be found under `/_admin/permissions/auth_token_access`
202 ; The list should be "," separated and on a single line.
201 ; The list should be "," separated and on a single line.
203 ; Most common views to enable:
202 ; Most common views to enable:
204
203
205 # RepoCommitsView:repo_commit_download
204 # RepoCommitsView:repo_commit_download
206 # RepoCommitsView:repo_commit_patch
205 # RepoCommitsView:repo_commit_patch
207 # RepoCommitsView:repo_commit_raw
206 # RepoCommitsView:repo_commit_raw
208 # RepoCommitsView:repo_commit_raw@TOKEN
207 # RepoCommitsView:repo_commit_raw@TOKEN
209 # RepoFilesView:repo_files_diff
208 # RepoFilesView:repo_files_diff
210 # RepoFilesView:repo_archivefile
209 # RepoFilesView:repo_archivefile
211 # RepoFilesView:repo_file_raw
210 # RepoFilesView:repo_file_raw
212 # GistView:*
211 # GistView:*
213 api_access_controllers_whitelist =
212 api_access_controllers_whitelist =
214
213
215 ; Default encoding used to convert from and to unicode
214 ; Default encoding used to convert from and to unicode
216 ; can be also a comma separated list of encoding in case of mixed encodings
215 ; can be also a comma separated list of encoding in case of mixed encodings
217 default_encoding = UTF-8
216 default_encoding = UTF-8
218
217
219 ; instance-id prefix
218 ; instance-id prefix
220 ; a prefix key for this instance used for cache invalidation when running
219 ; a prefix key for this instance used for cache invalidation when running
221 ; multiple instances of RhodeCode, make sure it's globally unique for
220 ; multiple instances of RhodeCode, make sure it's globally unique for
222 ; all running RhodeCode instances. Leave empty if you don't use it
221 ; all running RhodeCode instances. Leave empty if you don't use it
223 instance_id =
222 instance_id =
224
223
225 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
224 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
226 ; of an authentication plugin also if it is disabled by its settings.
225 ; of an authentication plugin also if it is disabled by its settings.
227 ; This could be useful if you are unable to log in to the system due to broken
226 ; This could be useful if you are unable to log in to the system due to broken
228 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
227 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
229 ; module to log in again and fix the settings.
228 ; module to log in again and fix the settings.
230 ; Available builtin plugin IDs (hash is part of the ID):
229 ; Available builtin plugin IDs (hash is part of the ID):
231 ; egg:rhodecode-enterprise-ce#rhodecode
230 ; egg:rhodecode-enterprise-ce#rhodecode
232 ; egg:rhodecode-enterprise-ce#pam
231 ; egg:rhodecode-enterprise-ce#pam
233 ; egg:rhodecode-enterprise-ce#ldap
232 ; egg:rhodecode-enterprise-ce#ldap
234 ; egg:rhodecode-enterprise-ce#jasig_cas
233 ; egg:rhodecode-enterprise-ce#jasig_cas
235 ; egg:rhodecode-enterprise-ce#headers
234 ; egg:rhodecode-enterprise-ce#headers
236 ; egg:rhodecode-enterprise-ce#crowd
235 ; egg:rhodecode-enterprise-ce#crowd
237
236
238 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
237 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
239
238
240 ; Flag to control loading of legacy plugins in py:/path format
239 ; Flag to control loading of legacy plugins in py:/path format
241 auth_plugin.import_legacy_plugins = true
240 auth_plugin.import_legacy_plugins = true
242
241
243 ; alternative return HTTP header for failed authentication. Default HTTP
242 ; alternative return HTTP header for failed authentication. Default HTTP
244 ; response is 401 HTTPUnauthorized. Currently HG clients have troubles with
243 ; response is 401 HTTPUnauthorized. Currently HG clients have troubles with
245 ; handling that causing a series of failed authentication calls.
244 ; handling that causing a series of failed authentication calls.
246 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
245 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
247 ; This will be served instead of default 401 on bad authentication
246 ; This will be served instead of default 401 on bad authentication
248 auth_ret_code =
247 auth_ret_code =
249
248
250 ; use special detection method when serving auth_ret_code, instead of serving
249 ; use special detection method when serving auth_ret_code, instead of serving
251 ; ret_code directly, use 401 initially (Which triggers credentials prompt)
250 ; ret_code directly, use 401 initially (Which triggers credentials prompt)
252 ; and then serve auth_ret_code to clients
251 ; and then serve auth_ret_code to clients
253 auth_ret_code_detection = false
252 auth_ret_code_detection = false
254
253
255 ; locking return code. When repository is locked return this HTTP code. 2XX
254 ; locking return code. When repository is locked return this HTTP code. 2XX
256 ; codes don't break the transactions while 4XX codes do
255 ; codes don't break the transactions while 4XX codes do
257 lock_ret_code = 423
256 lock_ret_code = 423
258
257
259 ; allows to change the repository location in settings page
258 ; allows to change the repository location in settings page
260 allow_repo_location_change = true
259 allow_repo_location_change = true
261
260
262 ; allows to setup custom hooks in settings page
261 ; allows to setup custom hooks in settings page
263 allow_custom_hooks_settings = true
262 allow_custom_hooks_settings = true
264
263
265 ; Generated license token required for EE edition license.
264 ; Generated license token required for EE edition license.
266 ; New generated token value can be found in Admin > settings > license page.
265 ; New generated token value can be found in Admin > settings > license page.
267 license_token =
266 license_token =
268
267
269 ; This flag hides sensitive information on the license page such as token, and license data
268 ; This flag hides sensitive information on the license page such as token, and license data
270 license.hide_license_info = false
269 license.hide_license_info = false
271
270
272 ; supervisor connection uri, for managing supervisor and logs.
271 ; supervisor connection uri, for managing supervisor and logs.
273 supervisor.uri =
272 supervisor.uri =
274
273
275 ; supervisord group name/id we only want this RC instance to handle
274 ; supervisord group name/id we only want this RC instance to handle
276 supervisor.group_id = dev
275 supervisor.group_id = dev
277
276
278 ; Display extended labs settings
277 ; Display extended labs settings
279 labs_settings_active = true
278 labs_settings_active = true
280
279
281 ; Custom exception store path, defaults to TMPDIR
280 ; Custom exception store path, defaults to TMPDIR
282 ; This is used to store exception from RhodeCode in shared directory
281 ; This is used to store exception from RhodeCode in shared directory
283 #exception_tracker.store_path =
282 #exception_tracker.store_path =
284
283
285 ; Send email with exception details when it happens
284 ; Send email with exception details when it happens
286 #exception_tracker.send_email = false
285 #exception_tracker.send_email = false
287
286
288 ; Comma separated list of recipients for exception emails,
287 ; Comma separated list of recipients for exception emails,
289 ; e.g. admin@rhodecode.com,devops@rhodecode.com
288 ; e.g. admin@rhodecode.com,devops@rhodecode.com
290 ; Can be left empty, then emails will be sent to ALL super-admins
289 ; Can be left empty, then emails will be sent to ALL super-admins
291 #exception_tracker.send_email_recipients =
290 #exception_tracker.send_email_recipients =
292
291
293 ; optional prefix to Add to email Subject
292 ; optional prefix to Add to email Subject
294 #exception_tracker.email_prefix = [RHODECODE ERROR]
293 #exception_tracker.email_prefix = [RHODECODE ERROR]
295
294
296 ; File store configuration. This is used to store and serve uploaded files
295 ; File store configuration. This is used to store and serve uploaded files
297 file_store.enabled = true
296 file_store.enabled = true
298
297
299 ; Storage backend, available options are: local
298 ; Storage backend, available options are: local
300 file_store.backend = local
299 file_store.backend = local
301
300
302 ; path to store the uploaded binaries
301 ; path to store the uploaded binaries
303 file_store.storage_path = %(here)s/data/file_store
302 file_store.storage_path = %(here)s/data/file_store
304
303
305 ; Uncomment and set this path to control settings for archive download cache.
304 ; Uncomment and set this path to control settings for archive download cache.
306 ; Generated repo archives will be cached at this location
305 ; Generated repo archives will be cached at this location
307 ; and served from the cache during subsequent requests for the same archive of
306 ; and served from the cache during subsequent requests for the same archive of
308 ; the repository. This path is important to be shared across filesystems and with
307 ; the repository. This path is important to be shared across filesystems and with
309 ; RhodeCode and vcsserver
308 ; RhodeCode and vcsserver
310
309
311 ; Default is $cache_dir/archive_cache if not set
310 ; Default is $cache_dir/archive_cache if not set
312 archive_cache.store_dir = %(here)s/data/archive_cache
311 archive_cache.store_dir = %(here)s/data/archive_cache
313
312
314 ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
313 ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
315 archive_cache.cache_size_gb = 10
314 archive_cache.cache_size_gb = 10
316
315
317 ; By default cache uses sharding technique, this specifies how many shards are there
316 ; By default cache uses sharding technique, this specifies how many shards are there
318 archive_cache.cache_shards = 10
317 archive_cache.cache_shards = 10
319
318
320 ; #############
319 ; #############
321 ; CELERY CONFIG
320 ; CELERY CONFIG
322 ; #############
321 ; #############
323
322
324 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
323 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
325
324
326 use_celery = false
325 use_celery = false
327
326
328 ; path to store schedule database
327 ; path to store schedule database
329 #celerybeat-schedule.path =
328 #celerybeat-schedule.path =
330
329
331 ; connection url to the message broker (default redis)
330 ; connection url to the message broker (default redis)
332 celery.broker_url = redis://localhost:6379/8
331 celery.broker_url = redis://redis:6379/8
332
333 ; results backend to get results for (default redis)
334 celery.result_backend = redis://redis:6379/8
333
335
334 ; rabbitmq example
336 ; rabbitmq example
335 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
337 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
336
338
337 ; maximum tasks to execute before worker restart
339 ; maximum tasks to execute before worker restart
338 celery.max_tasks_per_child = 20
340 celery.max_tasks_per_child = 20
339
341
340 ; tasks will never be sent to the queue, but executed locally instead.
342 ; tasks will never be sent to the queue, but executed locally instead.
341 celery.task_always_eager = false
343 celery.task_always_eager = false
342
344
343 ; #############
345 ; #############
344 ; DOGPILE CACHE
346 ; DOGPILE CACHE
345 ; #############
347 ; #############
346
348
347 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
349 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
348 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
350 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
349 cache_dir = %(here)s/data
351 cache_dir = %(here)s/data
350
352
351 ; *********************************************
353 ; *********************************************
352 ; `sql_cache_short` cache for heavy SQL queries
354 ; `sql_cache_short` cache for heavy SQL queries
353 ; Only supported backend is `memory_lru`
355 ; Only supported backend is `memory_lru`
354 ; *********************************************
356 ; *********************************************
355 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
357 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
356 rc_cache.sql_cache_short.expiration_time = 30
358 rc_cache.sql_cache_short.expiration_time = 30
357
359
358
360
359 ; *****************************************************
361 ; *****************************************************
360 ; `cache_repo_longterm` cache for repo object instances
362 ; `cache_repo_longterm` cache for repo object instances
361 ; Only supported backend is `memory_lru`
363 ; Only supported backend is `memory_lru`
362 ; *****************************************************
364 ; *****************************************************
363 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
365 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
364 ; by default we use 30 Days, cache is still invalidated on push
366 ; by default we use 30 Days, cache is still invalidated on push
365 rc_cache.cache_repo_longterm.expiration_time = 2592000
367 rc_cache.cache_repo_longterm.expiration_time = 2592000
366 ; max items in LRU cache, set to smaller number to save memory, and expire last used caches
368 ; max items in LRU cache, set to smaller number to save memory, and expire last used caches
367 rc_cache.cache_repo_longterm.max_size = 10000
369 rc_cache.cache_repo_longterm.max_size = 10000
368
370
369
371
370 ; *********************************************
372 ; *********************************************
371 ; `cache_general` cache for general purpose use
373 ; `cache_general` cache for general purpose use
372 ; for simplicity use rc.file_namespace backend,
374 ; for simplicity use rc.file_namespace backend,
373 ; for performance and scale use rc.redis
375 ; for performance and scale use rc.redis
374 ; *********************************************
376 ; *********************************************
375 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
377 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
376 rc_cache.cache_general.expiration_time = 43200
378 rc_cache.cache_general.expiration_time = 43200
377 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
379 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
378 #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db
380 #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db
379
381
380 ; alternative `cache_general` redis backend with distributed lock
382 ; alternative `cache_general` redis backend with distributed lock
381 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
383 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
382 #rc_cache.cache_general.expiration_time = 300
384 #rc_cache.cache_general.expiration_time = 300
383
385
384 ; redis_expiration_time needs to be greater than expiration_time
386 ; redis_expiration_time needs to be greater than expiration_time
385 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
387 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
386
388
387 #rc_cache.cache_general.arguments.host = localhost
389 #rc_cache.cache_general.arguments.host = localhost
388 #rc_cache.cache_general.arguments.port = 6379
390 #rc_cache.cache_general.arguments.port = 6379
389 #rc_cache.cache_general.arguments.db = 0
391 #rc_cache.cache_general.arguments.db = 0
390 #rc_cache.cache_general.arguments.socket_timeout = 30
392 #rc_cache.cache_general.arguments.socket_timeout = 30
391 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
393 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
392 #rc_cache.cache_general.arguments.distributed_lock = true
394 #rc_cache.cache_general.arguments.distributed_lock = true
393
395
394 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
396 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
395 #rc_cache.cache_general.arguments.lock_auto_renewal = true
397 #rc_cache.cache_general.arguments.lock_auto_renewal = true
396
398
397 ; *************************************************
399 ; *************************************************
398 ; `cache_perms` cache for permission tree, auth TTL
400 ; `cache_perms` cache for permission tree, auth TTL
399 ; for simplicity use rc.file_namespace backend,
401 ; for simplicity use rc.file_namespace backend,
400 ; for performance and scale use rc.redis
402 ; for performance and scale use rc.redis
401 ; *************************************************
403 ; *************************************************
402 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
404 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
403 rc_cache.cache_perms.expiration_time = 3600
405 rc_cache.cache_perms.expiration_time = 3600
404 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
406 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
405 #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db
407 #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db
406
408
407 ; alternative `cache_perms` redis backend with distributed lock
409 ; alternative `cache_perms` redis backend with distributed lock
408 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
410 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
409 #rc_cache.cache_perms.expiration_time = 300
411 #rc_cache.cache_perms.expiration_time = 300
410
412
411 ; redis_expiration_time needs to be greater then expiration_time
413 ; redis_expiration_time needs to be greater then expiration_time
412 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
414 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
413
415
414 #rc_cache.cache_perms.arguments.host = localhost
416 #rc_cache.cache_perms.arguments.host = localhost
415 #rc_cache.cache_perms.arguments.port = 6379
417 #rc_cache.cache_perms.arguments.port = 6379
416 #rc_cache.cache_perms.arguments.db = 0
418 #rc_cache.cache_perms.arguments.db = 0
417 #rc_cache.cache_perms.arguments.socket_timeout = 30
419 #rc_cache.cache_perms.arguments.socket_timeout = 30
418 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
420 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
419 #rc_cache.cache_perms.arguments.distributed_lock = true
421 #rc_cache.cache_perms.arguments.distributed_lock = true
420
422
421 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
423 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
422 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
424 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
423
425
424 ; ***************************************************
426 ; ***************************************************
425 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
427 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
426 ; for simplicity use rc.file_namespace backend,
428 ; for simplicity use rc.file_namespace backend,
427 ; for performance and scale use rc.redis
429 ; for performance and scale use rc.redis
428 ; ***************************************************
430 ; ***************************************************
429 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
431 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
430 rc_cache.cache_repo.expiration_time = 2592000
432 rc_cache.cache_repo.expiration_time = 2592000
431 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
433 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
432 #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db
434 #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db
433
435
434 ; alternative `cache_repo` redis backend with distributed lock
436 ; alternative `cache_repo` redis backend with distributed lock
435 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
437 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
436 #rc_cache.cache_repo.expiration_time = 2592000
438 #rc_cache.cache_repo.expiration_time = 2592000
437
439
438 ; redis_expiration_time needs to be greater then expiration_time
440 ; redis_expiration_time needs to be greater then expiration_time
439 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
441 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
440
442
441 #rc_cache.cache_repo.arguments.host = localhost
443 #rc_cache.cache_repo.arguments.host = localhost
442 #rc_cache.cache_repo.arguments.port = 6379
444 #rc_cache.cache_repo.arguments.port = 6379
443 #rc_cache.cache_repo.arguments.db = 1
445 #rc_cache.cache_repo.arguments.db = 1
444 #rc_cache.cache_repo.arguments.socket_timeout = 30
446 #rc_cache.cache_repo.arguments.socket_timeout = 30
445 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
447 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
446 #rc_cache.cache_repo.arguments.distributed_lock = true
448 #rc_cache.cache_repo.arguments.distributed_lock = true
447
449
448 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
450 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
449 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
451 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
450
452
451 ; ##############
453 ; ##############
452 ; BEAKER SESSION
454 ; BEAKER SESSION
453 ; ##############
455 ; ##############
454
456
455 ; beaker.session.type is type of storage options for the logged users sessions. Current allowed
457 ; beaker.session.type is type of storage options for the logged users sessions. Current allowed
456 ; types are file, ext:redis, ext:database, ext:memcached
458 ; types are file, ext:redis, ext:database, ext:memcached
457 ; Fastest ones are ext:redis and ext:database, DO NOT use memory type for session
459 ; Fastest ones are ext:redis and ext:database, DO NOT use memory type for session
458 beaker.session.type = file
460 beaker.session.type = file
459 beaker.session.data_dir = %(here)s/data/sessions
461 beaker.session.data_dir = %(here)s/data/sessions
460
462
461 ; Redis based sessions
463 ; Redis based sessions
462 #beaker.session.type = ext:redis
464 #beaker.session.type = ext:redis
463 #beaker.session.url = redis://127.0.0.1:6379/2
465 #beaker.session.url = redis://127.0.0.1:6379/2
464
466
465 ; DB based session, fast, and allows easy management over logged in users
467 ; DB based session, fast, and allows easy management over logged in users
466 #beaker.session.type = ext:database
468 #beaker.session.type = ext:database
467 #beaker.session.table_name = db_session
469 #beaker.session.table_name = db_session
468 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
470 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
469 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
471 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
470 #beaker.session.sa.pool_recycle = 3600
472 #beaker.session.sa.pool_recycle = 3600
471 #beaker.session.sa.echo = false
473 #beaker.session.sa.echo = false
472
474
473 beaker.session.key = rhodecode
475 beaker.session.key = rhodecode
474 beaker.session.secret = develop-rc-uytcxaz
476 beaker.session.secret = develop-rc-uytcxaz
475 beaker.session.lock_dir = %(here)s/data/sessions/lock
477 beaker.session.lock_dir = %(here)s/data/sessions/lock
476
478
477 ; Secure encrypted cookie. Requires AES and AES python libraries
479 ; Secure encrypted cookie. Requires AES and AES python libraries
478 ; you must disable beaker.session.secret to use this
480 ; you must disable beaker.session.secret to use this
479 #beaker.session.encrypt_key = key_for_encryption
481 #beaker.session.encrypt_key = key_for_encryption
480 #beaker.session.validate_key = validation_key
482 #beaker.session.validate_key = validation_key
481
483
482 ; Sets session as invalid (also logging out user) if it haven not been
484 ; Sets session as invalid (also logging out user) if it haven not been
483 ; accessed for given amount of time in seconds
485 ; accessed for given amount of time in seconds
484 beaker.session.timeout = 2592000
486 beaker.session.timeout = 2592000
485 beaker.session.httponly = true
487 beaker.session.httponly = true
486
488
487 ; Path to use for the cookie. Set to prefix if you use prefix middleware
489 ; Path to use for the cookie. Set to prefix if you use prefix middleware
488 #beaker.session.cookie_path = /custom_prefix
490 #beaker.session.cookie_path = /custom_prefix
489
491
490 ; Set https secure cookie
492 ; Set https secure cookie
491 beaker.session.secure = false
493 beaker.session.secure = false
492
494
493 ; default cookie expiration time in seconds, set to `true` to set expire
495 ; default cookie expiration time in seconds, set to `true` to set expire
494 ; at browser close
496 ; at browser close
495 #beaker.session.cookie_expires = 3600
497 #beaker.session.cookie_expires = 3600
496
498
497 ; #############################
499 ; #############################
498 ; SEARCH INDEXING CONFIGURATION
500 ; SEARCH INDEXING CONFIGURATION
499 ; #############################
501 ; #############################
500
502
501 ; Full text search indexer is available in rhodecode-tools under
503 ; Full text search indexer is available in rhodecode-tools under
502 ; `rhodecode-tools index` command
504 ; `rhodecode-tools index` command
503
505
504 ; WHOOSH Backend, doesn't require additional services to run
506 ; WHOOSH Backend, doesn't require additional services to run
505 ; it works good with few dozen repos
507 ; it works good with few dozen repos
506 search.module = rhodecode.lib.index.whoosh
508 search.module = rhodecode.lib.index.whoosh
507 search.location = %(here)s/data/index
509 search.location = %(here)s/data/index
508
510
509 ; ####################
511 ; ####################
510 ; CHANNELSTREAM CONFIG
512 ; CHANNELSTREAM CONFIG
511 ; ####################
513 ; ####################
512
514
513 ; channelstream enables persistent connections and live notification
515 ; channelstream enables persistent connections and live notification
514 ; in the system. It's also used by the chat system
516 ; in the system. It's also used by the chat system
515
517
516 channelstream.enabled = false
518 channelstream.enabled = false
517
519
518 ; server address for channelstream server on the backend
520 ; server address for channelstream server on the backend
519 channelstream.server = 127.0.0.1:9800
521 channelstream.server = 127.0.0.1:9800
520
522
521 ; location of the channelstream server from outside world
523 ; location of the channelstream server from outside world
522 ; use ws:// for http or wss:// for https. This address needs to be handled
524 ; use ws:// for http or wss:// for https. This address needs to be handled
523 ; by external HTTP server such as Nginx or Apache
525 ; by external HTTP server such as Nginx or Apache
524 ; see Nginx/Apache configuration examples in our docs
526 ; see Nginx/Apache configuration examples in our docs
525 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
527 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
526 channelstream.secret = secret
528 channelstream.secret = secret
527 channelstream.history.location = %(here)s/channelstream_history
529 channelstream.history.location = %(here)s/channelstream_history
528
530
529 ; Internal application path that Javascript uses to connect into.
531 ; Internal application path that Javascript uses to connect into.
530 ; If you use proxy-prefix the prefix should be added before /_channelstream
532 ; If you use proxy-prefix the prefix should be added before /_channelstream
531 channelstream.proxy_path = /_channelstream
533 channelstream.proxy_path = /_channelstream
532
534
533
535
534 ; ##############################
536 ; ##############################
535 ; MAIN RHODECODE DATABASE CONFIG
537 ; MAIN RHODECODE DATABASE CONFIG
536 ; ##############################
538 ; ##############################
537
539
538 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
540 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
539 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
541 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
540 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
542 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
541 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
543 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
542 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
544 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
543
545
544 sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
546 sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
545
547
546 ; see sqlalchemy docs for other advanced settings
548 ; see sqlalchemy docs for other advanced settings
547 ; print the sql statements to output
549 ; print the sql statements to output
548 sqlalchemy.db1.echo = false
550 sqlalchemy.db1.echo = false
549
551
550 ; recycle the connections after this amount of seconds
552 ; recycle the connections after this amount of seconds
551 sqlalchemy.db1.pool_recycle = 3600
553 sqlalchemy.db1.pool_recycle = 3600
552
554
553 ; the number of connections to keep open inside the connection pool.
555 ; the number of connections to keep open inside the connection pool.
554 ; 0 indicates no limit
556 ; 0 indicates no limit
555 ; the general calculus with gevent is:
557 ; the general calculus with gevent is:
556 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
558 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
557 ; then increase pool size + max overflow so that they add up to 500.
559 ; then increase pool size + max overflow so that they add up to 500.
558 #sqlalchemy.db1.pool_size = 5
560 #sqlalchemy.db1.pool_size = 5
559
561
560 ; The number of connections to allow in connection pool "overflow", that is
562 ; The number of connections to allow in connection pool "overflow", that is
561 ; connections that can be opened above and beyond the pool_size setting,
563 ; connections that can be opened above and beyond the pool_size setting,
562 ; which defaults to five.
564 ; which defaults to five.
563 #sqlalchemy.db1.max_overflow = 10
565 #sqlalchemy.db1.max_overflow = 10
564
566
565 ; Connection check ping, used to detect broken database connections
567 ; Connection check ping, used to detect broken database connections
566 ; could be enabled to better handle cases if MySQL has gone away errors
568 ; could be enabled to better handle cases if MySQL has gone away errors
567 #sqlalchemy.db1.ping_connection = true
569 #sqlalchemy.db1.ping_connection = true
568
570
569 ; ##########
571 ; ##########
570 ; VCS CONFIG
572 ; VCS CONFIG
571 ; ##########
573 ; ##########
572 vcs.server.enable = true
574 vcs.server.enable = true
573 vcs.server = localhost:9900
575 vcs.server = localhost:9900
574
576
575 ; Web server connectivity protocol, responsible for web based VCS operations
577 ; Web server connectivity protocol, responsible for web based VCS operations
576 ; Available protocols are:
578 ; Available protocols are:
577 ; `http` - use http-rpc backend (default)
579 ; `http` - use http-rpc backend (default)
578 vcs.server.protocol = http
580 vcs.server.protocol = http
579
581
580 ; Push/Pull operations protocol, available options are:
582 ; Push/Pull operations protocol, available options are:
581 ; `http` - use http-rpc backend (default)
583 ; `http` - use http-rpc backend (default)
582 vcs.scm_app_implementation = http
584 vcs.scm_app_implementation = http
583
585
584 ; Push/Pull operations hooks protocol, available options are:
586 ; Push/Pull operations hooks protocol, available options are:
585 ; `http` - use http-rpc backend (default)
587 ; `http` - use http-rpc backend (default)
586 vcs.hooks.protocol = http
588 vcs.hooks.protocol = http
587
589
588 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
590 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
589 ; accessible via network.
591 ; accessible via network.
590 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
592 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
591 vcs.hooks.host = *
593 vcs.hooks.host = *
592
594
593 ; Start VCSServer with this instance as a subprocess, useful for development
595 ; Start VCSServer with this instance as a subprocess, useful for development
594 vcs.start_server = false
596 vcs.start_server = false
595
597
596 ; List of enabled VCS backends, available options are:
598 ; List of enabled VCS backends, available options are:
597 ; `hg` - mercurial
599 ; `hg` - mercurial
598 ; `git` - git
600 ; `git` - git
599 ; `svn` - subversion
601 ; `svn` - subversion
600 vcs.backends = hg, git, svn
602 vcs.backends = hg, git, svn
601
603
602 ; Wait this number of seconds before killing connection to the vcsserver
604 ; Wait this number of seconds before killing connection to the vcsserver
603 vcs.connection_timeout = 3600
605 vcs.connection_timeout = 3600
604
606
605 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
607 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
606 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
608 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
607 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
609 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
608 #vcs.svn.compatible_version = 1.8
610 #vcs.svn.compatible_version = 1.8
609
611
610 ; Cache flag to cache vcsserver remote calls locally
612 ; Cache flag to cache vcsserver remote calls locally
611 ; It uses cache_region `cache_repo`
613 ; It uses cache_region `cache_repo`
612 vcs.methods.cache = true
614 vcs.methods.cache = true
613
615
614 ; ####################################################
616 ; ####################################################
615 ; Subversion proxy support (mod_dav_svn)
617 ; Subversion proxy support (mod_dav_svn)
616 ; Maps RhodeCode repo groups into SVN paths for Apache
618 ; Maps RhodeCode repo groups into SVN paths for Apache
617 ; ####################################################
619 ; ####################################################
618
620
619 ; Enable or disable the config file generation.
621 ; Enable or disable the config file generation.
620 svn.proxy.generate_config = false
622 svn.proxy.generate_config = false
621
623
622 ; Generate config file with `SVNListParentPath` set to `On`.
624 ; Generate config file with `SVNListParentPath` set to `On`.
623 svn.proxy.list_parent_path = true
625 svn.proxy.list_parent_path = true
624
626
625 ; Set location and file name of generated config file.
627 ; Set location and file name of generated config file.
626 svn.proxy.config_file_path = %(here)s/mod_dav_svn.conf
628 svn.proxy.config_file_path = %(here)s/mod_dav_svn.conf
627
629
628 ; alternative mod_dav config template. This needs to be a valid mako template
630 ; alternative mod_dav config template. This needs to be a valid mako template
629 ; Example template can be found in the source code:
631 ; Example template can be found in the source code:
630 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
632 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
631 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
633 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
632
634
633 ; Used as a prefix to the `Location` block in the generated config file.
635 ; Used as a prefix to the `Location` block in the generated config file.
634 ; In most cases it should be set to `/`.
636 ; In most cases it should be set to `/`.
635 svn.proxy.location_root = /
637 svn.proxy.location_root = /
636
638
637 ; Command to reload the mod dav svn configuration on change.
639 ; Command to reload the mod dav svn configuration on change.
638 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
640 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
639 ; Make sure user who runs RhodeCode process is allowed to reload Apache
641 ; Make sure user who runs RhodeCode process is allowed to reload Apache
640 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
642 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
641
643
642 ; If the timeout expires before the reload command finishes, the command will
644 ; If the timeout expires before the reload command finishes, the command will
643 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
645 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
644 #svn.proxy.reload_timeout = 10
646 #svn.proxy.reload_timeout = 10
645
647
646 ; ####################
648 ; ####################
647 ; SSH Support Settings
649 ; SSH Support Settings
648 ; ####################
650 ; ####################
649
651
650 ; Defines if a custom authorized_keys file should be created and written on
652 ; Defines if a custom authorized_keys file should be created and written on
651 ; any change user ssh keys. Setting this to false also disables possibility
653 ; any change user ssh keys. Setting this to false also disables possibility
652 ; of adding SSH keys by users from web interface. Super admins can still
654 ; of adding SSH keys by users from web interface. Super admins can still
653 ; manage SSH Keys.
655 ; manage SSH Keys.
654 ssh.generate_authorized_keyfile = false
656 ssh.generate_authorized_keyfile = false
655
657
656 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
658 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
657 # ssh.authorized_keys_ssh_opts =
659 # ssh.authorized_keys_ssh_opts =
658
660
659 ; Path to the authorized_keys file where the generate entries are placed.
661 ; Path to the authorized_keys file where the generate entries are placed.
660 ; It is possible to have multiple key files specified in `sshd_config` e.g.
662 ; It is possible to have multiple key files specified in `sshd_config` e.g.
661 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
663 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
662 ssh.authorized_keys_file_path = ~/.ssh/authorized_keys_rhodecode
664 ssh.authorized_keys_file_path = ~/.ssh/authorized_keys_rhodecode
663
665
664 ; Command to execute the SSH wrapper. The binary is available in the
666 ; Command to execute the SSH wrapper. The binary is available in the
665 ; RhodeCode installation directory.
667 ; RhodeCode installation directory.
666 ; e.g ~/.rccontrol/community-1/profile/bin/rc-ssh-wrapper
668 ; e.g ~/.rccontrol/community-1/profile/bin/rc-ssh-wrapper
667 ssh.wrapper_cmd = ~/.rccontrol/community-1/rc-ssh-wrapper
669 ssh.wrapper_cmd = ~/.rccontrol/community-1/rc-ssh-wrapper
668
670
669 ; Allow shell when executing the ssh-wrapper command
671 ; Allow shell when executing the ssh-wrapper command
670 ssh.wrapper_cmd_allow_shell = false
672 ssh.wrapper_cmd_allow_shell = false
671
673
672 ; Enables logging, and detailed output send back to the client during SSH
674 ; Enables logging, and detailed output send back to the client during SSH
673 ; operations. Useful for debugging, shouldn't be used in production.
675 ; operations. Useful for debugging, shouldn't be used in production.
674 ssh.enable_debug_logging = true
676 ssh.enable_debug_logging = true
675
677
676 ; Paths to binary executable, by default they are the names, but we can
678 ; Paths to binary executable, by default they are the names, but we can
677 ; override them if we want to use a custom one
679 ; override them if we want to use a custom one
678 ssh.executable.hg = ~/.rccontrol/vcsserver-1/profile/bin/hg
680 ssh.executable.hg = ~/.rccontrol/vcsserver-1/profile/bin/hg
679 ssh.executable.git = ~/.rccontrol/vcsserver-1/profile/bin/git
681 ssh.executable.git = ~/.rccontrol/vcsserver-1/profile/bin/git
680 ssh.executable.svn = ~/.rccontrol/vcsserver-1/profile/bin/svnserve
682 ssh.executable.svn = ~/.rccontrol/vcsserver-1/profile/bin/svnserve
681
683
682 ; Enables SSH key generator web interface. Disabling this still allows users
684 ; Enables SSH key generator web interface. Disabling this still allows users
683 ; to add their own keys.
685 ; to add their own keys.
684 ssh.enable_ui_key_generator = true
686 ssh.enable_ui_key_generator = true
685
687
686
688
687 ; #################
689 ; #################
688 ; APPENLIGHT CONFIG
690 ; APPENLIGHT CONFIG
689 ; #################
691 ; #################
690
692
691 ; Appenlight is tailored to work with RhodeCode, see
693 ; Appenlight is tailored to work with RhodeCode, see
692 ; http://appenlight.rhodecode.com for details how to obtain an account
694 ; http://appenlight.rhodecode.com for details how to obtain an account
693
695
694 ; Appenlight integration enabled
696 ; Appenlight integration enabled
695 #appenlight = false
697 #appenlight = false
696
698
697 #appenlight.server_url = https://api.appenlight.com
699 #appenlight.server_url = https://api.appenlight.com
698 #appenlight.api_key = YOUR_API_KEY
700 #appenlight.api_key = YOUR_API_KEY
699 #appenlight.transport_config = https://api.appenlight.com?threaded=1&timeout=5
701 #appenlight.transport_config = https://api.appenlight.com?threaded=1&timeout=5
700
702
701 ; used for JS client
703 ; used for JS client
702 #appenlight.api_public_key = YOUR_API_PUBLIC_KEY
704 #appenlight.api_public_key = YOUR_API_PUBLIC_KEY
703
705
704 ; TWEAK AMOUNT OF INFO SENT HERE
706 ; TWEAK AMOUNT OF INFO SENT HERE
705
707
706 ; enables 404 error logging (default False)
708 ; enables 404 error logging (default False)
707 #appenlight.report_404 = false
709 #appenlight.report_404 = false
708
710
709 ; time in seconds after request is considered being slow (default 1)
711 ; time in seconds after request is considered being slow (default 1)
710 #appenlight.slow_request_time = 1
712 #appenlight.slow_request_time = 1
711
713
712 ; record slow requests in application
714 ; record slow requests in application
713 ; (needs to be enabled for slow datastore recording and time tracking)
715 ; (needs to be enabled for slow datastore recording and time tracking)
714 #appenlight.slow_requests = true
716 #appenlight.slow_requests = true
715
717
716 ; enable hooking to application loggers
718 ; enable hooking to application loggers
717 #appenlight.logging = true
719 #appenlight.logging = true
718
720
719 ; minimum log level for log capture
721 ; minimum log level for log capture
720 #ppenlight.logging.level = WARNING
722 #ppenlight.logging.level = WARNING
721
723
722 ; send logs only from erroneous/slow requests
724 ; send logs only from erroneous/slow requests
723 ; (saves API quota for intensive logging)
725 ; (saves API quota for intensive logging)
724 #appenlight.logging_on_error = false
726 #appenlight.logging_on_error = false
725
727
726 ; list of additional keywords that should be grabbed from environ object
728 ; list of additional keywords that should be grabbed from environ object
727 ; can be string with comma separated list of words in lowercase
729 ; can be string with comma separated list of words in lowercase
728 ; (by default client will always send following info:
730 ; (by default client will always send following info:
729 ; 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that
731 ; 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that
730 ; start with HTTP* this list be extended with additional keywords here
732 ; start with HTTP* this list be extended with additional keywords here
731 #appenlight.environ_keys_whitelist =
733 #appenlight.environ_keys_whitelist =
732
734
733 ; list of keywords that should be blanked from request object
735 ; list of keywords that should be blanked from request object
734 ; can be string with comma separated list of words in lowercase
736 ; can be string with comma separated list of words in lowercase
735 ; (by default client will always blank keys that contain following words
737 ; (by default client will always blank keys that contain following words
736 ; 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf'
738 ; 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf'
737 ; this list be extended with additional keywords set here
739 ; this list be extended with additional keywords set here
738 #appenlight.request_keys_blacklist =
740 #appenlight.request_keys_blacklist =
739
741
740 ; list of namespaces that should be ignores when gathering log entries
742 ; list of namespaces that should be ignores when gathering log entries
741 ; can be string with comma separated list of namespaces
743 ; can be string with comma separated list of namespaces
742 ; (by default the client ignores own entries: appenlight_client.client)
744 ; (by default the client ignores own entries: appenlight_client.client)
743 #appenlight.log_namespace_blacklist =
745 #appenlight.log_namespace_blacklist =
744
746
745 ; Statsd client config, this is used to send metrics to statsd
747 ; Statsd client config, this is used to send metrics to statsd
746 ; We recommend setting statsd_exported and scrape them using Prometheus
748 ; We recommend setting statsd_exported and scrape them using Prometheus
747 #statsd.enabled = false
749 #statsd.enabled = false
748 #statsd.statsd_host = 0.0.0.0
750 #statsd.statsd_host = 0.0.0.0
749 #statsd.statsd_port = 8125
751 #statsd.statsd_port = 8125
750 #statsd.statsd_prefix =
752 #statsd.statsd_prefix =
751 #statsd.statsd_ipv6 = false
753 #statsd.statsd_ipv6 = false
752
754
753 ; configure logging automatically at server startup set to false
755 ; configure logging automatically at server startup set to false
754 ; to use the below custom logging config.
756 ; to use the below custom logging config.
755 ; RC_LOGGING_FORMATTER
757 ; RC_LOGGING_FORMATTER
756 ; RC_LOGGING_LEVEL
758 ; RC_LOGGING_LEVEL
757 ; env variables can control the settings for logging in case of autoconfigure
759 ; env variables can control the settings for logging in case of autoconfigure
758
760
759 #logging.autoconfigure = true
761 #logging.autoconfigure = true
760
762
761 ; specify your own custom logging config file to configure logging
763 ; specify your own custom logging config file to configure logging
762 #logging.logging_conf_file = /path/to/custom_logging.ini
764 #logging.logging_conf_file = /path/to/custom_logging.ini
763
765
764 ; Dummy marker to add new entries after.
766 ; Dummy marker to add new entries after.
765 ; Add any custom entries below. Please don't remove this marker.
767 ; Add any custom entries below. Please don't remove this marker.
766 custom.conf = 1
768 custom.conf = 1
767
769
768
770
769 ; #####################
771 ; #####################
770 ; LOGGING CONFIGURATION
772 ; LOGGING CONFIGURATION
771 ; #####################
773 ; #####################
772
774
773 [loggers]
775 [loggers]
774 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper
776 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper
775
777
776 [handlers]
778 [handlers]
777 keys = console, console_sql
779 keys = console, console_sql
778
780
779 [formatters]
781 [formatters]
780 keys = generic, json, color_formatter, color_formatter_sql
782 keys = generic, json, color_formatter, color_formatter_sql
781
783
782 ; #######
784 ; #######
783 ; LOGGERS
785 ; LOGGERS
784 ; #######
786 ; #######
785 [logger_root]
787 [logger_root]
786 level = NOTSET
788 level = NOTSET
787 handlers = console
789 handlers = console
788
790
789 [logger_sqlalchemy]
791 [logger_sqlalchemy]
790 level = INFO
792 level = INFO
791 handlers = console_sql
793 handlers = console_sql
792 qualname = sqlalchemy.engine
794 qualname = sqlalchemy.engine
793 propagate = 0
795 propagate = 0
794
796
795 [logger_beaker]
797 [logger_beaker]
796 level = DEBUG
798 level = DEBUG
797 handlers =
799 handlers =
798 qualname = beaker.container
800 qualname = beaker.container
799 propagate = 1
801 propagate = 1
800
802
801 [logger_rhodecode]
803 [logger_rhodecode]
802 level = DEBUG
804 level = DEBUG
803 handlers =
805 handlers =
804 qualname = rhodecode
806 qualname = rhodecode
805 propagate = 1
807 propagate = 1
806
808
807 [logger_ssh_wrapper]
809 [logger_ssh_wrapper]
808 level = DEBUG
810 level = DEBUG
809 handlers =
811 handlers =
810 qualname = ssh_wrapper
812 qualname = ssh_wrapper
811 propagate = 1
813 propagate = 1
812
814
813 [logger_celery]
815 [logger_celery]
814 level = DEBUG
816 level = DEBUG
815 handlers =
817 handlers =
816 qualname = celery
818 qualname = celery
817
819
818
820
819 ; ########
821 ; ########
820 ; HANDLERS
822 ; HANDLERS
821 ; ########
823 ; ########
822
824
823 [handler_console]
825 [handler_console]
824 class = StreamHandler
826 class = StreamHandler
825 args = (sys.stderr, )
827 args = (sys.stderr, )
826 level = DEBUG
828 level = DEBUG
827 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
829 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
828 ; This allows sending properly formatted logs to grafana loki or elasticsearch
830 ; This allows sending properly formatted logs to grafana loki or elasticsearch
829 formatter = color_formatter
831 formatter = color_formatter
830
832
831 [handler_console_sql]
833 [handler_console_sql]
832 ; "level = DEBUG" logs SQL queries and results.
834 ; "level = DEBUG" logs SQL queries and results.
833 ; "level = INFO" logs SQL queries.
835 ; "level = INFO" logs SQL queries.
834 ; "level = WARN" logs neither. (Recommended for production systems.)
836 ; "level = WARN" logs neither. (Recommended for production systems.)
835 class = StreamHandler
837 class = StreamHandler
836 args = (sys.stderr, )
838 args = (sys.stderr, )
837 level = WARN
839 level = WARN
838 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
840 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
839 ; This allows sending properly formatted logs to grafana loki or elasticsearch
841 ; This allows sending properly formatted logs to grafana loki or elasticsearch
840 formatter = color_formatter_sql
842 formatter = color_formatter_sql
841
843
842 ; ##########
844 ; ##########
843 ; FORMATTERS
845 ; FORMATTERS
844 ; ##########
846 ; ##########
845
847
846 [formatter_generic]
848 [formatter_generic]
847 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
849 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
848 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
850 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
849 datefmt = %Y-%m-%d %H:%M:%S
851 datefmt = %Y-%m-%d %H:%M:%S
850
852
851 [formatter_color_formatter]
853 [formatter_color_formatter]
852 class = rhodecode.lib.logging_formatter.ColorFormatter
854 class = rhodecode.lib.logging_formatter.ColorFormatter
853 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
855 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
854 datefmt = %Y-%m-%d %H:%M:%S
856 datefmt = %Y-%m-%d %H:%M:%S
855
857
856 [formatter_color_formatter_sql]
858 [formatter_color_formatter_sql]
857 class = rhodecode.lib.logging_formatter.ColorFormatterSql
859 class = rhodecode.lib.logging_formatter.ColorFormatterSql
858 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
860 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
859 datefmt = %Y-%m-%d %H:%M:%S
861 datefmt = %Y-%m-%d %H:%M:%S
860
862
861 [formatter_json]
863 [formatter_json]
862 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
864 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
863 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
865 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
@@ -1,518 +1,520 b''
1 """
1 """
2 Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
2 Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
3 Gunicorn configuration should be managed by .ini files entries of RhodeCode or VCSServer
3 Gunicorn configuration should be managed by .ini files entries of RhodeCode or VCSServer
4 """
4 """
5
5
6 import gc
6 import gc
7 import os
7 import os
8 import sys
8 import sys
9 import math
9 import math
10 import time
10 import time
11 import threading
11 import threading
12 import traceback
12 import traceback
13 import random
13 import random
14 import socket
14 import socket
15 import dataclasses
15 import dataclasses
16 from gunicorn.glogging import Logger
16 from gunicorn.glogging import Logger
17
17
18
18
19 def get_workers():
19 def get_workers():
20 import multiprocessing
20 import multiprocessing
21 return multiprocessing.cpu_count() * 2 + 1
21 return multiprocessing.cpu_count() * 2 + 1
22
22
23
23
24 bind = "127.0.0.1:10020"
24 bind = "127.0.0.1:10020"
25
25
26
26
27 # Error logging output for gunicorn (-) is stdout
27 # Error logging output for gunicorn (-) is stdout
28 errorlog = '-'
28 errorlog = '-'
29
29
30 # Access logging output for gunicorn (-) is stdout
30 # Access logging output for gunicorn (-) is stdout
31 accesslog = '-'
31 accesslog = '-'
32
32
33
33
34 # SERVER MECHANICS
34 # SERVER MECHANICS
35 # None == system temp dir
35 # None == system temp dir
36 # worker_tmp_dir is recommended to be set to some tmpfs
36 # worker_tmp_dir is recommended to be set to some tmpfs
37 worker_tmp_dir = None
37 worker_tmp_dir = None
38 tmp_upload_dir = None
38 tmp_upload_dir = None
39
39
40 # use re-use port logic
40 # use re-use port logic
41 #reuse_port = True
41 #reuse_port = True
42
42
43 # Custom log format
43 # Custom log format
44 #access_log_format = (
44 #access_log_format = (
45 # '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
45 # '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
46
46
47 # loki format for easier parsing in grafana
47 # loki format for easier parsing in grafana
48 access_log_format = (
48 access_log_format = (
49 'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
49 'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
50
50
51
52 # Sets the number of process workers. More workers means more concurrent connections
53 # RhodeCode can handle at the same time. Each additional worker also it increases
54 # memory usage as each has its own set of caches.
55 # The Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
56 # than 8-10 unless for huge deployments .e.g 700-1000 users.
57 # `instance_id = *` must be set in the [app:main] section below (which is the default)
58 # when using more than 1 worker.
59 workers = 4
60
61 # self adjust workers based on CPU count, to use maximum of CPU and not overquota the resources
51 # self adjust workers based on CPU count, to use maximum of CPU and not overquota the resources
62 # workers = get_workers()
52 # workers = get_workers()
63
53
64 # Gunicorn access log level
54 # Gunicorn access log level
65 loglevel = 'info'
55 loglevel = 'info'
66
56
67 # Process name visible in a process list
57 # Process name visible in a process list
68 proc_name = 'rhodecode_enterprise'
58 proc_name = 'rhodecode_enterprise'
69
59
70 # Type of worker class, one of `sync`, `gevent` or `gthread`
60 # Type of worker class, one of `sync`, `gevent` or `gthread`
71 # currently `sync` is the only option allowed for vcsserver and for rhodecode all of 3 are allowed
61 # currently `sync` is the only option allowed for vcsserver and for rhodecode all of 3 are allowed
72 # gevent:
62 # gevent:
73 # In this case, the maximum number of concurrent requests is (N workers * X worker_connections)
63 # In this case, the maximum number of concurrent requests is (N workers * X worker_connections)
74 # e.g. workers =3 worker_connections=10 = 3*10, 30 concurrent requests can be handled
64 # e.g. workers =3 worker_connections=10 = 3*10, 30 concurrent requests can be handled
75 # gtrhead:
65 # gthread:
76 # In this case, the maximum number of concurrent requests is (N workers * X threads)
66 # In this case, the maximum number of concurrent requests is (N workers * X threads)
77 # e.g. workers = 3 threads=3 = 3*3, 9 concurrent requests can be handled
67 # e.g. workers = 3 threads=3 = 3*3, 9 concurrent requests can be handled
78 worker_class = 'gevent'
68 worker_class = 'gthread'
69
70 # Sets the number of process workers. More workers means more concurrent connections
71 # RhodeCode can handle at the same time. Each additional worker also it increases
72 # memory usage as each has its own set of caches.
73 # The Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
74 # than 8-10 unless for huge deployments .e.g 700-1000 users.
75 # `instance_id = *` must be set in the [app:main] section below (which is the default)
76 # when using more than 1 worker.
77 workers = 2
78
79 # Threads numbers for worker class gthread
80 threads = 1
79
81
80 # The maximum number of simultaneous clients. Valid only for gevent
82 # The maximum number of simultaneous clients. Valid only for gevent
81 # In this case, the maximum number of concurrent requests is (N workers * X worker_connections)
83 # In this case, the maximum number of concurrent requests is (N workers * X worker_connections)
82 # e.g workers =3 worker_connections=10 = 3*10, 30 concurrent requests can be handled
84 # e.g workers =3 worker_connections=10 = 3*10, 30 concurrent requests can be handled
83 worker_connections = 10
85 worker_connections = 10
84
86
85 # Max number of requests that worker will handle before being gracefully restarted.
87 # Max number of requests that worker will handle before being gracefully restarted.
86 # Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
88 # Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
87 max_requests = 2000
89 max_requests = 2000
88 max_requests_jitter = int(max_requests * 0.2) # 20% of max_requests
90 max_requests_jitter = int(max_requests * 0.2) # 20% of max_requests
89
91
90 # The maximum number of pending connections.
92 # The maximum number of pending connections.
91 # Exceeding this number results in the client getting an error when attempting to connect.
93 # Exceeding this number results in the client getting an error when attempting to connect.
92 backlog = 64
94 backlog = 64
93
95
94 # The Amount of time a worker can spend with handling a request before it
96 # The Amount of time a worker can spend with handling a request before it
95 # gets killed and restarted. By default, set to 21600 (6hrs)
97 # gets killed and restarted. By default, set to 21600 (6hrs)
96 # Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
98 # Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
97 timeout = 21600
99 timeout = 21600
98
100
99 # The maximum size of HTTP request line in bytes.
101 # The maximum size of HTTP request line in bytes.
100 # 0 for unlimited
102 # 0 for unlimited
101 limit_request_line = 0
103 limit_request_line = 0
102
104
103 # Limit the number of HTTP headers fields in a request.
105 # Limit the number of HTTP headers fields in a request.
104 # By default this value is 100 and can't be larger than 32768.
106 # By default this value is 100 and can't be larger than 32768.
105 limit_request_fields = 32768
107 limit_request_fields = 32768
106
108
107 # Limit the allowed size of an HTTP request header field.
109 # Limit the allowed size of an HTTP request header field.
108 # Value is a positive number or 0.
110 # Value is a positive number or 0.
109 # Setting it to 0 will allow unlimited header field sizes.
111 # Setting it to 0 will allow unlimited header field sizes.
110 limit_request_field_size = 0
112 limit_request_field_size = 0
111
113
112 # Timeout for graceful workers restart.
114 # Timeout for graceful workers restart.
113 # After receiving a restart signal, workers have this much time to finish
115 # After receiving a restart signal, workers have this much time to finish
114 # serving requests. Workers still alive after the timeout (starting from the
116 # serving requests. Workers still alive after the timeout (starting from the
115 # receipt of the restart signal) are force killed.
117 # receipt of the restart signal) are force killed.
116 # Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
118 # Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
117 graceful_timeout = 21600
119 graceful_timeout = 21600
118
120
119 # The number of seconds to wait for requests on a Keep-Alive connection.
121 # The number of seconds to wait for requests on a Keep-Alive connection.
120 # Generally set in the 1-5 seconds range.
122 # Generally set in the 1-5 seconds range.
121 keepalive = 2
123 keepalive = 2
122
124
123 # Maximum memory usage that each worker can use before it will receive a
125 # Maximum memory usage that each worker can use before it will receive a
124 # graceful restart signal 0 = memory monitoring is disabled
126 # graceful restart signal 0 = memory monitoring is disabled
125 # Examples: 268435456 (256MB), 536870912 (512MB)
127 # Examples: 268435456 (256MB), 536870912 (512MB)
126 # 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
128 # 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
127 # Dynamic formula 1024 * 1024 * 256 == 256MBs
129 # Dynamic formula 1024 * 1024 * 256 == 256MBs
128 memory_max_usage = 0
130 memory_max_usage = 0
129
131
130 # How often in seconds to check for memory usage for each gunicorn worker
132 # How often in seconds to check for memory usage for each gunicorn worker
131 memory_usage_check_interval = 60
133 memory_usage_check_interval = 60
132
134
133 # Threshold value for which we don't recycle worker if GarbageCollection
135 # Threshold value for which we don't recycle worker if GarbageCollection
134 # frees up enough resources. Before each restart, we try to run GC on worker
136 # frees up enough resources. Before each restart, we try to run GC on worker
135 # in case we get enough free memory after that; restart will not happen.
137 # in case we get enough free memory after that; restart will not happen.
136 memory_usage_recovery_threshold = 0.8
138 memory_usage_recovery_threshold = 0.8
137
139
138
140
139 @dataclasses.dataclass
141 @dataclasses.dataclass
140 class MemoryCheckConfig:
142 class MemoryCheckConfig:
141 max_usage: int
143 max_usage: int
142 check_interval: int
144 check_interval: int
143 recovery_threshold: float
145 recovery_threshold: float
144
146
145
147
146 def _get_process_rss(pid=None):
148 def _get_process_rss(pid=None):
147 try:
149 try:
148 import psutil
150 import psutil
149 if pid:
151 if pid:
150 proc = psutil.Process(pid)
152 proc = psutil.Process(pid)
151 else:
153 else:
152 proc = psutil.Process()
154 proc = psutil.Process()
153 return proc.memory_info().rss
155 return proc.memory_info().rss
154 except Exception:
156 except Exception:
155 return None
157 return None
156
158
157
159
158 def _get_config(ini_path):
160 def _get_config(ini_path):
159 import configparser
161 import configparser
160
162
161 try:
163 try:
162 config = configparser.RawConfigParser()
164 config = configparser.RawConfigParser()
163 config.read(ini_path)
165 config.read(ini_path)
164 return config
166 return config
165 except Exception:
167 except Exception:
166 return None
168 return None
167
169
168
170
169 def get_memory_usage_params(config=None):
171 def get_memory_usage_params(config=None):
170 # memory spec defaults
172 # memory spec defaults
171 _memory_max_usage = memory_max_usage
173 _memory_max_usage = memory_max_usage
172 _memory_usage_check_interval = memory_usage_check_interval
174 _memory_usage_check_interval = memory_usage_check_interval
173 _memory_usage_recovery_threshold = memory_usage_recovery_threshold
175 _memory_usage_recovery_threshold = memory_usage_recovery_threshold
174
176
175 if config:
177 if config:
176 ini_path = os.path.abspath(config)
178 ini_path = os.path.abspath(config)
177 conf = _get_config(ini_path)
179 conf = _get_config(ini_path)
178
180
179 section = 'server:main'
181 section = 'server:main'
180 if conf and conf.has_section(section):
182 if conf and conf.has_section(section):
181
183
182 if conf.has_option(section, 'memory_max_usage'):
184 if conf.has_option(section, 'memory_max_usage'):
183 _memory_max_usage = conf.getint(section, 'memory_max_usage')
185 _memory_max_usage = conf.getint(section, 'memory_max_usage')
184
186
185 if conf.has_option(section, 'memory_usage_check_interval'):
187 if conf.has_option(section, 'memory_usage_check_interval'):
186 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
188 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
187
189
188 if conf.has_option(section, 'memory_usage_recovery_threshold'):
190 if conf.has_option(section, 'memory_usage_recovery_threshold'):
189 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
191 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
190
192
191 _memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
193 _memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
192 or _memory_max_usage)
194 or _memory_max_usage)
193 _memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
195 _memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
194 or _memory_usage_check_interval)
196 or _memory_usage_check_interval)
195 _memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
197 _memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
196 or _memory_usage_recovery_threshold)
198 or _memory_usage_recovery_threshold)
197
199
198 return MemoryCheckConfig(_memory_max_usage, _memory_usage_check_interval, _memory_usage_recovery_threshold)
200 return MemoryCheckConfig(_memory_max_usage, _memory_usage_check_interval, _memory_usage_recovery_threshold)
199
201
200
202
201 def _time_with_offset(check_interval):
203 def _time_with_offset(check_interval):
202 return time.time() - random.randint(0, check_interval/2.0)
204 return time.time() - random.randint(0, check_interval/2.0)
203
205
204
206
205 def pre_fork(server, worker):
207 def pre_fork(server, worker):
206 pass
208 pass
207
209
208
210
209 def post_fork(server, worker):
211 def post_fork(server, worker):
210
212
211 memory_conf = get_memory_usage_params()
213 memory_conf = get_memory_usage_params()
212 _memory_max_usage = memory_conf.max_usage
214 _memory_max_usage = memory_conf.max_usage
213 _memory_usage_check_interval = memory_conf.check_interval
215 _memory_usage_check_interval = memory_conf.check_interval
214 _memory_usage_recovery_threshold = memory_conf.recovery_threshold
216 _memory_usage_recovery_threshold = memory_conf.recovery_threshold
215
217
216 worker._memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
218 worker._memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
217 or _memory_max_usage)
219 or _memory_max_usage)
218 worker._memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
220 worker._memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
219 or _memory_usage_check_interval)
221 or _memory_usage_check_interval)
220 worker._memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
222 worker._memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
221 or _memory_usage_recovery_threshold)
223 or _memory_usage_recovery_threshold)
222
224
223 # register memory last check time, with some random offset so we don't recycle all
225 # register memory last check time, with some random offset so we don't recycle all
224 # at once
226 # at once
225 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
227 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
226
228
227 if _memory_max_usage:
229 if _memory_max_usage:
228 server.log.info("pid=[%-10s] WORKER spawned with max memory set at %s", worker.pid,
230 server.log.info("pid=[%-10s] WORKER spawned with max memory set at %s", worker.pid,
229 _format_data_size(_memory_max_usage))
231 _format_data_size(_memory_max_usage))
230 else:
232 else:
231 server.log.info("pid=[%-10s] WORKER spawned", worker.pid)
233 server.log.info("pid=[%-10s] WORKER spawned", worker.pid)
232
234
233
235
234 def pre_exec(server):
236 def pre_exec(server):
235 server.log.info("Forked child, re-executing.")
237 server.log.info("Forked child, re-executing.")
236
238
237
239
238 def on_starting(server):
240 def on_starting(server):
239 server_lbl = '{} {}'.format(server.proc_name, server.address)
241 server_lbl = '{} {}'.format(server.proc_name, server.address)
240 server.log.info("Server %s is starting.", server_lbl)
242 server.log.info("Server %s is starting.", server_lbl)
241 server.log.info('Config:')
243 server.log.info('Config:')
242 server.log.info(f"\n{server.cfg}")
244 server.log.info(f"\n{server.cfg}")
243 server.log.info(get_memory_usage_params())
245 server.log.info(get_memory_usage_params())
244
246
245
247
246 def when_ready(server):
248 def when_ready(server):
247 server.log.info("Server %s is ready. Spawning workers", server)
249 server.log.info("Server %s is ready. Spawning workers", server)
248
250
249
251
250 def on_reload(server):
252 def on_reload(server):
251 pass
253 pass
252
254
253
255
254 def _format_data_size(size, unit="B", precision=1, binary=True):
256 def _format_data_size(size, unit="B", precision=1, binary=True):
255 """Format a number using SI units (kilo, mega, etc.).
257 """Format a number using SI units (kilo, mega, etc.).
256
258
257 ``size``: The number as a float or int.
259 ``size``: The number as a float or int.
258
260
259 ``unit``: The unit name in plural form. Examples: "bytes", "B".
261 ``unit``: The unit name in plural form. Examples: "bytes", "B".
260
262
261 ``precision``: How many digits to the right of the decimal point. Default
263 ``precision``: How many digits to the right of the decimal point. Default
262 is 1. 0 suppresses the decimal point.
264 is 1. 0 suppresses the decimal point.
263
265
264 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
266 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
265 If true, use base-2 binary prefixes (kibi = Ki = 1024).
267 If true, use base-2 binary prefixes (kibi = Ki = 1024).
266
268
267 ``full_name``: If false (default), use the prefix abbreviation ("k" or
269 ``full_name``: If false (default), use the prefix abbreviation ("k" or
268 "Ki"). If true, use the full prefix ("kilo" or "kibi"). If false,
270 "Ki"). If true, use the full prefix ("kilo" or "kibi"). If false,
269 use abbreviation ("k" or "Ki").
271 use abbreviation ("k" or "Ki").
270
272
271 """
273 """
272
274
273 if not binary:
275 if not binary:
274 base = 1000
276 base = 1000
275 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
277 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
276 else:
278 else:
277 base = 1024
279 base = 1024
278 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
280 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
279
281
280 sign = ""
282 sign = ""
281 if size > 0:
283 if size > 0:
282 m = int(math.log(size, base))
284 m = int(math.log(size, base))
283 elif size < 0:
285 elif size < 0:
284 sign = "-"
286 sign = "-"
285 size = -size
287 size = -size
286 m = int(math.log(size, base))
288 m = int(math.log(size, base))
287 else:
289 else:
288 m = 0
290 m = 0
289 if m > 8:
291 if m > 8:
290 m = 8
292 m = 8
291
293
292 if m == 0:
294 if m == 0:
293 precision = '%.0f'
295 precision = '%.0f'
294 else:
296 else:
295 precision = '%%.%df' % precision
297 precision = '%%.%df' % precision
296
298
297 size = precision % (size / math.pow(base, m))
299 size = precision % (size / math.pow(base, m))
298
300
299 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
301 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
300
302
301
303
302 def _check_memory_usage(worker):
304 def _check_memory_usage(worker):
303 _memory_max_usage = worker._memory_max_usage
305 _memory_max_usage = worker._memory_max_usage
304 if not _memory_max_usage:
306 if not _memory_max_usage:
305 return
307 return
306
308
307 _memory_usage_check_interval = worker._memory_usage_check_interval
309 _memory_usage_check_interval = worker._memory_usage_check_interval
308 _memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
310 _memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
309
311
310 elapsed = time.time() - worker._last_memory_check_time
312 elapsed = time.time() - worker._last_memory_check_time
311 if elapsed > _memory_usage_check_interval:
313 if elapsed > _memory_usage_check_interval:
312 mem_usage = _get_process_rss()
314 mem_usage = _get_process_rss()
313 if mem_usage and mem_usage > _memory_max_usage:
315 if mem_usage and mem_usage > _memory_max_usage:
314 worker.log.info(
316 worker.log.info(
315 "memory usage %s > %s, forcing gc",
317 "memory usage %s > %s, forcing gc",
316 _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
318 _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
317 # Try to clean it up by forcing a full collection.
319 # Try to clean it up by forcing a full collection.
318 gc.collect()
320 gc.collect()
319 mem_usage = _get_process_rss()
321 mem_usage = _get_process_rss()
320 if mem_usage > _memory_usage_recovery_threshold:
322 if mem_usage > _memory_usage_recovery_threshold:
321 # Didn't clean up enough, we'll have to terminate.
323 # Didn't clean up enough, we'll have to terminate.
322 worker.log.warning(
324 worker.log.warning(
323 "memory usage %s > %s after gc, quitting",
325 "memory usage %s > %s after gc, quitting",
324 _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
326 _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
325 # This will cause worker to auto-restart itself
327 # This will cause worker to auto-restart itself
326 worker.alive = False
328 worker.alive = False
327 worker._last_memory_check_time = time.time()
329 worker._last_memory_check_time = time.time()
328
330
329
331
330 def worker_int(worker):
332 def worker_int(worker):
331 worker.log.info("pid=[%-10s] worker received INT or QUIT signal", worker.pid)
333 worker.log.info("pid=[%-10s] worker received INT or QUIT signal", worker.pid)
332
334
333 # get traceback info, when a worker crashes
335 # get traceback info, when a worker crashes
334 def get_thread_id(t_id):
336 def get_thread_id(t_id):
335 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
337 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
336 return id2name.get(t_id, "unknown_thread_id")
338 return id2name.get(t_id, "unknown_thread_id")
337
339
338 code = []
340 code = []
339 for thread_id, stack in sys._current_frames().items(): # noqa
341 for thread_id, stack in sys._current_frames().items(): # noqa
340 code.append(
342 code.append(
341 "\n# Thread: %s(%d)" % (get_thread_id(thread_id), thread_id))
343 "\n# Thread: %s(%d)" % (get_thread_id(thread_id), thread_id))
342 for fname, lineno, name, line in traceback.extract_stack(stack):
344 for fname, lineno, name, line in traceback.extract_stack(stack):
343 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
345 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
344 if line:
346 if line:
345 code.append(" %s" % (line.strip()))
347 code.append(" %s" % (line.strip()))
346 worker.log.debug("\n".join(code))
348 worker.log.debug("\n".join(code))
347
349
348
350
349 def worker_abort(worker):
351 def worker_abort(worker):
350 worker.log.info("pid=[%-10s] worker received SIGABRT signal", worker.pid)
352 worker.log.info("pid=[%-10s] worker received SIGABRT signal", worker.pid)
351
353
352
354
353 def worker_exit(server, worker):
355 def worker_exit(server, worker):
354 worker.log.info("pid=[%-10s] worker exit", worker.pid)
356 worker.log.info("pid=[%-10s] worker exit", worker.pid)
355
357
356
358
357 def child_exit(server, worker):
359 def child_exit(server, worker):
358 worker.log.info("pid=[%-10s] worker child exit", worker.pid)
360 worker.log.info("pid=[%-10s] worker child exit", worker.pid)
359
361
360
362
361 def pre_request(worker, req):
363 def pre_request(worker, req):
362 worker.start_time = time.time()
364 worker.start_time = time.time()
363 worker.log.debug(
365 worker.log.debug(
364 "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
366 "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
365
367
366
368
367 def post_request(worker, req, environ, resp):
369 def post_request(worker, req, environ, resp):
368 total_time = time.time() - worker.start_time
370 total_time = time.time() - worker.start_time
369 # Gunicorn sometimes has problems with reading the status_code
371 # Gunicorn sometimes has problems with reading the status_code
370 status_code = getattr(resp, 'status_code', '')
372 status_code = getattr(resp, 'status_code', '')
371 worker.log.debug(
373 worker.log.debug(
372 "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
374 "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
373 worker.nr, req.method, req.path, status_code, total_time)
375 worker.nr, req.method, req.path, status_code, total_time)
374 _check_memory_usage(worker)
376 _check_memory_usage(worker)
375
377
376
378
377 def _filter_proxy(ip):
379 def _filter_proxy(ip):
378 """
380 """
379 Passed in IP addresses in HEADERS can be in a special format of multiple
381 Passed in IP addresses in HEADERS can be in a special format of multiple
380 ips. Those comma separated IPs are passed from various proxies in the
382 ips. Those comma separated IPs are passed from various proxies in the
381 chain of request processing. The left-most being the original client.
383 chain of request processing. The left-most being the original client.
382 We only care about the first IP which came from the org. client.
384 We only care about the first IP which came from the org. client.
383
385
384 :param ip: ip string from headers
386 :param ip: ip string from headers
385 """
387 """
386 if ',' in ip:
388 if ',' in ip:
387 _ips = ip.split(',')
389 _ips = ip.split(',')
388 _first_ip = _ips[0].strip()
390 _first_ip = _ips[0].strip()
389 return _first_ip
391 return _first_ip
390 return ip
392 return ip
391
393
392
394
393 def _filter_port(ip):
395 def _filter_port(ip):
394 """
396 """
395 Removes a port from ip, there are 4 main cases to handle here.
397 Removes a port from ip, there are 4 main cases to handle here.
396 - ipv4 eg. 127.0.0.1
398 - ipv4 eg. 127.0.0.1
397 - ipv6 eg. ::1
399 - ipv6 eg. ::1
398 - ipv4+port eg. 127.0.0.1:8080
400 - ipv4+port eg. 127.0.0.1:8080
399 - ipv6+port eg. [::1]:8080
401 - ipv6+port eg. [::1]:8080
400
402
401 :param ip:
403 :param ip:
402 """
404 """
403 def is_ipv6(ip_addr):
405 def is_ipv6(ip_addr):
404 if hasattr(socket, 'inet_pton'):
406 if hasattr(socket, 'inet_pton'):
405 try:
407 try:
406 socket.inet_pton(socket.AF_INET6, ip_addr)
408 socket.inet_pton(socket.AF_INET6, ip_addr)
407 except socket.error:
409 except socket.error:
408 return False
410 return False
409 else:
411 else:
410 return False
412 return False
411 return True
413 return True
412
414
413 if ':' not in ip: # must be ipv4 pure ip
415 if ':' not in ip: # must be ipv4 pure ip
414 return ip
416 return ip
415
417
416 if '[' in ip and ']' in ip: # ipv6 with port
418 if '[' in ip and ']' in ip: # ipv6 with port
417 return ip.split(']')[0][1:].lower()
419 return ip.split(']')[0][1:].lower()
418
420
419 # must be ipv6 or ipv4 with port
421 # must be ipv6 or ipv4 with port
420 if is_ipv6(ip):
422 if is_ipv6(ip):
421 return ip
423 return ip
422 else:
424 else:
423 ip, _port = ip.split(':')[:2] # means ipv4+port
425 ip, _port = ip.split(':')[:2] # means ipv4+port
424 return ip
426 return ip
425
427
426
428
427 def get_ip_addr(environ):
429 def get_ip_addr(environ):
428 proxy_key = 'HTTP_X_REAL_IP'
430 proxy_key = 'HTTP_X_REAL_IP'
429 proxy_key2 = 'HTTP_X_FORWARDED_FOR'
431 proxy_key2 = 'HTTP_X_FORWARDED_FOR'
430 def_key = 'REMOTE_ADDR'
432 def_key = 'REMOTE_ADDR'
431
433
432 def _filters(x):
434 def _filters(x):
433 return _filter_port(_filter_proxy(x))
435 return _filter_port(_filter_proxy(x))
434
436
435 ip = environ.get(proxy_key)
437 ip = environ.get(proxy_key)
436 if ip:
438 if ip:
437 return _filters(ip)
439 return _filters(ip)
438
440
439 ip = environ.get(proxy_key2)
441 ip = environ.get(proxy_key2)
440 if ip:
442 if ip:
441 return _filters(ip)
443 return _filters(ip)
442
444
443 ip = environ.get(def_key, '0.0.0.0')
445 ip = environ.get(def_key, '0.0.0.0')
444 return _filters(ip)
446 return _filters(ip)
445
447
446
448
447 class RhodeCodeLogger(Logger):
449 class RhodeCodeLogger(Logger):
448 """
450 """
449 Custom Logger that allows some customization that gunicorn doesn't allow
451 Custom Logger that allows some customization that gunicorn doesn't allow
450 """
452 """
451
453
452 datefmt = r"%Y-%m-%d %H:%M:%S"
454 datefmt = r"%Y-%m-%d %H:%M:%S"
453
455
454 def __init__(self, cfg):
456 def __init__(self, cfg):
455 Logger.__init__(self, cfg)
457 Logger.__init__(self, cfg)
456
458
457 def now(self):
459 def now(self):
458 """ return date in RhodeCode Log format """
460 """ return date in RhodeCode Log format """
459 now = time.time()
461 now = time.time()
460 msecs = int((now - int(now)) * 1000)
462 msecs = int((now - int(now)) * 1000)
461 return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
463 return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
462
464
463 def atoms(self, resp, req, environ, request_time):
465 def atoms(self, resp, req, environ, request_time):
464 """ Gets atoms for log formatting.
466 """ Gets atoms for log formatting.
465 """
467 """
466 status = resp.status
468 status = resp.status
467 if isinstance(status, str):
469 if isinstance(status, str):
468 status = status.split(None, 1)[0]
470 status = status.split(None, 1)[0]
469 atoms = {
471 atoms = {
470 'h': get_ip_addr(environ),
472 'h': get_ip_addr(environ),
471 'l': '-',
473 'l': '-',
472 'u': self._get_user(environ) or '-',
474 'u': self._get_user(environ) or '-',
473 't': self.now(),
475 't': self.now(),
474 'r': "%s %s %s" % (environ['REQUEST_METHOD'],
476 'r': "%s %s %s" % (environ['REQUEST_METHOD'],
475 environ['RAW_URI'],
477 environ['RAW_URI'],
476 environ["SERVER_PROTOCOL"]),
478 environ["SERVER_PROTOCOL"]),
477 's': status,
479 's': status,
478 'm': environ.get('REQUEST_METHOD'),
480 'm': environ.get('REQUEST_METHOD'),
479 'U': environ.get('PATH_INFO'),
481 'U': environ.get('PATH_INFO'),
480 'q': environ.get('QUERY_STRING'),
482 'q': environ.get('QUERY_STRING'),
481 'H': environ.get('SERVER_PROTOCOL'),
483 'H': environ.get('SERVER_PROTOCOL'),
482 'b': getattr(resp, 'sent', None) is not None and str(resp.sent) or '-',
484 'b': getattr(resp, 'sent', None) is not None and str(resp.sent) or '-',
483 'B': getattr(resp, 'sent', None),
485 'B': getattr(resp, 'sent', None),
484 'f': environ.get('HTTP_REFERER', '-'),
486 'f': environ.get('HTTP_REFERER', '-'),
485 'a': environ.get('HTTP_USER_AGENT', '-'),
487 'a': environ.get('HTTP_USER_AGENT', '-'),
486 'T': request_time.seconds,
488 'T': request_time.seconds,
487 'D': (request_time.seconds * 1000000) + request_time.microseconds,
489 'D': (request_time.seconds * 1000000) + request_time.microseconds,
488 'M': (request_time.seconds * 1000) + int(request_time.microseconds/1000),
490 'M': (request_time.seconds * 1000) + int(request_time.microseconds/1000),
489 'L': "%d.%06d" % (request_time.seconds, request_time.microseconds),
491 'L': "%d.%06d" % (request_time.seconds, request_time.microseconds),
490 'p': "<%s>" % os.getpid()
492 'p': "<%s>" % os.getpid()
491 }
493 }
492
494
493 # add request headers
495 # add request headers
494 if hasattr(req, 'headers'):
496 if hasattr(req, 'headers'):
495 req_headers = req.headers
497 req_headers = req.headers
496 else:
498 else:
497 req_headers = req
499 req_headers = req
498
500
499 if hasattr(req_headers, "items"):
501 if hasattr(req_headers, "items"):
500 req_headers = req_headers.items()
502 req_headers = req_headers.items()
501
503
502 atoms.update({"{%s}i" % k.lower(): v for k, v in req_headers})
504 atoms.update({"{%s}i" % k.lower(): v for k, v in req_headers})
503
505
504 resp_headers = resp.headers
506 resp_headers = resp.headers
505 if hasattr(resp_headers, "items"):
507 if hasattr(resp_headers, "items"):
506 resp_headers = resp_headers.items()
508 resp_headers = resp_headers.items()
507
509
508 # add response headers
510 # add response headers
509 atoms.update({"{%s}o" % k.lower(): v for k, v in resp_headers})
511 atoms.update({"{%s}o" % k.lower(): v for k, v in resp_headers})
510
512
511 # add environ variables
513 # add environ variables
512 environ_variables = environ.items()
514 environ_variables = environ.items()
513 atoms.update({"{%s}e" % k.lower(): v for k, v in environ_variables})
515 atoms.update({"{%s}e" % k.lower(): v for k, v in environ_variables})
514
516
515 return atoms
517 return atoms
516
518
517
519
518 logger_class = RhodeCodeLogger
520 logger_class = RhodeCodeLogger
@@ -1,813 +1,816 b''
1
1
2 ; #########################################
2 ; #########################################
3 ; RHODECODE COMMUNITY EDITION CONFIGURATION
3 ; RHODECODE COMMUNITY EDITION CONFIGURATION
4 ; #########################################
4 ; #########################################
5
5
6 [DEFAULT]
6 [DEFAULT]
7 ; Debug flag sets all loggers to debug, and enables request tracking
7 ; Debug flag sets all loggers to debug, and enables request tracking
8 debug = false
8 debug = false
9
9
10 ; ########################################################################
10 ; ########################################################################
11 ; EMAIL CONFIGURATION
11 ; EMAIL CONFIGURATION
12 ; These settings will be used by the RhodeCode mailing system
12 ; These settings will be used by the RhodeCode mailing system
13 ; ########################################################################
13 ; ########################################################################
14
14
15 ; prefix all emails subjects with given prefix, helps filtering out emails
15 ; prefix all emails subjects with given prefix, helps filtering out emails
16 #email_prefix = [RhodeCode]
16 #email_prefix = [RhodeCode]
17
17
18 ; email FROM address all mails will be sent
18 ; email FROM address all mails will be sent
19 #app_email_from = rhodecode-noreply@localhost
19 #app_email_from = rhodecode-noreply@localhost
20
20
21 #smtp_server = mail.server.com
21 #smtp_server = mail.server.com
22 #smtp_username =
22 #smtp_username =
23 #smtp_password =
23 #smtp_password =
24 #smtp_port =
24 #smtp_port =
25 #smtp_use_tls = false
25 #smtp_use_tls = false
26 #smtp_use_ssl = true
26 #smtp_use_ssl = true
27
27
28 [server:main]
28 [server:main]
29 ; COMMON HOST/IP CONFIG, This applies mostly to develop setup,
29 ; COMMON HOST/IP CONFIG, This applies mostly to develop setup,
30 ; Host port for gunicorn are controlled by gunicorn_conf.py
30 ; Host port for gunicorn are controlled by gunicorn_conf.py
31 host = 127.0.0.1
31 host = 127.0.0.1
32 port = 10020
32 port = 10020
33
33
34
34
35 ; ###########################
35 ; ###########################
36 ; GUNICORN APPLICATION SERVER
36 ; GUNICORN APPLICATION SERVER
37 ; ###########################
37 ; ###########################
38
38
39 ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py
39 ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py
40
40
41 ; Module to use, this setting shouldn't be changed
41 ; Module to use, this setting shouldn't be changed
42 use = egg:gunicorn#main
42 use = egg:gunicorn#main
43
43
44 ; Prefix middleware for RhodeCode.
44 ; Prefix middleware for RhodeCode.
45 ; recommended when using proxy setup.
45 ; recommended when using proxy setup.
46 ; allows to set RhodeCode under a prefix in server.
46 ; allows to set RhodeCode under a prefix in server.
47 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
47 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
48 ; And set your prefix like: `prefix = /custom_prefix`
48 ; And set your prefix like: `prefix = /custom_prefix`
49 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
49 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
50 ; to make your cookies only work on prefix url
50 ; to make your cookies only work on prefix url
51 [filter:proxy-prefix]
51 [filter:proxy-prefix]
52 use = egg:PasteDeploy#prefix
52 use = egg:PasteDeploy#prefix
53 prefix = /
53 prefix = /
54
54
55 [app:main]
55 [app:main]
56 ; The %(here)s variable will be replaced with the absolute path of parent directory
56 ; The %(here)s variable will be replaced with the absolute path of parent directory
57 ; of this file
57 ; of this file
58 ; Each option in the app:main can be override by an environmental variable
58 ; Each option in the app:main can be override by an environmental variable
59 ;
59 ;
60 ;To override an option:
60 ;To override an option:
61 ;
61 ;
62 ;RC_<KeyName>
62 ;RC_<KeyName>
63 ;Everything should be uppercase, . and - should be replaced by _.
63 ;Everything should be uppercase, . and - should be replaced by _.
64 ;For example, if you have these configuration settings:
64 ;For example, if you have these configuration settings:
65 ;rc_cache.repo_object.backend = foo
65 ;rc_cache.repo_object.backend = foo
66 ;can be overridden by
66 ;can be overridden by
67 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
67 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
68
68
69 use = egg:rhodecode-enterprise-ce
69 use = egg:rhodecode-enterprise-ce
70
70
71 ; enable proxy prefix middleware, defined above
71 ; enable proxy prefix middleware, defined above
72 #filter-with = proxy-prefix
72 #filter-with = proxy-prefix
73
73
74 ; encryption key used to encrypt social plugin tokens,
74 ; encryption key used to encrypt social plugin tokens,
75 ; remote_urls with credentials etc, if not set it defaults to
75 ; remote_urls with credentials etc, if not set it defaults to
76 ; `beaker.session.secret`
76 ; `beaker.session.secret`
77 #rhodecode.encrypted_values.secret =
77 #rhodecode.encrypted_values.secret =
78
78
79 ; decryption strict mode (enabled by default). It controls if decryption raises
79 ; decryption strict mode (enabled by default). It controls if decryption raises
80 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
80 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
81 #rhodecode.encrypted_values.strict = false
81 #rhodecode.encrypted_values.strict = false
82
82
83 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
83 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
84 ; fernet is safer, and we strongly recommend switching to it.
84 ; fernet is safer, and we strongly recommend switching to it.
85 ; Due to backward compatibility aes is used as default.
85 ; Due to backward compatibility aes is used as default.
86 #rhodecode.encrypted_values.algorithm = fernet
86 #rhodecode.encrypted_values.algorithm = fernet
87
87
88 ; Return gzipped responses from RhodeCode (static files/application)
88 ; Return gzipped responses from RhodeCode (static files/application)
89 gzip_responses = false
89 gzip_responses = false
90
90
91 ; Auto-generate javascript routes file on startup
91 ; Auto-generate javascript routes file on startup
92 generate_js_files = false
92 generate_js_files = false
93
93
94 ; System global default language.
94 ; System global default language.
95 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
95 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
96 lang = en
96 lang = en
97
97
98 ; Perform a full repository scan and import on each server start.
98 ; Perform a full repository scan and import on each server start.
99 ; Settings this to true could lead to very long startup time.
99 ; Settings this to true could lead to very long startup time.
100 startup.import_repos = false
100 startup.import_repos = false
101
101
102 ; URL at which the application is running. This is used for Bootstrapping
102 ; URL at which the application is running. This is used for Bootstrapping
103 ; requests in context when no web request is available. Used in ishell, or
103 ; requests in context when no web request is available. Used in ishell, or
104 ; SSH calls. Set this for events to receive proper url for SSH calls.
104 ; SSH calls. Set this for events to receive proper url for SSH calls.
105 app.base_url = http://rhodecode.local
105 app.base_url = http://rhodecode.local
106
106
107 ; Unique application ID. Should be a random unique string for security.
107 ; Unique application ID. Should be a random unique string for security.
108 app_instance_uuid = rc-production
108 app_instance_uuid = rc-production
109
109
110 ; Cut off limit for large diffs (size in bytes). If overall diff size on
110 ; Cut off limit for large diffs (size in bytes). If overall diff size on
111 ; commit, or pull request exceeds this limit this diff will be displayed
111 ; commit, or pull request exceeds this limit this diff will be displayed
112 ; partially. E.g 512000 == 512Kb
112 ; partially. E.g 512000 == 512Kb
113 cut_off_limit_diff = 512000
113 cut_off_limit_diff = 512000
114
114
115 ; Cut off limit for large files inside diffs (size in bytes). Each individual
115 ; Cut off limit for large files inside diffs (size in bytes). Each individual
116 ; file inside diff which exceeds this limit will be displayed partially.
116 ; file inside diff which exceeds this limit will be displayed partially.
117 ; E.g 128000 == 128Kb
117 ; E.g 128000 == 128Kb
118 cut_off_limit_file = 128000
118 cut_off_limit_file = 128000
119
119
120 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
120 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
121 vcs_full_cache = true
121 vcs_full_cache = true
122
122
123 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
123 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
124 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
124 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
125 force_https = false
125 force_https = false
126
126
127 ; use Strict-Transport-Security headers
127 ; use Strict-Transport-Security headers
128 use_htsts = false
128 use_htsts = false
129
129
130 ; Set to true if your repos are exposed using the dumb protocol
130 ; Set to true if your repos are exposed using the dumb protocol
131 git_update_server_info = false
131 git_update_server_info = false
132
132
133 ; RSS/ATOM feed options
133 ; RSS/ATOM feed options
134 rss_cut_off_limit = 256000
134 rss_cut_off_limit = 256000
135 rss_items_per_page = 10
135 rss_items_per_page = 10
136 rss_include_diff = false
136 rss_include_diff = false
137
137
138 ; gist URL alias, used to create nicer urls for gist. This should be an
138 ; gist URL alias, used to create nicer urls for gist. This should be an
139 ; url that does rewrites to _admin/gists/{gistid}.
139 ; url that does rewrites to _admin/gists/{gistid}.
140 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
140 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
141 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
141 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
142 gist_alias_url =
142 gist_alias_url =
143
143
144 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
144 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
145 ; used for access.
145 ; used for access.
146 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
146 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
147 ; came from the the logged in user who own this authentication token.
147 ; came from the the logged in user who own this authentication token.
148 ; Additionally @TOKEN syntax can be used to bound the view to specific
148 ; Additionally @TOKEN syntax can be used to bound the view to specific
149 ; authentication token. Such view would be only accessible when used together
149 ; authentication token. Such view would be only accessible when used together
150 ; with this authentication token
150 ; with this authentication token
151 ; list of all views can be found under `/_admin/permissions/auth_token_access`
151 ; list of all views can be found under `/_admin/permissions/auth_token_access`
152 ; The list should be "," separated and on a single line.
152 ; The list should be "," separated and on a single line.
153 ; Most common views to enable:
153 ; Most common views to enable:
154
154
155 # RepoCommitsView:repo_commit_download
155 # RepoCommitsView:repo_commit_download
156 # RepoCommitsView:repo_commit_patch
156 # RepoCommitsView:repo_commit_patch
157 # RepoCommitsView:repo_commit_raw
157 # RepoCommitsView:repo_commit_raw
158 # RepoCommitsView:repo_commit_raw@TOKEN
158 # RepoCommitsView:repo_commit_raw@TOKEN
159 # RepoFilesView:repo_files_diff
159 # RepoFilesView:repo_files_diff
160 # RepoFilesView:repo_archivefile
160 # RepoFilesView:repo_archivefile
161 # RepoFilesView:repo_file_raw
161 # RepoFilesView:repo_file_raw
162 # GistView:*
162 # GistView:*
163 api_access_controllers_whitelist =
163 api_access_controllers_whitelist =
164
164
165 ; Default encoding used to convert from and to unicode
165 ; Default encoding used to convert from and to unicode
166 ; can be also a comma separated list of encoding in case of mixed encodings
166 ; can be also a comma separated list of encoding in case of mixed encodings
167 default_encoding = UTF-8
167 default_encoding = UTF-8
168
168
169 ; instance-id prefix
169 ; instance-id prefix
170 ; a prefix key for this instance used for cache invalidation when running
170 ; a prefix key for this instance used for cache invalidation when running
171 ; multiple instances of RhodeCode, make sure it's globally unique for
171 ; multiple instances of RhodeCode, make sure it's globally unique for
172 ; all running RhodeCode instances. Leave empty if you don't use it
172 ; all running RhodeCode instances. Leave empty if you don't use it
173 instance_id =
173 instance_id =
174
174
175 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
175 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
176 ; of an authentication plugin also if it is disabled by it's settings.
176 ; of an authentication plugin also if it is disabled by it's settings.
177 ; This could be useful if you are unable to log in to the system due to broken
177 ; This could be useful if you are unable to log in to the system due to broken
178 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
178 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
179 ; module to log in again and fix the settings.
179 ; module to log in again and fix the settings.
180 ; Available builtin plugin IDs (hash is part of the ID):
180 ; Available builtin plugin IDs (hash is part of the ID):
181 ; egg:rhodecode-enterprise-ce#rhodecode
181 ; egg:rhodecode-enterprise-ce#rhodecode
182 ; egg:rhodecode-enterprise-ce#pam
182 ; egg:rhodecode-enterprise-ce#pam
183 ; egg:rhodecode-enterprise-ce#ldap
183 ; egg:rhodecode-enterprise-ce#ldap
184 ; egg:rhodecode-enterprise-ce#jasig_cas
184 ; egg:rhodecode-enterprise-ce#jasig_cas
185 ; egg:rhodecode-enterprise-ce#headers
185 ; egg:rhodecode-enterprise-ce#headers
186 ; egg:rhodecode-enterprise-ce#crowd
186 ; egg:rhodecode-enterprise-ce#crowd
187
187
188 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
188 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
189
189
190 ; Flag to control loading of legacy plugins in py:/path format
190 ; Flag to control loading of legacy plugins in py:/path format
191 auth_plugin.import_legacy_plugins = true
191 auth_plugin.import_legacy_plugins = true
192
192
193 ; alternative return HTTP header for failed authentication. Default HTTP
193 ; alternative return HTTP header for failed authentication. Default HTTP
194 ; response is 401 HTTPUnauthorized. Currently HG clients have troubles with
194 ; response is 401 HTTPUnauthorized. Currently HG clients have troubles with
195 ; handling that causing a series of failed authentication calls.
195 ; handling that causing a series of failed authentication calls.
196 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
196 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
197 ; This will be served instead of default 401 on bad authentication
197 ; This will be served instead of default 401 on bad authentication
198 auth_ret_code =
198 auth_ret_code =
199
199
200 ; use special detection method when serving auth_ret_code, instead of serving
200 ; use special detection method when serving auth_ret_code, instead of serving
201 ; ret_code directly, use 401 initially (Which triggers credentials prompt)
201 ; ret_code directly, use 401 initially (Which triggers credentials prompt)
202 ; and then serve auth_ret_code to clients
202 ; and then serve auth_ret_code to clients
203 auth_ret_code_detection = false
203 auth_ret_code_detection = false
204
204
205 ; locking return code. When repository is locked return this HTTP code. 2XX
205 ; locking return code. When repository is locked return this HTTP code. 2XX
206 ; codes don't break the transactions while 4XX codes do
206 ; codes don't break the transactions while 4XX codes do
207 lock_ret_code = 423
207 lock_ret_code = 423
208
208
209 ; allows to change the repository location in settings page
209 ; allows to change the repository location in settings page
210 allow_repo_location_change = true
210 allow_repo_location_change = true
211
211
212 ; allows to setup custom hooks in settings page
212 ; allows to setup custom hooks in settings page
213 allow_custom_hooks_settings = true
213 allow_custom_hooks_settings = true
214
214
215 ; Generated license token required for EE edition license.
215 ; Generated license token required for EE edition license.
216 ; New generated token value can be found in Admin > settings > license page.
216 ; New generated token value can be found in Admin > settings > license page.
217 license_token =
217 license_token =
218
218
219 ; This flag hides sensitive information on the license page such as token, and license data
219 ; This flag hides sensitive information on the license page such as token, and license data
220 license.hide_license_info = false
220 license.hide_license_info = false
221
221
222 ; supervisor connection uri, for managing supervisor and logs.
222 ; supervisor connection uri, for managing supervisor and logs.
223 supervisor.uri =
223 supervisor.uri =
224
224
225 ; supervisord group name/id we only want this RC instance to handle
225 ; supervisord group name/id we only want this RC instance to handle
226 supervisor.group_id = prod
226 supervisor.group_id = prod
227
227
228 ; Display extended labs settings
228 ; Display extended labs settings
229 labs_settings_active = true
229 labs_settings_active = true
230
230
231 ; Custom exception store path, defaults to TMPDIR
231 ; Custom exception store path, defaults to TMPDIR
232 ; This is used to store exception from RhodeCode in shared directory
232 ; This is used to store exception from RhodeCode in shared directory
233 #exception_tracker.store_path =
233 #exception_tracker.store_path =
234
234
235 ; Send email with exception details when it happens
235 ; Send email with exception details when it happens
236 #exception_tracker.send_email = false
236 #exception_tracker.send_email = false
237
237
238 ; Comma separated list of recipients for exception emails,
238 ; Comma separated list of recipients for exception emails,
239 ; e.g admin@rhodecode.com,devops@rhodecode.com
239 ; e.g admin@rhodecode.com,devops@rhodecode.com
240 ; Can be left empty, then emails will be sent to ALL super-admins
240 ; Can be left empty, then emails will be sent to ALL super-admins
241 #exception_tracker.send_email_recipients =
241 #exception_tracker.send_email_recipients =
242
242
243 ; optional prefix to Add to email Subject
243 ; optional prefix to Add to email Subject
244 #exception_tracker.email_prefix = [RHODECODE ERROR]
244 #exception_tracker.email_prefix = [RHODECODE ERROR]
245
245
246 ; File store configuration. This is used to store and serve uploaded files
246 ; File store configuration. This is used to store and serve uploaded files
247 file_store.enabled = true
247 file_store.enabled = true
248
248
249 ; Storage backend, available options are: local
249 ; Storage backend, available options are: local
250 file_store.backend = local
250 file_store.backend = local
251
251
252 ; path to store the uploaded binaries
252 ; path to store the uploaded binaries
253 file_store.storage_path = %(here)s/data/file_store
253 file_store.storage_path = %(here)s/data/file_store
254
254
255 ; Uncomment and set this path to control settings for archive download cache.
255 ; Uncomment and set this path to control settings for archive download cache.
256 ; Generated repo archives will be cached at this location
256 ; Generated repo archives will be cached at this location
257 ; and served from the cache during subsequent requests for the same archive of
257 ; and served from the cache during subsequent requests for the same archive of
258 ; the repository. This path is important to be shared across filesystems and with
258 ; the repository. This path is important to be shared across filesystems and with
259 ; RhodeCode and vcsserver
259 ; RhodeCode and vcsserver
260
260
261 ; Default is $cache_dir/archive_cache if not set
261 ; Default is $cache_dir/archive_cache if not set
262 archive_cache.store_dir = %(here)s/data/archive_cache
262 archive_cache.store_dir = %(here)s/data/archive_cache
263
263
264 ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
264 ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
265 archive_cache.cache_size_gb = 40
265 archive_cache.cache_size_gb = 40
266
266
267 ; By default cache uses sharding technique, this specifies how many shards are there
267 ; By default cache uses sharding technique, this specifies how many shards are there
268 archive_cache.cache_shards = 4
268 archive_cache.cache_shards = 4
269
269
270 ; #############
270 ; #############
271 ; CELERY CONFIG
271 ; CELERY CONFIG
272 ; #############
272 ; #############
273
273
274 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
274 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
275
275
276 use_celery = false
276 use_celery = false
277
277
278 ; path to store schedule database
278 ; path to store schedule database
279 #celerybeat-schedule.path =
279 #celerybeat-schedule.path =
280
280
281 ; connection url to the message broker (default redis)
281 ; connection url to the message broker (default redis)
282 celery.broker_url = redis://localhost:6379/8
282 celery.broker_url = redis://redis:6379/8
283
284 ; results backend to get results for (default redis)
285 celery.result_backend = redis://redis:6379/8
283
286
284 ; rabbitmq example
287 ; rabbitmq example
285 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
288 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
286
289
287 ; maximum tasks to execute before worker restart
290 ; maximum tasks to execute before worker restart
288 celery.max_tasks_per_child = 20
291 celery.max_tasks_per_child = 20
289
292
290 ; tasks will never be sent to the queue, but executed locally instead.
293 ; tasks will never be sent to the queue, but executed locally instead.
291 celery.task_always_eager = false
294 celery.task_always_eager = false
292
295
293 ; #############
296 ; #############
294 ; DOGPILE CACHE
297 ; DOGPILE CACHE
295 ; #############
298 ; #############
296
299
297 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
300 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
298 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
301 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
299 cache_dir = %(here)s/data
302 cache_dir = %(here)s/data
300
303
301 ; *********************************************
304 ; *********************************************
302 ; `sql_cache_short` cache for heavy SQL queries
305 ; `sql_cache_short` cache for heavy SQL queries
303 ; Only supported backend is `memory_lru`
306 ; Only supported backend is `memory_lru`
304 ; *********************************************
307 ; *********************************************
305 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
308 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
306 rc_cache.sql_cache_short.expiration_time = 30
309 rc_cache.sql_cache_short.expiration_time = 30
307
310
308
311
309 ; *****************************************************
312 ; *****************************************************
310 ; `cache_repo_longterm` cache for repo object instances
313 ; `cache_repo_longterm` cache for repo object instances
311 ; Only supported backend is `memory_lru`
314 ; Only supported backend is `memory_lru`
312 ; *****************************************************
315 ; *****************************************************
313 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
316 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
314 ; by default we use 30 Days, cache is still invalidated on push
317 ; by default we use 30 Days, cache is still invalidated on push
315 rc_cache.cache_repo_longterm.expiration_time = 2592000
318 rc_cache.cache_repo_longterm.expiration_time = 2592000
316 ; max items in LRU cache, set to smaller number to save memory, and expire last used caches
319 ; max items in LRU cache, set to smaller number to save memory, and expire last used caches
317 rc_cache.cache_repo_longterm.max_size = 10000
320 rc_cache.cache_repo_longterm.max_size = 10000
318
321
319
322
320 ; *********************************************
323 ; *********************************************
321 ; `cache_general` cache for general purpose use
324 ; `cache_general` cache for general purpose use
322 ; for simplicity use rc.file_namespace backend,
325 ; for simplicity use rc.file_namespace backend,
323 ; for performance and scale use rc.redis
326 ; for performance and scale use rc.redis
324 ; *********************************************
327 ; *********************************************
325 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
328 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
326 rc_cache.cache_general.expiration_time = 43200
329 rc_cache.cache_general.expiration_time = 43200
327 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
330 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
328 #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db
331 #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db
329
332
330 ; alternative `cache_general` redis backend with distributed lock
333 ; alternative `cache_general` redis backend with distributed lock
331 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
334 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
332 #rc_cache.cache_general.expiration_time = 300
335 #rc_cache.cache_general.expiration_time = 300
333
336
334 ; redis_expiration_time needs to be greater then expiration_time
337 ; redis_expiration_time needs to be greater then expiration_time
335 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
338 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
336
339
337 #rc_cache.cache_general.arguments.host = localhost
340 #rc_cache.cache_general.arguments.host = localhost
338 #rc_cache.cache_general.arguments.port = 6379
341 #rc_cache.cache_general.arguments.port = 6379
339 #rc_cache.cache_general.arguments.db = 0
342 #rc_cache.cache_general.arguments.db = 0
340 #rc_cache.cache_general.arguments.socket_timeout = 30
343 #rc_cache.cache_general.arguments.socket_timeout = 30
341 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
344 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
342 #rc_cache.cache_general.arguments.distributed_lock = true
345 #rc_cache.cache_general.arguments.distributed_lock = true
343
346
344 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
347 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
345 #rc_cache.cache_general.arguments.lock_auto_renewal = true
348 #rc_cache.cache_general.arguments.lock_auto_renewal = true
346
349
347 ; *************************************************
350 ; *************************************************
348 ; `cache_perms` cache for permission tree, auth TTL
351 ; `cache_perms` cache for permission tree, auth TTL
349 ; for simplicity use rc.file_namespace backend,
352 ; for simplicity use rc.file_namespace backend,
350 ; for performance and scale use rc.redis
353 ; for performance and scale use rc.redis
351 ; *************************************************
354 ; *************************************************
352 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
355 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
353 rc_cache.cache_perms.expiration_time = 3600
356 rc_cache.cache_perms.expiration_time = 3600
354 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
357 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
355 #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db
358 #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db
356
359
357 ; alternative `cache_perms` redis backend with distributed lock
360 ; alternative `cache_perms` redis backend with distributed lock
358 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
361 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
359 #rc_cache.cache_perms.expiration_time = 300
362 #rc_cache.cache_perms.expiration_time = 300
360
363
361 ; redis_expiration_time needs to be greater then expiration_time
364 ; redis_expiration_time needs to be greater then expiration_time
362 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
365 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
363
366
364 #rc_cache.cache_perms.arguments.host = localhost
367 #rc_cache.cache_perms.arguments.host = localhost
365 #rc_cache.cache_perms.arguments.port = 6379
368 #rc_cache.cache_perms.arguments.port = 6379
366 #rc_cache.cache_perms.arguments.db = 0
369 #rc_cache.cache_perms.arguments.db = 0
367 #rc_cache.cache_perms.arguments.socket_timeout = 30
370 #rc_cache.cache_perms.arguments.socket_timeout = 30
368 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
371 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
369 #rc_cache.cache_perms.arguments.distributed_lock = true
372 #rc_cache.cache_perms.arguments.distributed_lock = true
370
373
371 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
374 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
372 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
375 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
373
376
374 ; ***************************************************
377 ; ***************************************************
375 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
378 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
376 ; for simplicity use rc.file_namespace backend,
379 ; for simplicity use rc.file_namespace backend,
377 ; for performance and scale use rc.redis
380 ; for performance and scale use rc.redis
378 ; ***************************************************
381 ; ***************************************************
379 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
382 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
380 rc_cache.cache_repo.expiration_time = 2592000
383 rc_cache.cache_repo.expiration_time = 2592000
381 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
384 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
382 #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db
385 #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db
383
386
384 ; alternative `cache_repo` redis backend with distributed lock
387 ; alternative `cache_repo` redis backend with distributed lock
385 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
388 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
386 #rc_cache.cache_repo.expiration_time = 2592000
389 #rc_cache.cache_repo.expiration_time = 2592000
387
390
388 ; redis_expiration_time needs to be greater than expiration_time
391 ; redis_expiration_time needs to be greater than expiration_time
389 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
392 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
390
393
391 #rc_cache.cache_repo.arguments.host = localhost
394 #rc_cache.cache_repo.arguments.host = localhost
392 #rc_cache.cache_repo.arguments.port = 6379
395 #rc_cache.cache_repo.arguments.port = 6379
393 #rc_cache.cache_repo.arguments.db = 1
396 #rc_cache.cache_repo.arguments.db = 1
394 #rc_cache.cache_repo.arguments.socket_timeout = 30
397 #rc_cache.cache_repo.arguments.socket_timeout = 30
395 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
398 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
396 #rc_cache.cache_repo.arguments.distributed_lock = true
399 #rc_cache.cache_repo.arguments.distributed_lock = true
397
400
398 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
401 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
399 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
402 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
400
403
401 ; ##############
404 ; ##############
402 ; BEAKER SESSION
405 ; BEAKER SESSION
403 ; ##############
406 ; ##############
404
407
405 ; beaker.session.type is type of storage options for the logged users sessions. Current allowed
408 ; beaker.session.type is type of storage options for the logged users sessions. Current allowed
406 ; types are file, ext:redis, ext:database, ext:memcached
409 ; types are file, ext:redis, ext:database, ext:memcached
407 ; Fastest ones are ext:redis and ext:database, DO NOT use memory type for session
410 ; Fastest ones are ext:redis and ext:database, DO NOT use memory type for session
408 beaker.session.type = file
411 beaker.session.type = file
409 beaker.session.data_dir = %(here)s/data/sessions
412 beaker.session.data_dir = %(here)s/data/sessions
410
413
411 ; Redis based sessions
414 ; Redis based sessions
412 #beaker.session.type = ext:redis
415 #beaker.session.type = ext:redis
413 #beaker.session.url = redis://127.0.0.1:6379/2
416 #beaker.session.url = redis://127.0.0.1:6379/2
414
417
415 ; DB based session, fast, and allows easy management over logged in users
418 ; DB based session, fast, and allows easy management over logged in users
416 #beaker.session.type = ext:database
419 #beaker.session.type = ext:database
417 #beaker.session.table_name = db_session
420 #beaker.session.table_name = db_session
418 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
421 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
419 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
422 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
420 #beaker.session.sa.pool_recycle = 3600
423 #beaker.session.sa.pool_recycle = 3600
421 #beaker.session.sa.echo = false
424 #beaker.session.sa.echo = false
422
425
423 beaker.session.key = rhodecode
426 beaker.session.key = rhodecode
424 beaker.session.secret = production-rc-uytcxaz
427 beaker.session.secret = production-rc-uytcxaz
425 beaker.session.lock_dir = %(here)s/data/sessions/lock
428 beaker.session.lock_dir = %(here)s/data/sessions/lock
426
429
427 ; Secure encrypted cookie. Requires AES and AES python libraries
430 ; Secure encrypted cookie. Requires AES and AES python libraries
428 ; you must disable beaker.session.secret to use this
431 ; you must disable beaker.session.secret to use this
429 #beaker.session.encrypt_key = key_for_encryption
432 #beaker.session.encrypt_key = key_for_encryption
430 #beaker.session.validate_key = validation_key
433 #beaker.session.validate_key = validation_key
431
434
432 ; Sets session as invalid (also logging out user) if it has not been
435 ; Sets session as invalid (also logging out user) if it has not been
433 ; accessed for given amount of time in seconds
436 ; accessed for given amount of time in seconds
434 beaker.session.timeout = 2592000
437 beaker.session.timeout = 2592000
435 beaker.session.httponly = true
438 beaker.session.httponly = true
436
439
437 ; Path to use for the cookie. Set to prefix if you use prefix middleware
440 ; Path to use for the cookie. Set to prefix if you use prefix middleware
438 #beaker.session.cookie_path = /custom_prefix
441 #beaker.session.cookie_path = /custom_prefix
439
442
440 ; Set https secure cookie
443 ; Set https secure cookie
441 beaker.session.secure = false
444 beaker.session.secure = false
442
445
443 ; default cookie expiration time in seconds, set to `true` to set expire
446 ; default cookie expiration time in seconds, set to `true` to set expire
444 ; at browser close
447 ; at browser close
445 #beaker.session.cookie_expires = 3600
448 #beaker.session.cookie_expires = 3600
446
449
447 ; #############################
450 ; #############################
448 ; SEARCH INDEXING CONFIGURATION
451 ; SEARCH INDEXING CONFIGURATION
449 ; #############################
452 ; #############################
450
453
451 ; Full text search indexer is available in rhodecode-tools under
454 ; Full text search indexer is available in rhodecode-tools under
452 ; `rhodecode-tools index` command
455 ; `rhodecode-tools index` command
453
456
454 ; WHOOSH Backend, doesn't require additional services to run
457 ; WHOOSH Backend, doesn't require additional services to run
455 ; it works well with a few dozen repos
458 ; it works well with a few dozen repos
456 search.module = rhodecode.lib.index.whoosh
459 search.module = rhodecode.lib.index.whoosh
457 search.location = %(here)s/data/index
460 search.location = %(here)s/data/index
458
461
459 ; ####################
462 ; ####################
460 ; CHANNELSTREAM CONFIG
463 ; CHANNELSTREAM CONFIG
461 ; ####################
464 ; ####################
462
465
463 ; channelstream enables persistent connections and live notification
466 ; channelstream enables persistent connections and live notification
464 ; in the system. It's also used by the chat system
467 ; in the system. It's also used by the chat system
465
468
466 channelstream.enabled = false
469 channelstream.enabled = false
467
470
468 ; server address for channelstream server on the backend
471 ; server address for channelstream server on the backend
469 channelstream.server = 127.0.0.1:9800
472 channelstream.server = 127.0.0.1:9800
470
473
471 ; location of the channelstream server from outside world
474 ; location of the channelstream server from outside world
472 ; use ws:// for http or wss:// for https. This address needs to be handled
475 ; use ws:// for http or wss:// for https. This address needs to be handled
473 ; by external HTTP server such as Nginx or Apache
476 ; by external HTTP server such as Nginx or Apache
474 ; see Nginx/Apache configuration examples in our docs
477 ; see Nginx/Apache configuration examples in our docs
475 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
478 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
476 channelstream.secret = secret
479 channelstream.secret = secret
477 channelstream.history.location = %(here)s/channelstream_history
480 channelstream.history.location = %(here)s/channelstream_history
478
481
479 ; Internal application path that Javascript uses to connect into.
482 ; Internal application path that Javascript uses to connect into.
480 ; If you use proxy-prefix the prefix should be added before /_channelstream
483 ; If you use proxy-prefix the prefix should be added before /_channelstream
481 channelstream.proxy_path = /_channelstream
484 channelstream.proxy_path = /_channelstream
482
485
483
486
484 ; ##############################
487 ; ##############################
485 ; MAIN RHODECODE DATABASE CONFIG
488 ; MAIN RHODECODE DATABASE CONFIG
486 ; ##############################
489 ; ##############################
487
490
488 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
491 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
489 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
492 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
490 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
493 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
491 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
494 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
492 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
495 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
493
496
494 sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
497 sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
495
498
496 ; see sqlalchemy docs for other advanced settings
499 ; see sqlalchemy docs for other advanced settings
497 ; print the sql statements to output
500 ; print the sql statements to output
498 sqlalchemy.db1.echo = false
501 sqlalchemy.db1.echo = false
499
502
500 ; recycle the connections after this amount of seconds
503 ; recycle the connections after this amount of seconds
501 sqlalchemy.db1.pool_recycle = 3600
504 sqlalchemy.db1.pool_recycle = 3600
502
505
503 ; the number of connections to keep open inside the connection pool.
506 ; the number of connections to keep open inside the connection pool.
504 ; 0 indicates no limit
507 ; 0 indicates no limit
505 ; the general calculus with gevent is:
508 ; the general calculus with gevent is:
506 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
509 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
507 ; then increase pool size + max overflow so that they add up to 500.
510 ; then increase pool size + max overflow so that they add up to 500.
508 #sqlalchemy.db1.pool_size = 5
511 #sqlalchemy.db1.pool_size = 5
509
512
510 ; The number of connections to allow in connection pool "overflow", that is
513 ; The number of connections to allow in connection pool "overflow", that is
511 ; connections that can be opened above and beyond the pool_size setting,
514 ; connections that can be opened above and beyond the pool_size setting,
512 ; which defaults to five.
515 ; which defaults to five.
513 #sqlalchemy.db1.max_overflow = 10
516 #sqlalchemy.db1.max_overflow = 10
514
517
515 ; Connection check ping, used to detect broken database connections
518 ; Connection check ping, used to detect broken database connections
516 ; could be enabled to better handle cases if MySQL has gone away errors
519 ; could be enabled to better handle cases if MySQL has gone away errors
517 #sqlalchemy.db1.ping_connection = true
520 #sqlalchemy.db1.ping_connection = true
518
521
519 ; ##########
522 ; ##########
520 ; VCS CONFIG
523 ; VCS CONFIG
521 ; ##########
524 ; ##########
522 vcs.server.enable = true
525 vcs.server.enable = true
523 vcs.server = localhost:9900
526 vcs.server = localhost:9900
524
527
525 ; Web server connectivity protocol, responsible for web based VCS operations
528 ; Web server connectivity protocol, responsible for web based VCS operations
526 ; Available protocols are:
529 ; Available protocols are:
527 ; `http` - use http-rpc backend (default)
530 ; `http` - use http-rpc backend (default)
528 vcs.server.protocol = http
531 vcs.server.protocol = http
529
532
530 ; Push/Pull operations protocol, available options are:
533 ; Push/Pull operations protocol, available options are:
531 ; `http` - use http-rpc backend (default)
534 ; `http` - use http-rpc backend (default)
532 vcs.scm_app_implementation = http
535 vcs.scm_app_implementation = http
533
536
534 ; Push/Pull operations hooks protocol, available options are:
537 ; Push/Pull operations hooks protocol, available options are:
535 ; `http` - use http-rpc backend (default)
538 ; `http` - use http-rpc backend (default)
536 vcs.hooks.protocol = http
539 vcs.hooks.protocol = http
537
540
538 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
541 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
539 ; accessible via network.
542 ; accessible via network.
540 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
543 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
541 vcs.hooks.host = *
544 vcs.hooks.host = *
542
545
543 ; Start VCSServer with this instance as a subprocess, useful for development
546 ; Start VCSServer with this instance as a subprocess, useful for development
544 vcs.start_server = false
547 vcs.start_server = false
545
548
546 ; List of enabled VCS backends, available options are:
549 ; List of enabled VCS backends, available options are:
547 ; `hg` - mercurial
550 ; `hg` - mercurial
548 ; `git` - git
551 ; `git` - git
549 ; `svn` - subversion
552 ; `svn` - subversion
550 vcs.backends = hg, git, svn
553 vcs.backends = hg, git, svn
551
554
552 ; Wait this number of seconds before killing connection to the vcsserver
555 ; Wait this number of seconds before killing connection to the vcsserver
553 vcs.connection_timeout = 3600
556 vcs.connection_timeout = 3600
554
557
555 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
558 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
556 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
559 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
557 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
560 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
558 #vcs.svn.compatible_version = 1.8
561 #vcs.svn.compatible_version = 1.8
559
562
560 ; Cache flag to cache vcsserver remote calls locally
563 ; Cache flag to cache vcsserver remote calls locally
561 ; It uses cache_region `cache_repo`
564 ; It uses cache_region `cache_repo`
562 vcs.methods.cache = true
565 vcs.methods.cache = true
563
566
564 ; ####################################################
567 ; ####################################################
565 ; Subversion proxy support (mod_dav_svn)
568 ; Subversion proxy support (mod_dav_svn)
566 ; Maps RhodeCode repo groups into SVN paths for Apache
569 ; Maps RhodeCode repo groups into SVN paths for Apache
567 ; ####################################################
570 ; ####################################################
568
571
569 ; Enable or disable the config file generation.
572 ; Enable or disable the config file generation.
570 svn.proxy.generate_config = false
573 svn.proxy.generate_config = false
571
574
572 ; Generate config file with `SVNListParentPath` set to `On`.
575 ; Generate config file with `SVNListParentPath` set to `On`.
573 svn.proxy.list_parent_path = true
576 svn.proxy.list_parent_path = true
574
577
575 ; Set location and file name of generated config file.
578 ; Set location and file name of generated config file.
576 svn.proxy.config_file_path = %(here)s/mod_dav_svn.conf
579 svn.proxy.config_file_path = %(here)s/mod_dav_svn.conf
577
580
578 ; alternative mod_dav config template. This needs to be a valid mako template
581 ; alternative mod_dav config template. This needs to be a valid mako template
579 ; Example template can be found in the source code:
582 ; Example template can be found in the source code:
580 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
583 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
581 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
584 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
582
585
583 ; Used as a prefix to the `Location` block in the generated config file.
586 ; Used as a prefix to the `Location` block in the generated config file.
584 ; In most cases it should be set to `/`.
587 ; In most cases it should be set to `/`.
585 svn.proxy.location_root = /
588 svn.proxy.location_root = /
586
589
587 ; Command to reload the mod dav svn configuration on change.
590 ; Command to reload the mod dav svn configuration on change.
588 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
591 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
589 ; Make sure user who runs RhodeCode process is allowed to reload Apache
592 ; Make sure user who runs RhodeCode process is allowed to reload Apache
590 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
593 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
591
594
592 ; If the timeout expires before the reload command finishes, the command will
595 ; If the timeout expires before the reload command finishes, the command will
593 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
596 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
594 #svn.proxy.reload_timeout = 10
597 #svn.proxy.reload_timeout = 10
595
598
596 ; ####################
599 ; ####################
597 ; SSH Support Settings
600 ; SSH Support Settings
598 ; ####################
601 ; ####################
599
602
600 ; Defines if a custom authorized_keys file should be created and written on
603 ; Defines if a custom authorized_keys file should be created and written on
601 ; any change user ssh keys. Setting this to false also disables possibility
604 ; any change user ssh keys. Setting this to false also disables possibility
602 ; of adding SSH keys by users from web interface. Super admins can still
605 ; of adding SSH keys by users from web interface. Super admins can still
603 ; manage SSH Keys.
606 ; manage SSH Keys.
604 ssh.generate_authorized_keyfile = false
607 ssh.generate_authorized_keyfile = false
605
608
606 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
609 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
607 # ssh.authorized_keys_ssh_opts =
610 # ssh.authorized_keys_ssh_opts =
608
611
609 ; Path to the authorized_keys file where the generated entries are placed.
612 ; Path to the authorized_keys file where the generated entries are placed.
610 ; It is possible to have multiple key files specified in `sshd_config` e.g.
613 ; It is possible to have multiple key files specified in `sshd_config` e.g.
611 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
614 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
612 ssh.authorized_keys_file_path = ~/.ssh/authorized_keys_rhodecode
615 ssh.authorized_keys_file_path = ~/.ssh/authorized_keys_rhodecode
613
616
614 ; Command to execute the SSH wrapper. The binary is available in the
617 ; Command to execute the SSH wrapper. The binary is available in the
615 ; RhodeCode installation directory.
618 ; RhodeCode installation directory.
616 ; e.g ~/.rccontrol/community-1/profile/bin/rc-ssh-wrapper
619 ; e.g ~/.rccontrol/community-1/profile/bin/rc-ssh-wrapper
617 ssh.wrapper_cmd = ~/.rccontrol/community-1/rc-ssh-wrapper
620 ssh.wrapper_cmd = ~/.rccontrol/community-1/rc-ssh-wrapper
618
621
619 ; Allow shell when executing the ssh-wrapper command
622 ; Allow shell when executing the ssh-wrapper command
620 ssh.wrapper_cmd_allow_shell = false
623 ssh.wrapper_cmd_allow_shell = false
621
624
622 ; Enables logging, and detailed output sent back to the client during SSH
625 ; Enables logging, and detailed output sent back to the client during SSH
623 ; operations. Useful for debugging, shouldn't be used in production.
626 ; operations. Useful for debugging, shouldn't be used in production.
624 ssh.enable_debug_logging = false
627 ssh.enable_debug_logging = false
625
628
626 ; Paths to binary executable, by default they are the names, but we can
629 ; Paths to binary executable, by default they are the names, but we can
627 ; override them if we want to use a custom one
630 ; override them if we want to use a custom one
628 ssh.executable.hg = ~/.rccontrol/vcsserver-1/profile/bin/hg
631 ssh.executable.hg = ~/.rccontrol/vcsserver-1/profile/bin/hg
629 ssh.executable.git = ~/.rccontrol/vcsserver-1/profile/bin/git
632 ssh.executable.git = ~/.rccontrol/vcsserver-1/profile/bin/git
630 ssh.executable.svn = ~/.rccontrol/vcsserver-1/profile/bin/svnserve
633 ssh.executable.svn = ~/.rccontrol/vcsserver-1/profile/bin/svnserve
631
634
632 ; Enables SSH key generator web interface. Disabling this still allows users
635 ; Enables SSH key generator web interface. Disabling this still allows users
633 ; to add their own keys.
636 ; to add their own keys.
634 ssh.enable_ui_key_generator = true
637 ssh.enable_ui_key_generator = true
635
638
636
639
637 ; #################
640 ; #################
638 ; APPENLIGHT CONFIG
641 ; APPENLIGHT CONFIG
639 ; #################
642 ; #################
640
643
641 ; Appenlight is tailored to work with RhodeCode, see
644 ; Appenlight is tailored to work with RhodeCode, see
642 ; http://appenlight.rhodecode.com for details how to obtain an account
645 ; http://appenlight.rhodecode.com for details how to obtain an account
643
646
644 ; Appenlight integration enabled
647 ; Appenlight integration enabled
645 #appenlight = false
648 #appenlight = false
646
649
647 #appenlight.server_url = https://api.appenlight.com
650 #appenlight.server_url = https://api.appenlight.com
648 #appenlight.api_key = YOUR_API_KEY
651 #appenlight.api_key = YOUR_API_KEY
649 #appenlight.transport_config = https://api.appenlight.com?threaded=1&timeout=5
652 #appenlight.transport_config = https://api.appenlight.com?threaded=1&timeout=5
650
653
651 ; used for JS client
654 ; used for JS client
652 #appenlight.api_public_key = YOUR_API_PUBLIC_KEY
655 #appenlight.api_public_key = YOUR_API_PUBLIC_KEY
653
656
654 ; TWEAK AMOUNT OF INFO SENT HERE
657 ; TWEAK AMOUNT OF INFO SENT HERE
655
658
656 ; enables 404 error logging (default False)
659 ; enables 404 error logging (default False)
657 #appenlight.report_404 = false
660 #appenlight.report_404 = false
658
661
659 ; time in seconds after request is considered being slow (default 1)
662 ; time in seconds after request is considered being slow (default 1)
660 #appenlight.slow_request_time = 1
663 #appenlight.slow_request_time = 1
661
664
662 ; record slow requests in application
665 ; record slow requests in application
663 ; (needs to be enabled for slow datastore recording and time tracking)
666 ; (needs to be enabled for slow datastore recording and time tracking)
664 #appenlight.slow_requests = true
667 #appenlight.slow_requests = true
665
668
666 ; enable hooking to application loggers
669 ; enable hooking to application loggers
667 #appenlight.logging = true
670 #appenlight.logging = true
668
671
669 ; minimum log level for log capture
672 ; minimum log level for log capture
670 #appenlight.logging.level = WARNING
673 #appenlight.logging.level = WARNING
671
674
672 ; send logs only from erroneous/slow requests
675 ; send logs only from erroneous/slow requests
673 ; (saves API quota for intensive logging)
676 ; (saves API quota for intensive logging)
674 #appenlight.logging_on_error = false
677 #appenlight.logging_on_error = false
675
678
676 ; list of additional keywords that should be grabbed from environ object
679 ; list of additional keywords that should be grabbed from environ object
677 ; can be string with comma separated list of words in lowercase
680 ; can be string with comma separated list of words in lowercase
678 ; (by default client will always send following info:
681 ; (by default client will always send following info:
679 ; 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that
682 ; 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that
680 ; start with HTTP*. This list can be extended with additional keywords here
683 ; start with HTTP*. This list can be extended with additional keywords here
681 #appenlight.environ_keys_whitelist =
684 #appenlight.environ_keys_whitelist =
682
685
683 ; list of keywords that should be blanked from request object
686 ; list of keywords that should be blanked from request object
684 ; can be string with comma separated list of words in lowercase
687 ; can be string with comma separated list of words in lowercase
685 ; (by default client will always blank keys that contain following words
688 ; (by default client will always blank keys that contain following words
686 ; 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf'
689 ; 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf'
687 ; this list can be extended with additional keywords set here
690 ; this list can be extended with additional keywords set here
688 #appenlight.request_keys_blacklist =
691 #appenlight.request_keys_blacklist =
689
692
690 ; list of namespaces that should be ignored when gathering log entries
693 ; list of namespaces that should be ignored when gathering log entries
691 ; can be string with comma separated list of namespaces
694 ; can be string with comma separated list of namespaces
692 ; (by default the client ignores own entries: appenlight_client.client)
695 ; (by default the client ignores own entries: appenlight_client.client)
693 #appenlight.log_namespace_blacklist =
696 #appenlight.log_namespace_blacklist =
694
697
695 ; Statsd client config, this is used to send metrics to statsd
698 ; Statsd client config, this is used to send metrics to statsd
696 ; We recommend setting statsd_exported and scrape them using Prometheus
699 ; We recommend setting statsd_exported and scrape them using Prometheus
697 #statsd.enabled = false
700 #statsd.enabled = false
698 #statsd.statsd_host = 0.0.0.0
701 #statsd.statsd_host = 0.0.0.0
699 #statsd.statsd_port = 8125
702 #statsd.statsd_port = 8125
700 #statsd.statsd_prefix =
703 #statsd.statsd_prefix =
701 #statsd.statsd_ipv6 = false
704 #statsd.statsd_ipv6 = false
702
705
703 ; configure logging automatically at server startup set to false
706 ; configure logging automatically at server startup set to false
704 ; to use the below custom logging config.
707 ; to use the below custom logging config.
705 ; RC_LOGGING_FORMATTER
708 ; RC_LOGGING_FORMATTER
706 ; RC_LOGGING_LEVEL
709 ; RC_LOGGING_LEVEL
707 ; env variables can control the settings for logging in case of autoconfigure
710 ; env variables can control the settings for logging in case of autoconfigure
708
711
709 #logging.autoconfigure = true
712 #logging.autoconfigure = true
710
713
711 ; specify your own custom logging config file to configure logging
714 ; specify your own custom logging config file to configure logging
712 #logging.logging_conf_file = /path/to/custom_logging.ini
715 #logging.logging_conf_file = /path/to/custom_logging.ini
713
716
714 ; Dummy marker to add new entries after.
717 ; Dummy marker to add new entries after.
715 ; Add any custom entries below. Please don't remove this marker.
718 ; Add any custom entries below. Please don't remove this marker.
716 custom.conf = 1
719 custom.conf = 1
717
720
718
721
719 ; #####################
722 ; #####################
720 ; LOGGING CONFIGURATION
723 ; LOGGING CONFIGURATION
721 ; #####################
724 ; #####################
722
725
723 [loggers]
726 [loggers]
724 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper
727 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper
725
728
726 [handlers]
729 [handlers]
727 keys = console, console_sql
730 keys = console, console_sql
728
731
729 [formatters]
732 [formatters]
730 keys = generic, json, color_formatter, color_formatter_sql
733 keys = generic, json, color_formatter, color_formatter_sql
731
734
732 ; #######
735 ; #######
733 ; LOGGERS
736 ; LOGGERS
734 ; #######
737 ; #######
735 [logger_root]
738 [logger_root]
736 level = NOTSET
739 level = NOTSET
737 handlers = console
740 handlers = console
738
741
739 [logger_sqlalchemy]
742 [logger_sqlalchemy]
740 level = INFO
743 level = INFO
741 handlers = console_sql
744 handlers = console_sql
742 qualname = sqlalchemy.engine
745 qualname = sqlalchemy.engine
743 propagate = 0
746 propagate = 0
744
747
745 [logger_beaker]
748 [logger_beaker]
746 level = DEBUG
749 level = DEBUG
747 handlers =
750 handlers =
748 qualname = beaker.container
751 qualname = beaker.container
749 propagate = 1
752 propagate = 1
750
753
751 [logger_rhodecode]
754 [logger_rhodecode]
752 level = DEBUG
755 level = DEBUG
753 handlers =
756 handlers =
754 qualname = rhodecode
757 qualname = rhodecode
755 propagate = 1
758 propagate = 1
756
759
757 [logger_ssh_wrapper]
760 [logger_ssh_wrapper]
758 level = DEBUG
761 level = DEBUG
759 handlers =
762 handlers =
760 qualname = ssh_wrapper
763 qualname = ssh_wrapper
761 propagate = 1
764 propagate = 1
762
765
763 [logger_celery]
766 [logger_celery]
764 level = DEBUG
767 level = DEBUG
765 handlers =
768 handlers =
766 qualname = celery
769 qualname = celery
767
770
768
771
769 ; ########
772 ; ########
770 ; HANDLERS
773 ; HANDLERS
771 ; ########
774 ; ########
772
775
773 [handler_console]
776 [handler_console]
774 class = StreamHandler
777 class = StreamHandler
775 args = (sys.stderr, )
778 args = (sys.stderr, )
776 level = INFO
779 level = INFO
777 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
780 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
778 ; This allows sending properly formatted logs to grafana loki or elasticsearch
781 ; This allows sending properly formatted logs to grafana loki or elasticsearch
779 formatter = generic
782 formatter = generic
780
783
781 [handler_console_sql]
784 [handler_console_sql]
782 ; "level = DEBUG" logs SQL queries and results.
785 ; "level = DEBUG" logs SQL queries and results.
783 ; "level = INFO" logs SQL queries.
786 ; "level = INFO" logs SQL queries.
784 ; "level = WARN" logs neither. (Recommended for production systems.)
787 ; "level = WARN" logs neither. (Recommended for production systems.)
785 class = StreamHandler
788 class = StreamHandler
786 args = (sys.stderr, )
789 args = (sys.stderr, )
787 level = WARN
790 level = WARN
788 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
791 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
789 ; This allows sending properly formatted logs to grafana loki or elasticsearch
792 ; This allows sending properly formatted logs to grafana loki or elasticsearch
790 formatter = generic
793 formatter = generic
791
794
792 ; ##########
795 ; ##########
793 ; FORMATTERS
796 ; FORMATTERS
794 ; ##########
797 ; ##########
795
798
796 [formatter_generic]
799 [formatter_generic]
797 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
800 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
798 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
801 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
799 datefmt = %Y-%m-%d %H:%M:%S
802 datefmt = %Y-%m-%d %H:%M:%S
800
803
801 [formatter_color_formatter]
804 [formatter_color_formatter]
802 class = rhodecode.lib.logging_formatter.ColorFormatter
805 class = rhodecode.lib.logging_formatter.ColorFormatter
803 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
806 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
804 datefmt = %Y-%m-%d %H:%M:%S
807 datefmt = %Y-%m-%d %H:%M:%S
805
808
806 [formatter_color_formatter_sql]
809 [formatter_color_formatter_sql]
807 class = rhodecode.lib.logging_formatter.ColorFormatterSql
810 class = rhodecode.lib.logging_formatter.ColorFormatterSql
808 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
811 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
809 datefmt = %Y-%m-%d %H:%M:%S
812 datefmt = %Y-%m-%d %H:%M:%S
810
813
811 [formatter_json]
814 [formatter_json]
812 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
815 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
813 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
816 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
General Comments 0
You need to be logged in to leave comments. Login now