##// END OF EJS Templates
configs: moved gunicorn configs to python files
super-admin -
r5125:d0f694ba default
parent child Browse files
Show More
@@ -1,933 +1,863 b''
1 1
2 2
3 3 ; #########################################
4 4 ; RHODECODE COMMUNITY EDITION CONFIGURATION
5 5 ; #########################################
6 6
7 7 [DEFAULT]
8 8 ; Debug flag sets all loggers to debug, and enables request tracking
9 9 debug = true
10 10
11 11 ; ########################################################################
12 12 ; EMAIL CONFIGURATION
13 13 ; These settings will be used by the RhodeCode mailing system
14 14 ; ########################################################################
15 15
16 16 ; prefix all emails subjects with given prefix, helps filtering out emails
17 17 #email_prefix = [RhodeCode]
18 18
19 19 ; email FROM address all mails will be sent
20 20 #app_email_from = rhodecode-noreply@localhost
21 21
22 22 #smtp_server = mail.server.com
23 23 #smtp_username =
24 24 #smtp_password =
25 25 #smtp_port =
26 26 #smtp_use_tls = false
27 27 #smtp_use_ssl = true
28 28
29 29 [server:main]
30 ; COMMON HOST/IP CONFIG
30 ; COMMON HOST/IP CONFIG, This applies mostly to develop setup,
31 ; Host port for gunicorn are controlled by gunicorn_conf.py
31 32 host = 127.0.0.1
32 port = 5000
33 port = 10020
33 34
34 35 ; ##################################################
35 36 ; WAITRESS WSGI SERVER - Recommended for Development
36 37 ; ##################################################
37 38
38 39 ; use server type
39 40 use = egg:waitress#main
40 41
41 42 ; number of worker threads
42 43 threads = 5
43 44
44 45 ; MAX BODY SIZE 100GB
45 46 max_request_body_size = 107374182400
46 47
47 48 ; Use poll instead of select, fixes file descriptors limits problems.
48 49 ; May not work on old windows systems.
49 50 asyncore_use_poll = true
50 51
51 52
52 53 ; ###########################
53 54 ; GUNICORN APPLICATION SERVER
54 55 ; ###########################
55 56
56 ; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
57 ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py
57 58
58 59 ; Module to use, this setting shouldn't be changed
59 60 #use = egg:gunicorn#main
60 61
61 ; Sets the number of process workers. More workers means more concurrent connections
62 ; RhodeCode can handle at the same time. Each additional worker also increases
63 ; memory usage as each has its own set of caches.
64 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
65 ; than 8-10 unless for really big deployments, e.g. 700-1000 users.
66 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
67 ; when using more than 1 worker.
68 #workers = 2
69
70 ; Gunicorn access log level
71 #loglevel = info
72
73 ; Process name visible in process list
74 #proc_name = rhodecode
75
76 ; Type of worker class, one of `sync`, `gevent`
77 ; Recommended type is `gevent`
78 #worker_class = gevent
79
80 ; The maximum number of simultaneous clients per worker. Valid only for gevent
81 #worker_connections = 10
82
83 ; The maximum number of pending connections worker will queue to handle
84 #backlog = 64
85
86 ; Max number of requests that worker will handle before being gracefully restarted.
87 ; Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
88 #max_requests = 1000
89 #max_requests_jitter = 30
90
91 ; Amount of time a worker can spend handling a request before it
92 ; gets killed and restarted. By default set to 21600 (6hrs)
93 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
94 #timeout = 21600
95
96 ; The maximum size of HTTP request line in bytes.
97 ; 0 for unlimited
98 #limit_request_line = 0
99
100 ; Limit the number of HTTP headers fields in a request.
101 ; By default this value is 100 and can't be larger than 32768.
102 #limit_request_fields = 32768
103
104 ; Limit the allowed size of an HTTP request header field.
105 ; Value is a positive number or 0.
106 ; Setting it to 0 will allow unlimited header field sizes.
107 #limit_request_field_size = 0
108
109 ; Timeout for graceful workers restart.
110 ; After receiving a restart signal, workers have this much time to finish
111 ; serving requests. Workers still alive after the timeout (starting from the
112 ; receipt of the restart signal) are force killed.
113 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
114 #graceful_timeout = 3600
115
116 # The number of seconds to wait for requests on a Keep-Alive connection.
117 # Generally set in the 1-5 seconds range.
118 #keepalive = 2
119
120 ; Maximum memory usage that each worker can use before it will receive a
121 ; graceful restart signal 0 = memory monitoring is disabled
122 ; Examples: 268435456 (256MB), 536870912 (512MB)
123 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
124 #memory_max_usage = 0
125
126 ; How often in seconds to check for memory usage for each gunicorn worker
127 #memory_usage_check_interval = 60
128
129 ; Threshold value for which we don't recycle worker if GarbageCollection
130 ; frees up enough resources. Before each restart we try to run GC on worker
131 ; in case we get enough free memory after that, restart will not happen.
132 #memory_usage_recovery_threshold = 0.8
133
134
135 62 ; Prefix middleware for RhodeCode.
136 63 ; recommended when using proxy setup.
137 64 ; allows to set RhodeCode under a prefix in server.
138 65 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
139 66 ; And set your prefix like: `prefix = /custom_prefix`
140 67 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
141 68 ; to make your cookies only work on prefix url
142 69 [filter:proxy-prefix]
143 70 use = egg:PasteDeploy#prefix
144 71 prefix = /
145 72
146 73 [app:main]
147 74 ; The %(here)s variable will be replaced with the absolute path of parent directory
148 75 ; of this file
149 76 ; Each option in the app:main can be overridden by an environment variable
150 77 ;
151 78 ;To override an option:
152 79 ;
153 80 ;RC_<KeyName>
154 81 ;Everything should be uppercase, . and - should be replaced by _.
155 82 ;For example, if you have these configuration settings:
156 83 ;rc_cache.repo_object.backend = foo
157 84 ;can be overridden by
158 85 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
159 86
160 87 use = egg:rhodecode-enterprise-ce
161 88
162 89 ; enable proxy prefix middleware, defined above
163 90 #filter-with = proxy-prefix
164 91
165 92 ; #############
166 93 ; DEBUG OPTIONS
167 94 ; #############
168 95
169 96 pyramid.reload_templates = true
170 97
171 98 # During development we want to have the debug toolbar enabled
172 99 pyramid.includes =
173 100 pyramid_debugtoolbar
174 101
175 102 debugtoolbar.hosts = 0.0.0.0/0
176 103 debugtoolbar.exclude_prefixes =
177 104 /css
178 105 /fonts
179 106 /images
180 107 /js
181 108
182 109 ## RHODECODE PLUGINS ##
183 110 rhodecode.includes =
184 111 rhodecode.api
185 112
186 113
187 114 # api prefix url
188 115 rhodecode.api.url = /_admin/api
189 116
190 117 ; enable debug style page
191 118 debug_style = true
192 119
193 120 ; #################
194 121 ; END DEBUG OPTIONS
195 122 ; #################
196 123
197 124 ; encryption key used to encrypt social plugin tokens,
198 125 ; remote_urls with credentials etc, if not set it defaults to
199 126 ; `beaker.session.secret`
200 127 #rhodecode.encrypted_values.secret =
201 128
202 129 ; decryption strict mode (enabled by default). It controls if decryption raises
203 130 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
204 131 #rhodecode.encrypted_values.strict = false
205 132
206 133 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
207 134 ; fernet is safer, and we strongly recommend switching to it.
208 135 ; Due to backward compatibility aes is used as default.
209 136 #rhodecode.encrypted_values.algorithm = fernet
210 137
211 138 ; Return gzipped responses from RhodeCode (static files/application)
212 139 gzip_responses = false
213 140
214 141 ; Auto-generate javascript routes file on startup
215 142 generate_js_files = false
216 143
217 144 ; System global default language.
218 145 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
219 146 lang = en
220 147
221 148 ; Perform a full repository scan and import on each server start.
222 149 ; Setting this to true could lead to very long startup time.
223 150 startup.import_repos = false
224 151
225 152 ; URL at which the application is running. This is used for Bootstrapping
226 153 ; requests in context when no web request is available. Used in ishell, or
227 154 ; SSH calls. Set this for events to receive proper url for SSH calls.
228 155 app.base_url = http://rhodecode.local
229 156
230 157 ; Unique application ID. Should be a random unique string for security.
231 158 app_instance_uuid = rc-production
232 159
233 160 ; Cut off limit for large diffs (size in bytes). If overall diff size on
234 161 ; commit, or pull request exceeds this limit this diff will be displayed
235 162 ; partially. E.g 512000 == 512Kb
236 163 cut_off_limit_diff = 512000
237 164
238 165 ; Cut off limit for large files inside diffs (size in bytes). Each individual
239 166 ; file inside diff which exceeds this limit will be displayed partially.
240 167 ; E.g 128000 == 128Kb
241 168 cut_off_limit_file = 128000
242 169
243 170 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
244 171 vcs_full_cache = true
245 172
246 173 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
247 174 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
248 175 force_https = false
249 176
250 177 ; use Strict-Transport-Security headers
251 178 use_htsts = false
252 179
253 180 ; Set to true if your repos are exposed using the dumb protocol
254 181 git_update_server_info = false
255 182
256 183 ; RSS/ATOM feed options
257 184 rss_cut_off_limit = 256000
258 185 rss_items_per_page = 10
259 186 rss_include_diff = false
260 187
261 188 ; gist URL alias, used to create nicer urls for gist. This should be an
262 189 ; url that does rewrites to _admin/gists/{gistid}.
263 190 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
264 191 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
265 192 gist_alias_url =
266 193
267 194 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
268 195 ; used for access.
269 196 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
270 197 ; came from the logged in user who owns this authentication token.
271 198 ; Additionally @TOKEN syntax can be used to bind the view to a specific
272 199 ; authentication token. Such a view would only be accessible when used together
273 200 ; with this authentication token
274 201 ; list of all views can be found under `/_admin/permissions/auth_token_access`
275 202 ; The list should be "," separated and on a single line.
276 203 ; Most common views to enable:
277 204
278 205 # RepoCommitsView:repo_commit_download
279 206 # RepoCommitsView:repo_commit_patch
280 207 # RepoCommitsView:repo_commit_raw
281 208 # RepoCommitsView:repo_commit_raw@TOKEN
282 209 # RepoFilesView:repo_files_diff
283 210 # RepoFilesView:repo_archivefile
284 211 # RepoFilesView:repo_file_raw
285 212 # GistView:*
286 213 api_access_controllers_whitelist =
287 214
288 215 ; Default encoding used to convert from and to unicode
289 216 ; can be also a comma separated list of encoding in case of mixed encodings
290 217 default_encoding = UTF-8
291 218
292 219 ; instance-id prefix
293 220 ; a prefix key for this instance used for cache invalidation when running
294 221 ; multiple instances of RhodeCode, make sure it's globally unique for
295 222 ; all running RhodeCode instances. Leave empty if you don't use it
296 223 instance_id =
297 224
298 225 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
299 226 ; of an authentication plugin even if it is disabled by its settings.
300 227 ; This could be useful if you are unable to log in to the system due to broken
301 228 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
302 229 ; module to log in again and fix the settings.
303 230 ; Available builtin plugin IDs (hash is part of the ID):
304 231 ; egg:rhodecode-enterprise-ce#rhodecode
305 232 ; egg:rhodecode-enterprise-ce#pam
306 233 ; egg:rhodecode-enterprise-ce#ldap
307 234 ; egg:rhodecode-enterprise-ce#jasig_cas
308 235 ; egg:rhodecode-enterprise-ce#headers
309 236 ; egg:rhodecode-enterprise-ce#crowd
310 237
311 238 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
312 239
313 240 ; Flag to control loading of legacy plugins in py:/path format
314 241 auth_plugin.import_legacy_plugins = true
315 242
316 243 ; alternative return HTTP header for failed authentication. Default HTTP
317 244 ; response is 401 HTTPUnauthorized. Currently HG clients have troubles with
318 245 ; handling that causing a series of failed authentication calls.
319 246 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
320 247 ; This will be served instead of default 401 on bad authentication
321 248 auth_ret_code =
322 249
323 250 ; use special detection method when serving auth_ret_code, instead of serving
324 251 ; ret_code directly, use 401 initially (Which triggers credentials prompt)
325 252 ; and then serve auth_ret_code to clients
326 253 auth_ret_code_detection = false
327 254
328 255 ; locking return code. When repository is locked return this HTTP code. 2XX
329 256 ; codes don't break the transactions while 4XX codes do
330 257 lock_ret_code = 423
331 258
332 259 ; allows to change the repository location in settings page
333 260 allow_repo_location_change = true
334 261
335 262 ; allows to setup custom hooks in settings page
336 263 allow_custom_hooks_settings = true
337 264
338 265 ; Generated license token required for EE edition license.
339 266 ; New generated token value can be found in Admin > settings > license page.
340 267 license_token =
341 268
342 269 ; This flag hides sensitive information on the license page such as token, and license data
343 270 license.hide_license_info = false
344 271
345 272 ; supervisor connection uri, for managing supervisor and logs.
346 273 supervisor.uri =
347 274
348 275 ; supervisord group name/id we only want this RC instance to handle
349 276 supervisor.group_id = dev
350 277
351 278 ; Display extended labs settings
352 279 labs_settings_active = true
353 280
354 281 ; Custom exception store path, defaults to TMPDIR
355 282 ; This is used to store exception from RhodeCode in shared directory
356 283 #exception_tracker.store_path =
357 284
358 285 ; Send email with exception details when it happens
359 286 #exception_tracker.send_email = false
360 287
361 288 ; Comma separated list of recipients for exception emails,
362 289 ; e.g admin@rhodecode.com,devops@rhodecode.com
363 290 ; Can be left empty, then emails will be sent to ALL super-admins
364 291 #exception_tracker.send_email_recipients =
365 292
366 293 ; optional prefix to Add to email Subject
367 294 #exception_tracker.email_prefix = [RHODECODE ERROR]
368 295
369 296 ; File store configuration. This is used to store and serve uploaded files
370 297 file_store.enabled = true
371 298
372 299 ; Storage backend, available options are: local
373 300 file_store.backend = local
374 301
375 302 ; path to store the uploaded binaries
376 303 file_store.storage_path = %(here)s/data/file_store
377 304
378 305 ; Uncomment and set this path to control settings for archive download cache.
379 306 ; Generated repo archives will be cached at this location
380 307 ; and served from the cache during subsequent requests for the same archive of
381 308 ; the repository. This path is important to be shared across filesystems and with
382 309 ; RhodeCode and vcsserver
383 310
384 311 ; Default is $cache_dir/archive_cache if not set
385 312 archive_cache.store_dir = %(here)s/data/archive_cache
386 313
387 314 ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
388 315 archive_cache.cache_size_gb = 10
389 316
390 317 ; By default cache uses sharding technique, this specifies how many shards are there
391 318 archive_cache.cache_shards = 10
392 319
393 320 ; #############
394 321 ; CELERY CONFIG
395 322 ; #############
396 323
397 324 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
398 325
399 326 use_celery = false
400 327
401 328 ; path to store schedule database
402 329 #celerybeat-schedule.path =
403 330
404 331 ; connection url to the message broker (default redis)
405 332 celery.broker_url = redis://localhost:6379/8
406 333
407 334 ; rabbitmq example
408 335 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
409 336
410 337 ; maximum tasks to execute before worker restart
411 338 celery.max_tasks_per_child = 20
412 339
413 340 ; tasks will never be sent to the queue, but executed locally instead.
414 341 celery.task_always_eager = false
415 342
416 343 ; #############
417 344 ; DOGPILE CACHE
418 345 ; #############
419 346
420 347 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
421 348 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
422 349 cache_dir = %(here)s/data
423 350
424 351 ; *********************************************
425 352 ; `sql_cache_short` cache for heavy SQL queries
426 353 ; Only supported backend is `memory_lru`
427 354 ; *********************************************
428 355 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
429 356 rc_cache.sql_cache_short.expiration_time = 30
430 357
431 358
432 359 ; *****************************************************
433 360 ; `cache_repo_longterm` cache for repo object instances
434 361 ; Only supported backend is `memory_lru`
435 362 ; *****************************************************
436 363 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
437 364 ; by default we use 30 Days, cache is still invalidated on push
438 365 rc_cache.cache_repo_longterm.expiration_time = 2592000
439 366 ; max items in LRU cache, set to smaller number to save memory, and expire last used caches
440 367 rc_cache.cache_repo_longterm.max_size = 10000
441 368
442 369
443 370 ; *********************************************
444 371 ; `cache_general` cache for general purpose use
445 372 ; for simplicity use rc.file_namespace backend,
446 373 ; for performance and scale use rc.redis
447 374 ; *********************************************
448 375 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
449 376 rc_cache.cache_general.expiration_time = 43200
450 377 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
451 378 #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db
452 379
453 380 ; alternative `cache_general` redis backend with distributed lock
454 381 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
455 382 #rc_cache.cache_general.expiration_time = 300
456 383
457 384 ; redis_expiration_time needs to be greater than expiration_time
458 385 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
459 386
460 387 #rc_cache.cache_general.arguments.host = localhost
461 388 #rc_cache.cache_general.arguments.port = 6379
462 389 #rc_cache.cache_general.arguments.db = 0
463 390 #rc_cache.cache_general.arguments.socket_timeout = 30
464 391 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
465 392 #rc_cache.cache_general.arguments.distributed_lock = true
466 393
467 394 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
468 395 #rc_cache.cache_general.arguments.lock_auto_renewal = true
469 396
470 397 ; *************************************************
471 398 ; `cache_perms` cache for permission tree, auth TTL
472 399 ; for simplicity use rc.file_namespace backend,
473 400 ; for performance and scale use rc.redis
474 401 ; *************************************************
475 402 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
476 403 rc_cache.cache_perms.expiration_time = 3600
477 404 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
478 405 #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db
479 406
480 407 ; alternative `cache_perms` redis backend with distributed lock
481 408 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
482 409 #rc_cache.cache_perms.expiration_time = 300
483 410
484 411 ; redis_expiration_time needs to be greater than expiration_time
485 412 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
486 413
487 414 #rc_cache.cache_perms.arguments.host = localhost
488 415 #rc_cache.cache_perms.arguments.port = 6379
489 416 #rc_cache.cache_perms.arguments.db = 0
490 417 #rc_cache.cache_perms.arguments.socket_timeout = 30
491 418 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
492 419 #rc_cache.cache_perms.arguments.distributed_lock = true
493 420
494 421 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
495 422 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
496 423
497 424 ; ***************************************************
498 425 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
499 426 ; for simplicity use rc.file_namespace backend,
500 427 ; for performance and scale use rc.redis
501 428 ; ***************************************************
502 429 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
503 430 rc_cache.cache_repo.expiration_time = 2592000
504 431 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
505 432 #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db
506 433
507 434 ; alternative `cache_repo` redis backend with distributed lock
508 435 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
509 436 #rc_cache.cache_repo.expiration_time = 2592000
510 437
511 438 ; redis_expiration_time needs to be greater than expiration_time
512 439 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
513 440
514 441 #rc_cache.cache_repo.arguments.host = localhost
515 442 #rc_cache.cache_repo.arguments.port = 6379
516 443 #rc_cache.cache_repo.arguments.db = 1
517 444 #rc_cache.cache_repo.arguments.socket_timeout = 30
518 445 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
519 446 #rc_cache.cache_repo.arguments.distributed_lock = true
520 447
521 448 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
522 449 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
523 450
524 451 ; ##############
525 452 ; BEAKER SESSION
526 453 ; ##############
527 454
528 455 ; beaker.session.type is type of storage options for the logged users sessions. Current allowed
529 456 ; types are file, ext:redis, ext:database, ext:memcached, and memory (default if not specified).
530 457 ; Fastest ones are Redis and ext:database
531 458 beaker.session.type = file
532 459 beaker.session.data_dir = %(here)s/data/sessions
533 460
534 461 ; Redis based sessions
535 462 #beaker.session.type = ext:redis
536 463 #beaker.session.url = redis://127.0.0.1:6379/2
537 464
538 465 ; DB based session, fast, and allows easy management over logged in users
539 466 #beaker.session.type = ext:database
540 467 #beaker.session.table_name = db_session
541 468 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
542 469 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
543 470 #beaker.session.sa.pool_recycle = 3600
544 471 #beaker.session.sa.echo = false
545 472
546 473 beaker.session.key = rhodecode
547 474 beaker.session.secret = develop-rc-uytcxaz
548 475 beaker.session.lock_dir = %(here)s/data/sessions/lock
549 476
550 477 ; Secure encrypted cookie. Requires AES and AES python libraries
551 478 ; you must disable beaker.session.secret to use this
552 479 #beaker.session.encrypt_key = key_for_encryption
553 480 #beaker.session.validate_key = validation_key
554 481
555 482 ; Sets session as invalid (also logging out user) if it has not been
556 483 ; accessed for given amount of time in seconds
557 484 beaker.session.timeout = 2592000
558 485 beaker.session.httponly = true
559 486
560 487 ; Path to use for the cookie. Set to prefix if you use prefix middleware
561 488 #beaker.session.cookie_path = /custom_prefix
562 489
563 490 ; Set https secure cookie
564 491 beaker.session.secure = false
565 492
566 493 ; default cookie expiration time in seconds, set to `true` to set expire
567 494 ; at browser close
568 495 #beaker.session.cookie_expires = 3600
569 496
570 497 ; #############################
571 498 ; SEARCH INDEXING CONFIGURATION
572 499 ; #############################
573 500
574 501 ; Full text search indexer is available in rhodecode-tools under
575 502 ; `rhodecode-tools index` command
576 503
577 504 ; WHOOSH Backend, doesn't require additional services to run
578 505 ; it works good with few dozen repos
579 506 search.module = rhodecode.lib.index.whoosh
580 507 search.location = %(here)s/data/index
581 508
582 509 ; ####################
583 510 ; CHANNELSTREAM CONFIG
584 511 ; ####################
585 512
586 513 ; channelstream enables persistent connections and live notification
587 514 ; in the system. It's also used by the chat system
588 515
589 516 channelstream.enabled = false
590 517
591 518 ; server address for channelstream server on the backend
592 519 channelstream.server = 127.0.0.1:9800
593 520
594 521 ; location of the channelstream server from outside world
595 522 ; use ws:// for http or wss:// for https. This address needs to be handled
596 523 ; by external HTTP server such as Nginx or Apache
597 524 ; see Nginx/Apache configuration examples in our docs
598 525 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
599 526 channelstream.secret = secret
600 527 channelstream.history.location = %(here)s/channelstream_history
601 528
602 529 ; Internal application path that Javascript uses to connect into.
603 530 ; If you use proxy-prefix the prefix should be added before /_channelstream
604 531 channelstream.proxy_path = /_channelstream
605 532
606 533
607 534 ; ##############################
608 535 ; MAIN RHODECODE DATABASE CONFIG
609 536 ; ##############################
610 537
611 538 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
612 539 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
613 540 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
614 541 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
615 542 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
616 543
617 544 sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
618 545
619 546 ; see sqlalchemy docs for other advanced settings
620 547 ; print the sql statements to output
621 548 sqlalchemy.db1.echo = false
622 549
623 550 ; recycle the connections after this amount of seconds
624 551 sqlalchemy.db1.pool_recycle = 3600
625 552
626 553 ; the number of connections to keep open inside the connection pool.
627 554 ; 0 indicates no limit
555 ; the general calculus with gevent is:
556 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
557 ; then increase pool size + max overflow so that they add up to 500.
628 558 #sqlalchemy.db1.pool_size = 5
629 559
630 560 ; The number of connections to allow in connection pool "overflow", that is
631 561 ; connections that can be opened above and beyond the pool_size setting,
632 562 ; which defaults to five.
633 563 #sqlalchemy.db1.max_overflow = 10
634 564
635 565 ; Connection check ping, used to detect broken database connections
636 566 ; could be enabled to better handle cases if MySQL has gone away errors
637 567 #sqlalchemy.db1.ping_connection = true
638 568
639 569 ; ##########
640 570 ; VCS CONFIG
641 571 ; ##########
642 572 vcs.server.enable = true
643 573 vcs.server = localhost:9900
644 574
645 575 ; Web server connectivity protocol, responsible for web based VCS operations
646 576 ; Available protocols are:
647 577 ; `http` - use http-rpc backend (default)
648 578 vcs.server.protocol = http
649 579
650 580 ; Push/Pull operations protocol, available options are:
651 581 ; `http` - use http-rpc backend (default)
652 582 vcs.scm_app_implementation = http
653 583
654 584 ; Push/Pull operations hooks protocol, available options are:
655 585 ; `http` - use http-rpc backend (default)
656 586 vcs.hooks.protocol = http
657 587
658 588 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
659 589 ; accessible via network.
660 590 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
661 591 vcs.hooks.host = *
662 592
663 593 ; Start VCSServer with this instance as a subprocess, useful for development
664 594 vcs.start_server = false
665 595
666 596 ; List of enabled VCS backends, available options are:
667 597 ; `hg` - mercurial
668 598 ; `git` - git
669 599 ; `svn` - subversion
670 600 vcs.backends = hg, git, svn
671 601
672 602 ; Wait this number of seconds before killing connection to the vcsserver
673 603 vcs.connection_timeout = 3600
674 604
675 605 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
676 606 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
677 607 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
678 608 #vcs.svn.compatible_version = 1.8
679 609
680 610 ; Cache flag to cache vcsserver remote calls locally
681 611 ; It uses cache_region `cache_repo`
682 612 vcs.methods.cache = true
683 613
684 614 ; ####################################################
685 615 ; Subversion proxy support (mod_dav_svn)
686 616 ; Maps RhodeCode repo groups into SVN paths for Apache
687 617 ; ####################################################
688 618
689 619 ; Enable or disable the config file generation.
690 620 svn.proxy.generate_config = false
691 621
692 622 ; Generate config file with `SVNListParentPath` set to `On`.
693 623 svn.proxy.list_parent_path = true
694 624
695 625 ; Set location and file name of generated config file.
696 626 svn.proxy.config_file_path = %(here)s/mod_dav_svn.conf
697 627
698 628 ; alternative mod_dav config template. This needs to be a valid mako template
699 629 ; Example template can be found in the source code:
700 630 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
701 631 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
702 632
703 633 ; Used as a prefix to the `Location` block in the generated config file.
704 634 ; In most cases it should be set to `/`.
705 635 svn.proxy.location_root = /
706 636
707 637 ; Command to reload the mod dav svn configuration on change.
708 638 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
709 639 ; Make sure user who runs RhodeCode process is allowed to reload Apache
710 640 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
711 641
712 642 ; If the timeout expires before the reload command finishes, the command will
713 643 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
714 644 #svn.proxy.reload_timeout = 10
715 645
716 646 ; ####################
717 647 ; SSH Support Settings
718 648 ; ####################
719 649
720 650 ; Defines if a custom authorized_keys file should be created and written on
721 651 ; any change user ssh keys. Setting this to false also disables possibility
722 652 ; of adding SSH keys by users from web interface. Super admins can still
723 653 ; manage SSH Keys.
724 654 ssh.generate_authorized_keyfile = false
725 655
726 656 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
727 657 # ssh.authorized_keys_ssh_opts =
728 658
729 659 ; Path to the authorized_keys file where the generate entries are placed.
730 660 ; It is possible to have multiple key files specified in `sshd_config` e.g.
731 661 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
732 662 ssh.authorized_keys_file_path = ~/.ssh/authorized_keys_rhodecode
733 663
734 664 ; Command to execute the SSH wrapper. The binary is available in the
735 665 ; RhodeCode installation directory.
736 666 ; e.g ~/.rccontrol/community-1/profile/bin/rc-ssh-wrapper
737 667 ssh.wrapper_cmd = ~/.rccontrol/community-1/rc-ssh-wrapper
738 668
739 669 ; Allow shell when executing the ssh-wrapper command
740 670 ssh.wrapper_cmd_allow_shell = false
741 671
742 672 ; Enables logging, and detailed output send back to the client during SSH
743 673 ; operations. Useful for debugging, shouldn't be used in production.
744 674 ssh.enable_debug_logging = true
745 675
746 676 ; Paths to binary executable, by default they are the names, but we can
747 677 ; override them if we want to use a custom one
748 678 ssh.executable.hg = ~/.rccontrol/vcsserver-1/profile/bin/hg
749 679 ssh.executable.git = ~/.rccontrol/vcsserver-1/profile/bin/git
750 680 ssh.executable.svn = ~/.rccontrol/vcsserver-1/profile/bin/svnserve
751 681
752 682 ; Enables SSH key generator web interface. Disabling this still allows users
753 683 ; to add their own keys.
754 684 ssh.enable_ui_key_generator = true
755 685
756 686
757 687 ; #################
758 688 ; APPENLIGHT CONFIG
759 689 ; #################
760 690
761 691 ; Appenlight is tailored to work with RhodeCode, see
762 692 ; http://appenlight.rhodecode.com for details how to obtain an account
763 693
764 694 ; Appenlight integration enabled
765 695 #appenlight = false
766 696
767 697 #appenlight.server_url = https://api.appenlight.com
768 698 #appenlight.api_key = YOUR_API_KEY
769 699 #appenlight.transport_config = https://api.appenlight.com?threaded=1&timeout=5
770 700
771 701 ; used for JS client
772 702 #appenlight.api_public_key = YOUR_API_PUBLIC_KEY
773 703
774 704 ; TWEAK AMOUNT OF INFO SENT HERE
775 705
776 706 ; enables 404 error logging (default False)
777 707 #appenlight.report_404 = false
778 708
779 709 ; time in seconds after request is considered being slow (default 1)
780 710 #appenlight.slow_request_time = 1
781 711
782 712 ; record slow requests in application
783 713 ; (needs to be enabled for slow datastore recording and time tracking)
784 714 #appenlight.slow_requests = true
785 715
786 716 ; enable hooking to application loggers
787 717 #appenlight.logging = true
788 718
789 719 ; minimum log level for log capture
790 720 #ppenlight.logging.level = WARNING
791 721
792 722 ; send logs only from erroneous/slow requests
793 723 ; (saves API quota for intensive logging)
794 724 #appenlight.logging_on_error = false
795 725
796 726 ; list of additional keywords that should be grabbed from environ object
797 727 ; can be string with comma separated list of words in lowercase
798 728 ; (by default client will always send following info:
799 729 ; 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that
800 730 ; start with HTTP* this list be extended with additional keywords here
801 731 #appenlight.environ_keys_whitelist =
802 732
803 733 ; list of keywords that should be blanked from request object
804 734 ; can be string with comma separated list of words in lowercase
805 735 ; (by default client will always blank keys that contain following words
806 736 ; 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf'
807 737 ; this list be extended with additional keywords set here
808 738 #appenlight.request_keys_blacklist =
809 739
810 740 ; list of namespaces that should be ignores when gathering log entries
811 741 ; can be string with comma separated list of namespaces
812 742 ; (by default the client ignores own entries: appenlight_client.client)
813 743 #appenlight.log_namespace_blacklist =
814 744
815 745 ; Statsd client config, this is used to send metrics to statsd
816 746 ; We recommend setting statsd_exported and scrape them using Prometheus
817 747 #statsd.enabled = false
818 748 #statsd.statsd_host = 0.0.0.0
819 749 #statsd.statsd_port = 8125
820 750 #statsd.statsd_prefix =
821 751 #statsd.statsd_ipv6 = false
822 752
823 753 ; configure logging automatically at server startup set to false
824 754 ; to use the below custom logging config.
825 755 ; RC_LOGGING_FORMATTER
826 756 ; RC_LOGGING_LEVEL
827 757 ; env variables can control the settings for logging in case of autoconfigure
828 758
829 759 #logging.autoconfigure = true
830 760
831 761 ; specify your own custom logging config file to configure logging
832 762 #logging.logging_conf_file = /path/to/custom_logging.ini
833 763
834 764 ; Dummy marker to add new entries after.
835 765 ; Add any custom entries below. Please don't remove this marker.
836 766 custom.conf = 1
837 767
838 768
839 769 ; #####################
840 770 ; LOGGING CONFIGURATION
841 771 ; #####################
842 772
843 773 [loggers]
844 774 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper
845 775
846 776 [handlers]
847 777 keys = console, console_sql
848 778
849 779 [formatters]
850 780 keys = generic, json, color_formatter, color_formatter_sql
851 781
852 782 ; #######
853 783 ; LOGGERS
854 784 ; #######
855 785 [logger_root]
856 786 level = NOTSET
857 787 handlers = console
858 788
859 789 [logger_sqlalchemy]
860 790 level = INFO
861 791 handlers = console_sql
862 792 qualname = sqlalchemy.engine
863 793 propagate = 0
864 794
865 795 [logger_beaker]
866 796 level = DEBUG
867 797 handlers =
868 798 qualname = beaker.container
869 799 propagate = 1
870 800
871 801 [logger_rhodecode]
872 802 level = DEBUG
873 803 handlers =
874 804 qualname = rhodecode
875 805 propagate = 1
876 806
877 807 [logger_ssh_wrapper]
878 808 level = DEBUG
879 809 handlers =
880 810 qualname = ssh_wrapper
881 811 propagate = 1
882 812
883 813 [logger_celery]
884 814 level = DEBUG
885 815 handlers =
886 816 qualname = celery
887 817
888 818
889 819 ; ########
890 820 ; HANDLERS
891 821 ; ########
892 822
893 823 [handler_console]
894 824 class = StreamHandler
895 825 args = (sys.stderr, )
896 826 level = DEBUG
897 827 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
898 828 ; This allows sending properly formatted logs to grafana loki or elasticsearch
899 829 formatter = color_formatter
900 830
901 831 [handler_console_sql]
902 832 ; "level = DEBUG" logs SQL queries and results.
903 833 ; "level = INFO" logs SQL queries.
904 834 ; "level = WARN" logs neither. (Recommended for production systems.)
905 835 class = StreamHandler
906 836 args = (sys.stderr, )
907 837 level = WARN
908 838 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
909 839 ; This allows sending properly formatted logs to grafana loki or elasticsearch
910 840 formatter = color_formatter_sql
911 841
912 842 ; ##########
913 843 ; FORMATTERS
914 844 ; ##########
915 845
916 846 [formatter_generic]
917 847 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
918 848 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
919 849 datefmt = %Y-%m-%d %H:%M:%S
920 850
921 851 [formatter_color_formatter]
922 852 class = rhodecode.lib.logging_formatter.ColorFormatter
923 853 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
924 854 datefmt = %Y-%m-%d %H:%M:%S
925 855
926 856 [formatter_color_formatter_sql]
927 857 class = rhodecode.lib.logging_formatter.ColorFormatterSql
928 858 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
929 859 datefmt = %Y-%m-%d %H:%M:%S
930 860
931 861 [formatter_json]
932 862 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
933 863 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
@@ -1,393 +1,506 b''
1 1 """
2 2 Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
3 3 Gunicorn configuration should be managed by .ini files entries of RhodeCode or VCSServer
4 4 """
5 5
6 6 import gc
7 7 import os
8 8 import sys
9 9 import math
10 10 import time
11 11 import threading
12 12 import traceback
13 13 import random
14 14 import socket
15 import dataclasses
15 16 from gunicorn.glogging import Logger
16 17
17 18
18 19 def get_workers():
19 20 import multiprocessing
20 21 return multiprocessing.cpu_count() * 2 + 1
21 22
22 # GLOBAL
23
24 bind = "127.0.0.1:10020"
25
26
27 # Error logging output for gunicorn (-) is stdout
23 28 errorlog = '-'
29
30 # Access logging output for gunicorn (-) is stdout
24 31 accesslog = '-'
25 32
26 33
27 34 # SERVER MECHANICS
28 35 # None == system temp dir
29 36 # worker_tmp_dir is recommended to be set to some tmpfs
30 37 worker_tmp_dir = None
31 38 tmp_upload_dir = None
32 39
40 # use re-use port logic
33 41 #reuse_port = True
34 42
35 43 # Custom log format
36 44 #access_log_format = (
37 45 # '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
38 46
39 47 # loki format for easier parsing in grafana
40 48 access_log_format = (
41 49 'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
42 50
43 # self adjust workers based on CPU count
51
52 # Sets the number of process workers. More workers means more concurrent connections
53 # RhodeCode can handle at the same time. Each additional worker also it increases
54 # memory usage as each has it's own set of caches.
55 # Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
56 # than 8-10 unless for huge deployments .e.g 700-1000 users.
57 # `instance_id = *` must be set in the [app:main] section below (which is the default)
58 # when using more than 1 worker.
59 workers = 4
60
61 # self adjust workers based on CPU count, to use maximum of CPU and not overquota the resources
44 62 # workers = get_workers()
45 63
64 # Gunicorn access log level
65 loglevel = 'info'
66
67 # Process name visible in process list
68 proc_name = 'rhodecode_enterprise'
69
70 # Type of worker class, one of `sync`, `gevent`
71 # currently `sync` is the only option allowed.
72 worker_class = 'gevent'
73
74 # The maximum number of simultaneous clients. Valid only for gevent
75 worker_connections = 10
76
77 # Max number of requests that worker will handle before being gracefully restarted.
78 # Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
79 max_requests = 2000
80 max_requests_jitter = 30
81
82 # The maximum number of pending connections.
83 # Exceeding this number results in the client getting an error when attempting to connect.
84 backlog = 64
85
86 # Amount of time a worker can spend with handling a request before it
87 # gets killed and restarted. By default set to 21600 (6hrs)
88 # Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
89 timeout = 21600
90
91 # The maximum size of HTTP request line in bytes.
92 # 0 for unlimited
93 limit_request_line = 0
94
95 # Limit the number of HTTP headers fields in a request.
96 # By default this value is 100 and can't be larger than 32768.
97 limit_request_fields = 32768
98
99 # Limit the allowed size of an HTTP request header field.
100 # Value is a positive number or 0.
101 # Setting it to 0 will allow unlimited header field sizes.
102 limit_request_field_size = 0
103
104 # Timeout for graceful workers restart.
105 # After receiving a restart signal, workers have this much time to finish
106 # serving requests. Workers still alive after the timeout (starting from the
107 # receipt of the restart signal) are force killed.
108 # Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
109 graceful_timeout = 21600
110
111 # The number of seconds to wait for requests on a Keep-Alive connection.
112 # Generally set in the 1-5 seconds range.
113 keepalive = 2
114
115 # Maximum memory usage that each worker can use before it will receive a
116 # graceful restart signal 0 = memory monitoring is disabled
117 # Examples: 268435456 (256MB), 536870912 (512MB)
118 # 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
119 memory_max_usage = 0
120
121 # How often in seconds to check for memory usage for each gunicorn worker
122 memory_usage_check_interval = 60
123
124 # Threshold value for which we don't recycle worker if GarbageCollection
125 # frees up enough resources. Before each restart we try to run GC on worker
126 # in case we get enough free memory after that, restart will not happen.
127 memory_usage_recovery_threshold = 0.8
128
129
130 @dataclasses.dataclass
131 class MemoryCheckConfig:
132 max_usage: int
133 check_interval: int
134 recovery_threshold: float
135
46 136
47 137 def _get_process_rss(pid=None):
48 138 try:
49 139 import psutil
50 140 if pid:
51 141 proc = psutil.Process(pid)
52 142 else:
53 143 proc = psutil.Process()
54 144 return proc.memory_info().rss
55 145 except Exception:
56 146 return None
57 147
58 148
59 149 def _get_config(ini_path):
60 150 import configparser
61 151
62 152 try:
63 153 config = configparser.RawConfigParser()
64 154 config.read(ini_path)
65 155 return config
66 156 except Exception:
67 157 return None
68 158
69 159
70 def _time_with_offset(memory_usage_check_interval):
71 return time.time() - random.randint(0, memory_usage_check_interval/2.0)
72
73
74 def pre_fork(server, worker):
75 pass
76
160 def get_memory_usage_params(config=None):
161 # memory spec defaults
162 _memory_max_usage = memory_max_usage
163 _memory_usage_check_interval = memory_usage_check_interval
164 _memory_usage_recovery_threshold = memory_usage_recovery_threshold
77 165
78 def post_fork(server, worker):
79
80 # memory spec defaults
81 _memory_max_usage = 0
82 _memory_usage_check_interval = 60
83 _memory_usage_recovery_threshold = 0.8
84
85 ini_path = os.path.abspath(server.cfg.paste)
166 if config:
167 ini_path = os.path.abspath(config)
86 168 conf = _get_config(ini_path)
87 169
88 170 section = 'server:main'
89 171 if conf and conf.has_section(section):
90 172
91 173 if conf.has_option(section, 'memory_max_usage'):
92 174 _memory_max_usage = conf.getint(section, 'memory_max_usage')
93 175
94 176 if conf.has_option(section, 'memory_usage_check_interval'):
95 177 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
96 178
97 179 if conf.has_option(section, 'memory_usage_recovery_threshold'):
98 180 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
99 181
182 _memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
183 or _memory_max_usage)
184 _memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
185 or _memory_usage_check_interval)
186 _memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
187 or _memory_usage_recovery_threshold)
188
189 return MemoryCheckConfig(_memory_max_usage, _memory_usage_check_interval, _memory_usage_recovery_threshold)
190
191
192 def _time_with_offset(check_interval):
193 return time.time() - random.randint(0, check_interval/2.0)
194
195
196 def pre_fork(server, worker):
197 pass
198
199
200 def post_fork(server, worker):
201
202 memory_conf = get_memory_usage_params()
203 _memory_max_usage = memory_conf.max_usage
204 _memory_usage_check_interval = memory_conf.check_interval
205 _memory_usage_recovery_threshold = memory_conf.recovery_threshold
206
100 207 worker._memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
101 208 or _memory_max_usage)
102 209 worker._memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
103 210 or _memory_usage_check_interval)
104 211 worker._memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
105 212 or _memory_usage_recovery_threshold)
106 213
107 214 # register memory last check time, with some random offset so we don't recycle all
108 215 # at once
109 216 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
110 217
111 218 if _memory_max_usage:
112 server.log.info("[%-10s] WORKER spawned with max memory set at %s", worker.pid,
219 server.log.info("pid=[%-10s] WORKER spawned with max memory set at %s", worker.pid,
113 220 _format_data_size(_memory_max_usage))
114 221 else:
115 server.log.info("[%-10s] WORKER spawned", worker.pid)
222 server.log.info("pid=[%-10s] WORKER spawned", worker.pid)
116 223
117 224
118 225 def pre_exec(server):
119 226 server.log.info("Forked child, re-executing.")
120 227
121 228
122 229 def on_starting(server):
123 230 server_lbl = '{} {}'.format(server.proc_name, server.address)
124 231 server.log.info("Server %s is starting.", server_lbl)
125 232
126 233
127 234 def when_ready(server):
128 235 server.log.info("Server %s is ready. Spawning workers", server)
129 236
130 237
131 238 def on_reload(server):
132 239 pass
133 240
134 241
135 242 def _format_data_size(size, unit="B", precision=1, binary=True):
136 243 """Format a number using SI units (kilo, mega, etc.).
137 244
138 245 ``size``: The number as a float or int.
139 246
140 247 ``unit``: The unit name in plural form. Examples: "bytes", "B".
141 248
142 249 ``precision``: How many digits to the right of the decimal point. Default
143 250 is 1. 0 suppresses the decimal point.
144 251
145 252 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
146 253 If true, use base-2 binary prefixes (kibi = Ki = 1024).
147 254
148 255 ``full_name``: If false (default), use the prefix abbreviation ("k" or
149 256 "Ki"). If true, use the full prefix ("kilo" or "kibi"). If false,
150 257 use abbreviation ("k" or "Ki").
151 258
152 259 """
153 260
154 261 if not binary:
155 262 base = 1000
156 263 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
157 264 else:
158 265 base = 1024
159 266 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
160 267
161 268 sign = ""
162 269 if size > 0:
163 270 m = int(math.log(size, base))
164 271 elif size < 0:
165 272 sign = "-"
166 273 size = -size
167 274 m = int(math.log(size, base))
168 275 else:
169 276 m = 0
170 277 if m > 8:
171 278 m = 8
172 279
173 280 if m == 0:
174 281 precision = '%.0f'
175 282 else:
176 283 precision = '%%.%df' % precision
177 284
178 285 size = precision % (size / math.pow(base, m))
179 286
180 287 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
181 288
182 289
183 290 def _check_memory_usage(worker):
184 memory_max_usage = worker._memory_max_usage
185 if not memory_max_usage:
291 _memory_max_usage = worker._memory_max_usage
292 if not _memory_max_usage:
186 293 return
187 294
188 memory_usage_check_interval = worker._memory_usage_check_interval
189 memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
295 _memory_usage_check_interval = worker._memory_usage_check_interval
296 _memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
190 297
191 298 elapsed = time.time() - worker._last_memory_check_time
192 if elapsed > memory_usage_check_interval:
299 if elapsed > _memory_usage_check_interval:
193 300 mem_usage = _get_process_rss()
194 if mem_usage and mem_usage > memory_max_usage:
301 if mem_usage and mem_usage > _memory_max_usage:
195 302 worker.log.info(
196 303 "memory usage %s > %s, forcing gc",
197 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
304 _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
198 305 # Try to clean it up by forcing a full collection.
199 306 gc.collect()
200 307 mem_usage = _get_process_rss()
201 if mem_usage > memory_usage_recovery_threshold:
308 if mem_usage > _memory_usage_recovery_threshold:
202 309 # Didn't clean up enough, we'll have to terminate.
203 310 worker.log.warning(
204 311 "memory usage %s > %s after gc, quitting",
205 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
312 _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
206 313 # This will cause worker to auto-restart itself
207 314 worker.alive = False
208 315 worker._last_memory_check_time = time.time()
209 316
210 317
211 318 def worker_int(worker):
212 worker.log.info("[%-10s] worker received INT or QUIT signal", worker.pid)
319 worker.log.info("pid=[%-10s] worker received INT or QUIT signal", worker.pid)
213 320
214 321 # get traceback info, on worker crash
322 def get_thread_id(t_id):
215 323 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
324 return id2name.get(t_id, "unknown_thread_id")
325
216 326 code = []
217 327 for thread_id, stack in sys._current_frames().items():
218 328 code.append(
219 "\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
329 "\n# Thread: %s(%d)" % (get_thread_id(thread_id), thread_id))
220 330 for fname, lineno, name, line in traceback.extract_stack(stack):
221 331 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
222 332 if line:
223 333 code.append(" %s" % (line.strip()))
224 334 worker.log.debug("\n".join(code))
225 335
226 336
227 337 def worker_abort(worker):
228 worker.log.info("[%-10s] worker received SIGABRT signal", worker.pid)
338 worker.log.info("pid=[%-10s] worker received SIGABRT signal", worker.pid)
229 339
230 340
231 341 def worker_exit(server, worker):
232 worker.log.info("[%-10s] worker exit", worker.pid)
342 worker.log.info("pid=[%-10s] worker exit", worker.pid)
233 343
234 344
235 345 def child_exit(server, worker):
236 worker.log.info("[%-10s] worker child exit", worker.pid)
346 worker.log.info("pid=[%-10s] worker child exit", worker.pid)
237 347
238 348
239 349 def pre_request(worker, req):
240 350 worker.start_time = time.time()
241 351 worker.log.debug(
242 352 "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
243 353
244 354
245 355 def post_request(worker, req, environ, resp):
246 356 total_time = time.time() - worker.start_time
247 357 # Gunicorn sometimes has problems with reading the status_code
248 358 status_code = getattr(resp, 'status_code', '')
249 359 worker.log.debug(
250 360 "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
251 361 worker.nr, req.method, req.path, status_code, total_time)
252 362 _check_memory_usage(worker)
253 363
254 364
255 365 def _filter_proxy(ip):
256 366 """
257 367 Passed in IP addresses in HEADERS can be in a special format of multiple
258 368 ips. Those comma separated IPs are passed from various proxies in the
259 369 chain of request processing. The left-most being the original client.
260 370 We only care about the first IP which came from the org. client.
261 371
262 372 :param ip: ip string from headers
263 373 """
264 374 if ',' in ip:
265 375 _ips = ip.split(',')
266 376 _first_ip = _ips[0].strip()
267 377 return _first_ip
268 378 return ip
269 379
270 380
271 381 def _filter_port(ip):
272 382 """
273 383 Removes a port from ip, there are 4 main cases to handle here.
274 384 - ipv4 eg. 127.0.0.1
275 385 - ipv6 eg. ::1
276 386 - ipv4+port eg. 127.0.0.1:8080
277 387 - ipv6+port eg. [::1]:8080
278 388
279 389 :param ip:
280 390 """
281 391 def is_ipv6(ip_addr):
282 392 if hasattr(socket, 'inet_pton'):
283 393 try:
284 394 socket.inet_pton(socket.AF_INET6, ip_addr)
285 395 except socket.error:
286 396 return False
287 397 else:
288 398 return False
289 399 return True
290 400
291 401 if ':' not in ip: # must be ipv4 pure ip
292 402 return ip
293 403
294 404 if '[' in ip and ']' in ip: # ipv6 with port
295 405 return ip.split(']')[0][1:].lower()
296 406
297 407 # must be ipv6 or ipv4 with port
298 408 if is_ipv6(ip):
299 409 return ip
300 410 else:
301 411 ip, _port = ip.split(':')[:2] # means ipv4+port
302 412 return ip
303 413
304 414
305 415 def get_ip_addr(environ):
306 416 proxy_key = 'HTTP_X_REAL_IP'
307 417 proxy_key2 = 'HTTP_X_FORWARDED_FOR'
308 418 def_key = 'REMOTE_ADDR'
309 _filters = lambda x: _filter_port(_filter_proxy(x))
419
420 def _filters(x):
421 return _filter_port(_filter_proxy(x))
310 422
311 423 ip = environ.get(proxy_key)
312 424 if ip:
313 425 return _filters(ip)
314 426
315 427 ip = environ.get(proxy_key2)
316 428 if ip:
317 429 return _filters(ip)
318 430
319 431 ip = environ.get(def_key, '0.0.0.0')
320 432 return _filters(ip)
321 433
322 434
323 435 class RhodeCodeLogger(Logger):
324 436 """
325 437 Custom Logger that allows some customization that gunicorn doesn't allow
326 438 """
327 439
328 440 datefmt = r"%Y-%m-%d %H:%M:%S"
329 441
330 442 def __init__(self, cfg):
331 443 Logger.__init__(self, cfg)
332 444
333 445 def now(self):
334 446 """ return date in RhodeCode Log format """
335 447 now = time.time()
336 448 msecs = int((now - int(now)) * 1000)
337 449 return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
338 450
339 451 def atoms(self, resp, req, environ, request_time):
340 452 """ Gets atoms for log formatting.
341 453 """
342 454 status = resp.status
343 455 if isinstance(status, str):
344 456 status = status.split(None, 1)[0]
345 457 atoms = {
346 458 'h': get_ip_addr(environ),
347 459 'l': '-',
348 460 'u': self._get_user(environ) or '-',
349 461 't': self.now(),
350 462 'r': "%s %s %s" % (environ['REQUEST_METHOD'],
351 463 environ['RAW_URI'],
352 464 environ["SERVER_PROTOCOL"]),
353 465 's': status,
354 466 'm': environ.get('REQUEST_METHOD'),
355 467 'U': environ.get('PATH_INFO'),
356 468 'q': environ.get('QUERY_STRING'),
357 469 'H': environ.get('SERVER_PROTOCOL'),
358 470 'b': getattr(resp, 'sent', None) is not None and str(resp.sent) or '-',
359 471 'B': getattr(resp, 'sent', None),
360 472 'f': environ.get('HTTP_REFERER', '-'),
361 473 'a': environ.get('HTTP_USER_AGENT', '-'),
362 474 'T': request_time.seconds,
363 475 'D': (request_time.seconds * 1000000) + request_time.microseconds,
364 476 'M': (request_time.seconds * 1000) + int(request_time.microseconds/1000),
365 477 'L': "%d.%06d" % (request_time.seconds, request_time.microseconds),
366 478 'p': "<%s>" % os.getpid()
367 479 }
368 480
369 481 # add request headers
370 482 if hasattr(req, 'headers'):
371 483 req_headers = req.headers
372 484 else:
373 485 req_headers = req
374 486
375 487 if hasattr(req_headers, "items"):
376 488 req_headers = req_headers.items()
377 489
378 490 atoms.update({"{%s}i" % k.lower(): v for k, v in req_headers})
379 491
380 492 resp_headers = resp.headers
381 493 if hasattr(resp_headers, "items"):
382 494 resp_headers = resp_headers.items()
383 495
384 496 # add response headers
385 497 atoms.update({"{%s}o" % k.lower(): v for k, v in resp_headers})
386 498
387 499 # add environ variables
388 500 environ_variables = environ.items()
389 501 atoms.update({"{%s}e" % k.lower(): v for k, v in environ_variables})
390 502
391 503 return atoms
392 504
505
393 506 logger_class = RhodeCodeLogger
@@ -1,887 +1,814 b''
1 1
2 2
3 3 ; #########################################
4 4 ; RHODECODE COMMUNITY EDITION CONFIGURATION
5 5 ; #########################################
6 6
7 7 [DEFAULT]
8 8 ; Debug flag sets all loggers to debug, and enables request tracking
9 9 debug = false
10 10
11 11 ; ########################################################################
12 12 ; EMAIL CONFIGURATION
13 13 ; These settings will be used by the RhodeCode mailing system
14 14 ; ########################################################################
15 15
16 16 ; prefix all emails subjects with given prefix, helps filtering out emails
17 17 #email_prefix = [RhodeCode]
18 18
19 19 ; email FROM address all mails will be sent
20 20 #app_email_from = rhodecode-noreply@localhost
21 21
22 22 #smtp_server = mail.server.com
23 23 #smtp_username =
24 24 #smtp_password =
25 25 #smtp_port =
26 26 #smtp_use_tls = false
27 27 #smtp_use_ssl = true
28 28
29 29 [server:main]
30 ; COMMON HOST/IP CONFIG
30 ; COMMON HOST/IP CONFIG, This applies mostly to develop setup,
31 ; Host port for gunicorn are controlled by gunicorn_conf.py
31 32 host = 127.0.0.1
32 port = 5000
33 port = 10020
33 34
34 35
35 36 ; ###########################
36 37 ; GUNICORN APPLICATION SERVER
37 38 ; ###########################
38 39
39 ; run with gunicorn --paste rhodecode.ini
40 ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py
40 41
41 42 ; Module to use, this setting shouldn't be changed
42 43 use = egg:gunicorn#main
43 44
44 ; Sets the number of process workers. More workers means more concurrent connections
45 ; RhodeCode can handle at the same time. Each additional worker also it increases
46 ; memory usage as each has it's own set of caches.
47 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
48 ; than 8-10 unless for really big deployments .e.g 700-1000 users.
49 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
50 ; when using more than 1 worker.
51 workers = 2
52
53 ; Gunicorn access log level
54 loglevel = info
55
56 ; Process name visible in process list
57 proc_name = rhodecode
58
59 ; Type of worker class, one of `sync`, `gevent`
60 ; Recommended type is `gevent`
61 worker_class = gevent
62
63 ; The maximum number of simultaneous clients per worker. Valid only for gevent
64 worker_connections = 10
65
66 ; The maximum number of pending connections worker will queue to handle
67 backlog = 64
68
69 ; Max number of requests that worker will handle before being gracefully restarted.
70 ; Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
71 max_requests = 1000
72 max_requests_jitter = 30
73
74 ; Amount of time a worker can spend with handling a request before it
75 ; gets killed and restarted. By default set to 21600 (6hrs)
76 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
77 timeout = 21600
78
79 ; The maximum size of HTTP request line in bytes.
80 ; 0 for unlimited
81 limit_request_line = 0
82
83 ; Limit the number of HTTP headers fields in a request.
84 ; By default this value is 100 and can't be larger than 32768.
85 limit_request_fields = 32768
86
87 ; Limit the allowed size of an HTTP request header field.
88 ; Value is a positive number or 0.
89 ; Setting it to 0 will allow unlimited header field sizes.
90 limit_request_field_size = 0
91
92 ; Timeout for graceful workers restart.
93 ; After receiving a restart signal, workers have this much time to finish
94 ; serving requests. Workers still alive after the timeout (starting from the
95 ; receipt of the restart signal) are force killed.
96 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
97 graceful_timeout = 3600
98
99 # The number of seconds to wait for requests on a Keep-Alive connection.
100 # Generally set in the 1-5 seconds range.
101 keepalive = 2
102
103 ; Maximum memory usage that each worker can use before it will receive a
104 ; graceful restart signal 0 = memory monitoring is disabled
105 ; Examples: 268435456 (256MB), 536870912 (512MB)
106 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
107 memory_max_usage = 0
108
109 ; How often in seconds to check for memory usage for each gunicorn worker
110 memory_usage_check_interval = 60
111
112 ; Threshold value for which we don't recycle worker if GarbageCollection
113 ; frees up enough resources. Before each restart we try to run GC on worker
114 ; in case we get enough free memory after that, restart will not happen.
115 memory_usage_recovery_threshold = 0.8
116
117
118 45 ; Prefix middleware for RhodeCode.
119 46 ; recommended when using proxy setup.
120 47 ; allows to set RhodeCode under a prefix in server.
121 48 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
122 49 ; And set your prefix like: `prefix = /custom_prefix`
123 50 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
124 51 ; to make your cookies only work on prefix url
125 52 [filter:proxy-prefix]
126 53 use = egg:PasteDeploy#prefix
127 54 prefix = /
128 55
129 56 [app:main]
130 57 ; The %(here)s variable will be replaced with the absolute path of parent directory
131 58 ; of this file
132 59 ; Each option in the app:main can be override by an environmental variable
133 60 ;
134 61 ;To override an option:
135 62 ;
136 63 ;RC_<KeyName>
137 64 ;Everything should be uppercase, . and - should be replaced by _.
138 65 ;For example, if you have these configuration settings:
139 66 ;rc_cache.repo_object.backend = foo
140 67 ;can be overridden by
141 68 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
142 69
143 70 use = egg:rhodecode-enterprise-ce
144 71
145 72 ; enable proxy prefix middleware, defined above
146 73 #filter-with = proxy-prefix
147 74
148 75 ; encryption key used to encrypt social plugin tokens,
149 76 ; remote_urls with credentials etc, if not set it defaults to
150 77 ; `beaker.session.secret`
151 78 #rhodecode.encrypted_values.secret =
152 79
153 80 ; decryption strict mode (enabled by default). It controls if decryption raises
154 81 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
155 82 #rhodecode.encrypted_values.strict = false
156 83
157 84 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
158 85 ; fernet is safer, and we strongly recommend switching to it.
159 86 ; Due to backward compatibility aes is used as default.
160 87 #rhodecode.encrypted_values.algorithm = fernet
161 88
162 89 ; Return gzipped responses from RhodeCode (static files/application)
163 90 gzip_responses = false
164 91
165 92 ; Auto-generate javascript routes file on startup
166 93 generate_js_files = false
167 94
168 95 ; System global default language.
169 96 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
170 97 lang = en
171 98
172 99 ; Perform a full repository scan and import on each server start.
173 100 ; Settings this to true could lead to very long startup time.
174 101 startup.import_repos = false
175 102
176 103 ; URL at which the application is running. This is used for Bootstrapping
177 104 ; requests in context when no web request is available. Used in ishell, or
178 105 ; SSH calls. Set this for events to receive proper url for SSH calls.
179 106 app.base_url = http://rhodecode.local
180 107
181 108 ; Unique application ID. Should be a random unique string for security.
182 109 app_instance_uuid = rc-production
183 110
184 111 ; Cut off limit for large diffs (size in bytes). If overall diff size on
185 112 ; commit, or pull request exceeds this limit this diff will be displayed
186 113 ; partially. E.g 512000 == 512Kb
187 114 cut_off_limit_diff = 512000
188 115
189 116 ; Cut off limit for large files inside diffs (size in bytes). Each individual
190 117 ; file inside diff which exceeds this limit will be displayed partially.
191 118 ; E.g 128000 == 128Kb
192 119 cut_off_limit_file = 128000
193 120
194 121 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
195 122 vcs_full_cache = true
196 123
197 124 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
198 125 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
199 126 force_https = false
200 127
201 128 ; use Strict-Transport-Security headers
202 129 use_htsts = false
203 130
204 131 ; Set to true if your repos are exposed using the dumb protocol
205 132 git_update_server_info = false
206 133
207 134 ; RSS/ATOM feed options
208 135 rss_cut_off_limit = 256000
209 136 rss_items_per_page = 10
210 137 rss_include_diff = false
211 138
212 139 ; gist URL alias, used to create nicer urls for gist. This should be an
213 140 ; url that does rewrites to _admin/gists/{gistid}.
214 141 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
215 142 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
216 143 gist_alias_url =
217 144
218 145 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
219 146 ; used for access.
220 147 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
221 148 ; came from the the logged in user who own this authentication token.
222 149 ; Additionally @TOKEN syntax can be used to bound the view to specific
223 150 ; authentication token. Such view would be only accessible when used together
224 151 ; with this authentication token
225 152 ; list of all views can be found under `/_admin/permissions/auth_token_access`
226 153 ; The list should be "," separated and on a single line.
227 154 ; Most common views to enable:
228 155
229 156 # RepoCommitsView:repo_commit_download
230 157 # RepoCommitsView:repo_commit_patch
231 158 # RepoCommitsView:repo_commit_raw
232 159 # RepoCommitsView:repo_commit_raw@TOKEN
233 160 # RepoFilesView:repo_files_diff
234 161 # RepoFilesView:repo_archivefile
235 162 # RepoFilesView:repo_file_raw
236 163 # GistView:*
237 164 api_access_controllers_whitelist =
238 165
239 166 ; Default encoding used to convert from and to unicode
240 167 ; can be also a comma separated list of encoding in case of mixed encodings
241 168 default_encoding = UTF-8
242 169
243 170 ; instance-id prefix
244 171 ; a prefix key for this instance used for cache invalidation when running
245 172 ; multiple instances of RhodeCode, make sure it's globally unique for
246 173 ; all running RhodeCode instances. Leave empty if you don't use it
247 174 instance_id =
248 175
249 176 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
250 177 ; of an authentication plugin also if it is disabled by it's settings.
251 178 ; This could be useful if you are unable to log in to the system due to broken
252 179 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
253 180 ; module to log in again and fix the settings.
254 181 ; Available builtin plugin IDs (hash is part of the ID):
255 182 ; egg:rhodecode-enterprise-ce#rhodecode
256 183 ; egg:rhodecode-enterprise-ce#pam
257 184 ; egg:rhodecode-enterprise-ce#ldap
258 185 ; egg:rhodecode-enterprise-ce#jasig_cas
259 186 ; egg:rhodecode-enterprise-ce#headers
260 187 ; egg:rhodecode-enterprise-ce#crowd
261 188
262 189 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
263 190
264 191 ; Flag to control loading of legacy plugins in py:/path format
265 192 auth_plugin.import_legacy_plugins = true
266 193
267 194 ; alternative return HTTP header for failed authentication. Default HTTP
268 195 ; response is 401 HTTPUnauthorized. Currently HG clients have troubles with
269 196 ; handling that causing a series of failed authentication calls.
270 197 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
271 198 ; This will be served instead of default 401 on bad authentication
272 199 auth_ret_code =
273 200
274 201 ; use special detection method when serving auth_ret_code, instead of serving
275 202 ; ret_code directly, use 401 initially (Which triggers credentials prompt)
276 203 ; and then serve auth_ret_code to clients
277 204 auth_ret_code_detection = false
278 205
279 206 ; locking return code. When repository is locked return this HTTP code. 2XX
280 207 ; codes don't break the transactions while 4XX codes do
281 208 lock_ret_code = 423
282 209
283 210 ; allows to change the repository location in settings page
284 211 allow_repo_location_change = true
285 212
286 213 ; allows to setup custom hooks in settings page
287 214 allow_custom_hooks_settings = true
288 215
289 216 ; Generated license token required for EE edition license.
290 217 ; New generated token value can be found in Admin > settings > license page.
291 218 license_token =
292 219
293 220 ; This flag hides sensitive information on the license page such as token, and license data
294 221 license.hide_license_info = false
295 222
296 223 ; supervisor connection uri, for managing supervisor and logs.
297 224 supervisor.uri =
298 225
299 226 ; supervisord group name/id we only want this RC instance to handle
300 227 supervisor.group_id = prod
301 228
302 229 ; Display extended labs settings
303 230 labs_settings_active = true
304 231
305 232 ; Custom exception store path, defaults to TMPDIR
306 233 ; This is used to store exception from RhodeCode in shared directory
307 234 #exception_tracker.store_path =
308 235
309 236 ; Send email with exception details when it happens
310 237 #exception_tracker.send_email = false
311 238
312 239 ; Comma separated list of recipients for exception emails,
313 240 ; e.g admin@rhodecode.com,devops@rhodecode.com
314 241 ; Can be left empty, then emails will be sent to ALL super-admins
315 242 #exception_tracker.send_email_recipients =
316 243
317 244 ; optional prefix to Add to email Subject
318 245 #exception_tracker.email_prefix = [RHODECODE ERROR]
319 246
320 247 ; File store configuration. This is used to store and serve uploaded files
321 248 file_store.enabled = true
322 249
323 250 ; Storage backend, available options are: local
324 251 file_store.backend = local
325 252
326 253 ; path to store the uploaded binaries
327 254 file_store.storage_path = %(here)s/data/file_store
328 255
329 256 ; Uncomment and set this path to control settings for archive download cache.
330 257 ; Generated repo archives will be cached at this location
331 258 ; and served from the cache during subsequent requests for the same archive of
332 259 ; the repository. This path is important to be shared across filesystems and with
333 260 ; RhodeCode and vcsserver
334 261
335 262 ; Default is $cache_dir/archive_cache if not set
336 263 archive_cache.store_dir = %(here)s/data/archive_cache
337 264
338 265 ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
339 266 archive_cache.cache_size_gb = 10
340 267
341 268 ; By default cache uses sharding technique, this specifies how many shards are there
342 269 archive_cache.cache_shards = 10
343 270
344 271 ; #############
345 272 ; CELERY CONFIG
346 273 ; #############
347 274
348 275 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
349 276
350 277 use_celery = false
351 278
352 279 ; path to store schedule database
353 280 #celerybeat-schedule.path =
354 281
355 282 ; connection url to the message broker (default redis)
356 283 celery.broker_url = redis://localhost:6379/8
357 284
358 285 ; rabbitmq example
359 286 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
360 287
361 288 ; maximum tasks to execute before worker restart
362 289 celery.max_tasks_per_child = 20
363 290
364 291 ; tasks will never be sent to the queue, but executed locally instead.
365 292 celery.task_always_eager = false
366 293
367 294 ; #############
368 295 ; DOGPILE CACHE
369 296 ; #############
370 297
371 298 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
372 299 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
373 300 cache_dir = %(here)s/data
374 301
375 302 ; *********************************************
376 303 ; `sql_cache_short` cache for heavy SQL queries
377 304 ; Only supported backend is `memory_lru`
378 305 ; *********************************************
379 306 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
380 307 rc_cache.sql_cache_short.expiration_time = 30
381 308
382 309
383 310 ; *****************************************************
384 311 ; `cache_repo_longterm` cache for repo object instances
385 312 ; Only supported backend is `memory_lru`
386 313 ; *****************************************************
387 314 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
388 315 ; by default we use 30 Days, cache is still invalidated on push
389 316 rc_cache.cache_repo_longterm.expiration_time = 2592000
390 317 ; max items in LRU cache, set to smaller number to save memory, and expire last used caches
391 318 rc_cache.cache_repo_longterm.max_size = 10000
392 319
393 320
394 321 ; *********************************************
395 322 ; `cache_general` cache for general purpose use
396 323 ; for simplicity use rc.file_namespace backend,
397 324 ; for performance and scale use rc.redis
398 325 ; *********************************************
399 326 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
400 327 rc_cache.cache_general.expiration_time = 43200
401 328 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
402 329 #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db
403 330
404 331 ; alternative `cache_general` redis backend with distributed lock
405 332 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
406 333 #rc_cache.cache_general.expiration_time = 300
407 334
408 335 ; redis_expiration_time needs to be greater then expiration_time
409 336 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
410 337
411 338 #rc_cache.cache_general.arguments.host = localhost
412 339 #rc_cache.cache_general.arguments.port = 6379
413 340 #rc_cache.cache_general.arguments.db = 0
414 341 #rc_cache.cache_general.arguments.socket_timeout = 30
415 342 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
416 343 #rc_cache.cache_general.arguments.distributed_lock = true
417 344
418 345 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
419 346 #rc_cache.cache_general.arguments.lock_auto_renewal = true
420 347
421 348 ; *************************************************
422 349 ; `cache_perms` cache for permission tree, auth TTL
423 350 ; for simplicity use rc.file_namespace backend,
424 351 ; for performance and scale use rc.redis
425 352 ; *************************************************
426 353 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
427 354 rc_cache.cache_perms.expiration_time = 3600
428 355 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
429 356 #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db
430 357
431 358 ; alternative `cache_perms` redis backend with distributed lock
432 359 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
433 360 #rc_cache.cache_perms.expiration_time = 300
434 361
435 362 ; redis_expiration_time needs to be greater then expiration_time
436 363 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
437 364
438 365 #rc_cache.cache_perms.arguments.host = localhost
439 366 #rc_cache.cache_perms.arguments.port = 6379
440 367 #rc_cache.cache_perms.arguments.db = 0
441 368 #rc_cache.cache_perms.arguments.socket_timeout = 30
442 369 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
443 370 #rc_cache.cache_perms.arguments.distributed_lock = true
444 371
445 372 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
446 373 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
447 374
448 375 ; ***************************************************
449 376 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
450 377 ; for simplicity use rc.file_namespace backend,
451 378 ; for performance and scale use rc.redis
452 379 ; ***************************************************
453 380 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
454 381 rc_cache.cache_repo.expiration_time = 2592000
455 382 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
456 383 #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db
457 384
458 385 ; alternative `cache_repo` redis backend with distributed lock
459 386 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
460 387 #rc_cache.cache_repo.expiration_time = 2592000
461 388
462 389 ; redis_expiration_time needs to be greater then expiration_time
463 390 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
464 391
465 392 #rc_cache.cache_repo.arguments.host = localhost
466 393 #rc_cache.cache_repo.arguments.port = 6379
467 394 #rc_cache.cache_repo.arguments.db = 1
468 395 #rc_cache.cache_repo.arguments.socket_timeout = 30
469 396 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
470 397 #rc_cache.cache_repo.arguments.distributed_lock = true
471 398
472 399 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
473 400 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
474 401
475 402 ; ##############
476 403 ; BEAKER SESSION
477 404 ; ##############
478 405
479 406 ; beaker.session.type is type of storage options for the logged users sessions. Current allowed
480 407 ; types are file, ext:redis, ext:database, ext:memcached, and memory (default if not specified).
481 408 ; Fastest ones are Redis and ext:database
482 409 beaker.session.type = file
483 410 beaker.session.data_dir = %(here)s/data/sessions
484 411
485 412 ; Redis based sessions
486 413 #beaker.session.type = ext:redis
487 414 #beaker.session.url = redis://127.0.0.1:6379/2
488 415
489 416 ; DB based session, fast, and allows easy management over logged in users
490 417 #beaker.session.type = ext:database
491 418 #beaker.session.table_name = db_session
492 419 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
493 420 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
494 421 #beaker.session.sa.pool_recycle = 3600
495 422 #beaker.session.sa.echo = false
496 423
497 424 beaker.session.key = rhodecode
498 425 beaker.session.secret = production-rc-uytcxaz
499 426 beaker.session.lock_dir = %(here)s/data/sessions/lock
500 427
501 428 ; Secure encrypted cookie. Requires AES and AES python libraries
502 429 ; you must disable beaker.session.secret to use this
503 430 #beaker.session.encrypt_key = key_for_encryption
504 431 #beaker.session.validate_key = validation_key
505 432
506 433 ; Sets session as invalid (also logging out user) if it haven not been
507 434 ; accessed for given amount of time in seconds
508 435 beaker.session.timeout = 2592000
509 436 beaker.session.httponly = true
510 437
511 438 ; Path to use for the cookie. Set to prefix if you use prefix middleware
512 439 #beaker.session.cookie_path = /custom_prefix
513 440
514 441 ; Set https secure cookie
515 442 beaker.session.secure = false
516 443
517 444 ; default cookie expiration time in seconds, set to `true` to set expire
518 445 ; at browser close
519 446 #beaker.session.cookie_expires = 3600
520 447
521 448 ; #############################
522 449 ; SEARCH INDEXING CONFIGURATION
523 450 ; #############################
524 451
525 452 ; Full text search indexer is available in rhodecode-tools under
526 453 ; `rhodecode-tools index` command
527 454
528 455 ; WHOOSH Backend, doesn't require additional services to run
529 456 ; it works good with few dozen repos
530 457 search.module = rhodecode.lib.index.whoosh
531 458 search.location = %(here)s/data/index
532 459
533 460 ; ####################
534 461 ; CHANNELSTREAM CONFIG
535 462 ; ####################
536 463
537 464 ; channelstream enables persistent connections and live notification
538 465 ; in the system. It's also used by the chat system
539 466
540 467 channelstream.enabled = false
541 468
542 469 ; server address for channelstream server on the backend
543 470 channelstream.server = 127.0.0.1:9800
544 471
545 472 ; location of the channelstream server from outside world
546 473 ; use ws:// for http or wss:// for https. This address needs to be handled
547 474 ; by external HTTP server such as Nginx or Apache
548 475 ; see Nginx/Apache configuration examples in our docs
549 476 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
550 477 channelstream.secret = secret
551 478 channelstream.history.location = %(here)s/channelstream_history
552 479
553 480 ; Internal application path that Javascript uses to connect into.
554 481 ; If you use proxy-prefix the prefix should be added before /_channelstream
555 482 channelstream.proxy_path = /_channelstream
556 483
557 484
558 485 ; ##############################
559 486 ; MAIN RHODECODE DATABASE CONFIG
560 487 ; ##############################
561 488
562 489 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
563 490 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
564 491 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
565 492 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
566 493 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
567 494
568 495 sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
569 496
570 497 ; see sqlalchemy docs for other advanced settings
571 498 ; print the sql statements to output
572 499 sqlalchemy.db1.echo = false
573 500
574 501 ; recycle the connections after this amount of seconds
575 502 sqlalchemy.db1.pool_recycle = 3600
576 503
577 504 ; the number of connections to keep open inside the connection pool.
578 505 ; 0 indicates no limit
579 506 ; the general calculus with gevent is:
580 507 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
581 508 ; then increase pool size + max overflow so that they add up to 500.
582 509 #sqlalchemy.db1.pool_size = 5
583 510
584 511 ; The number of connections to allow in connection pool "overflow", that is
585 512 ; connections that can be opened above and beyond the pool_size setting,
586 513 ; which defaults to five.
587 514 #sqlalchemy.db1.max_overflow = 10
588 515
589 516 ; Connection check ping, used to detect broken database connections
590 517 ; could be enabled to better handle cases if MySQL has gone away errors
591 518 #sqlalchemy.db1.ping_connection = true
592 519
593 520 ; ##########
594 521 ; VCS CONFIG
595 522 ; ##########
596 523 vcs.server.enable = true
597 524 vcs.server = localhost:9900
598 525
599 526 ; Web server connectivity protocol, responsible for web based VCS operations
600 527 ; Available protocols are:
601 528 ; `http` - use http-rpc backend (default)
602 529 vcs.server.protocol = http
603 530
604 531 ; Push/Pull operations protocol, available options are:
605 532 ; `http` - use http-rpc backend (default)
606 533 vcs.scm_app_implementation = http
607 534
608 535 ; Push/Pull operations hooks protocol, available options are:
609 536 ; `http` - use http-rpc backend (default)
610 537 vcs.hooks.protocol = http
611 538
612 539 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
613 540 ; accessible via network.
614 541 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
615 542 vcs.hooks.host = *
616 543
617 544 ; Start VCSServer with this instance as a subprocess, useful for development
618 545 vcs.start_server = false
619 546
620 547 ; List of enabled VCS backends, available options are:
621 548 ; `hg` - mercurial
622 549 ; `git` - git
623 550 ; `svn` - subversion
624 551 vcs.backends = hg, git, svn
625 552
626 553 ; Wait this number of seconds before killing connection to the vcsserver
627 554 vcs.connection_timeout = 3600
628 555
629 556 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
630 557 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
631 558 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
632 559 #vcs.svn.compatible_version = 1.8
633 560
634 561 ; Cache flag to cache vcsserver remote calls locally
635 562 ; It uses cache_region `cache_repo`
636 563 vcs.methods.cache = true
637 564
638 565 ; ####################################################
639 566 ; Subversion proxy support (mod_dav_svn)
640 567 ; Maps RhodeCode repo groups into SVN paths for Apache
641 568 ; ####################################################
642 569
643 570 ; Enable or disable the config file generation.
644 571 svn.proxy.generate_config = false
645 572
646 573 ; Generate config file with `SVNListParentPath` set to `On`.
647 574 svn.proxy.list_parent_path = true
648 575
649 576 ; Set location and file name of generated config file.
650 577 svn.proxy.config_file_path = %(here)s/mod_dav_svn.conf
651 578
652 579 ; alternative mod_dav config template. This needs to be a valid mako template
653 580 ; Example template can be found in the source code:
654 581 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
655 582 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
656 583
657 584 ; Used as a prefix to the `Location` block in the generated config file.
658 585 ; In most cases it should be set to `/`.
659 586 svn.proxy.location_root = /
660 587
661 588 ; Command to reload the mod dav svn configuration on change.
662 589 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
663 590 ; Make sure user who runs RhodeCode process is allowed to reload Apache
664 591 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
665 592
666 593 ; If the timeout expires before the reload command finishes, the command will
667 594 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
668 595 #svn.proxy.reload_timeout = 10
669 596
670 597 ; ####################
671 598 ; SSH Support Settings
672 599 ; ####################
673 600
674 601 ; Defines if a custom authorized_keys file should be created and written on
675 602 ; any change user ssh keys. Setting this to false also disables possibility
676 603 ; of adding SSH keys by users from web interface. Super admins can still
677 604 ; manage SSH Keys.
678 605 ssh.generate_authorized_keyfile = false
679 606
680 607 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
681 608 # ssh.authorized_keys_ssh_opts =
682 609
683 610 ; Path to the authorized_keys file where the generate entries are placed.
684 611 ; It is possible to have multiple key files specified in `sshd_config` e.g.
685 612 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
686 613 ssh.authorized_keys_file_path = ~/.ssh/authorized_keys_rhodecode
687 614
688 615 ; Command to execute the SSH wrapper. The binary is available in the
689 616 ; RhodeCode installation directory.
690 617 ; e.g ~/.rccontrol/community-1/profile/bin/rc-ssh-wrapper
691 618 ssh.wrapper_cmd = ~/.rccontrol/community-1/rc-ssh-wrapper
692 619
693 620 ; Allow shell when executing the ssh-wrapper command
694 621 ssh.wrapper_cmd_allow_shell = false
695 622
696 623 ; Enables logging, and detailed output send back to the client during SSH
697 624 ; operations. Useful for debugging, shouldn't be used in production.
698 625 ssh.enable_debug_logging = false
699 626
700 627 ; Paths to binary executable, by default they are the names, but we can
701 628 ; override them if we want to use a custom one
702 629 ssh.executable.hg = ~/.rccontrol/vcsserver-1/profile/bin/hg
703 630 ssh.executable.git = ~/.rccontrol/vcsserver-1/profile/bin/git
704 631 ssh.executable.svn = ~/.rccontrol/vcsserver-1/profile/bin/svnserve
705 632
706 633 ; Enables SSH key generator web interface. Disabling this still allows users
707 634 ; to add their own keys.
708 635 ssh.enable_ui_key_generator = true
709 636
710 637
711 638 ; #################
712 639 ; APPENLIGHT CONFIG
713 640 ; #################
714 641
715 642 ; Appenlight is tailored to work with RhodeCode, see
716 643 ; http://appenlight.rhodecode.com for details how to obtain an account
717 644
718 645 ; Appenlight integration enabled
719 646 #appenlight = false
720 647
721 648 #appenlight.server_url = https://api.appenlight.com
722 649 #appenlight.api_key = YOUR_API_KEY
723 650 #appenlight.transport_config = https://api.appenlight.com?threaded=1&timeout=5
724 651
725 652 ; used for JS client
726 653 #appenlight.api_public_key = YOUR_API_PUBLIC_KEY
727 654
728 655 ; TWEAK AMOUNT OF INFO SENT HERE
729 656
730 657 ; enables 404 error logging (default False)
731 658 #appenlight.report_404 = false
732 659
733 660 ; time in seconds after request is considered being slow (default 1)
734 661 #appenlight.slow_request_time = 1
735 662
736 663 ; record slow requests in application
737 664 ; (needs to be enabled for slow datastore recording and time tracking)
738 665 #appenlight.slow_requests = true
739 666
740 667 ; enable hooking to application loggers
741 668 #appenlight.logging = true
742 669
743 670 ; minimum log level for log capture
744 671 #appenlight.logging.level = WARNING
745 672
746 673 ; send logs only from erroneous/slow requests
747 674 ; (saves API quota for intensive logging)
748 675 #appenlight.logging_on_error = false
749 676
750 677 ; list of additional keywords that should be grabbed from environ object
751 678 ; can be string with comma separated list of words in lowercase
752 679 ; (by default client will always send following info:
753 680 ; 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that
754 681 ; start with HTTP*); this list can be extended with additional keywords here
755 682 #appenlight.environ_keys_whitelist =
756 683
757 684 ; list of keywords that should be blanked from request object
758 685 ; can be string with comma separated list of words in lowercase
759 686 ; (by default client will always blank keys that contain following words
760 687 ; 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf'
761 688 ; this list can be extended with additional keywords set here
762 689 #appenlight.request_keys_blacklist =
763 690
764 691 ; list of namespaces that should be ignored when gathering log entries
765 692 ; can be string with comma separated list of namespaces
766 693 ; (by default the client ignores own entries: appenlight_client.client)
767 694 #appenlight.log_namespace_blacklist =
768 695
769 696 ; Statsd client config, this is used to send metrics to statsd
770 697 ; We recommend setting up statsd_exporter and scraping metrics using Prometheus
771 698 #statsd.enabled = false
772 699 #statsd.statsd_host = 0.0.0.0
773 700 #statsd.statsd_port = 8125
774 701 #statsd.statsd_prefix =
775 702 #statsd.statsd_ipv6 = false
776 703
777 704 ; Configure logging automatically at server startup; set to false
778 705 ; to use the below custom logging config.
779 706 ; RC_LOGGING_FORMATTER
780 707 ; RC_LOGGING_LEVEL
781 708 ; env variables can control the settings for logging in case of autoconfigure
782 709
783 710 #logging.autoconfigure = true
784 711
785 712 ; specify your own custom logging config file to configure logging
786 713 #logging.logging_conf_file = /path/to/custom_logging.ini
787 714
788 715 ; Dummy marker to add new entries after.
789 716 ; Add any custom entries below. Please don't remove this marker.
790 717 custom.conf = 1
791 718
792 719
793 720 ; #####################
794 721 ; LOGGING CONFIGURATION
795 722 ; #####################
796 723
797 724 [loggers]
798 725 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper
799 726
800 727 [handlers]
801 728 keys = console, console_sql
802 729
803 730 [formatters]
804 731 keys = generic, json, color_formatter, color_formatter_sql
805 732
806 733 ; #######
807 734 ; LOGGERS
808 735 ; #######
809 736 [logger_root]
810 737 level = NOTSET
811 738 handlers = console
812 739
813 740 [logger_sqlalchemy]
814 741 level = INFO
815 742 handlers = console_sql
816 743 qualname = sqlalchemy.engine
817 744 propagate = 0
818 745
819 746 [logger_beaker]
820 747 level = DEBUG
821 748 handlers =
822 749 qualname = beaker.container
823 750 propagate = 1
824 751
825 752 [logger_rhodecode]
826 753 level = DEBUG
827 754 handlers =
828 755 qualname = rhodecode
829 756 propagate = 1
830 757
831 758 [logger_ssh_wrapper]
832 759 level = DEBUG
833 760 handlers =
834 761 qualname = ssh_wrapper
835 762 propagate = 1
836 763
837 764 [logger_celery]
838 765 level = DEBUG
839 766 handlers =
840 767 qualname = celery
841 768
842 769
843 770 ; ########
844 771 ; HANDLERS
845 772 ; ########
846 773
847 774 [handler_console]
848 775 class = StreamHandler
849 776 args = (sys.stderr, )
850 777 level = INFO
851 778 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
852 779 ; This allows sending properly formatted logs to grafana loki or elasticsearch
853 780 formatter = generic
854 781
855 782 [handler_console_sql]
856 783 ; "level = DEBUG" logs SQL queries and results.
857 784 ; "level = INFO" logs SQL queries.
858 785 ; "level = WARN" logs neither. (Recommended for production systems.)
859 786 class = StreamHandler
860 787 args = (sys.stderr, )
861 788 level = WARN
862 789 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
863 790 ; This allows sending properly formatted logs to grafana loki or elasticsearch
864 791 formatter = generic
865 792
866 793 ; ##########
867 794 ; FORMATTERS
868 795 ; ##########
869 796
870 797 [formatter_generic]
871 798 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
872 799 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
873 800 datefmt = %Y-%m-%d %H:%M:%S
874 801
875 802 [formatter_color_formatter]
876 803 class = rhodecode.lib.logging_formatter.ColorFormatter
877 804 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
878 805 datefmt = %Y-%m-%d %H:%M:%S
879 806
880 807 [formatter_color_formatter_sql]
881 808 class = rhodecode.lib.logging_formatter.ColorFormatterSql
882 809 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
883 810 datefmt = %Y-%m-%d %H:%M:%S
884 811
885 812 [formatter_json]
886 813 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
887 814 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
General Comments 0
You need to be logged in to leave comments. Login now