##// END OF EJS Templates
fix(configs): optimize configs for 5.0.0 release defaults...
super-admin -
r5295:7d8e7465 default
parent child Browse files
Show More
@@ -1,863 +1,865 b''
1
2 1
3 2 ; #########################################
4 3 ; RHODECODE COMMUNITY EDITION CONFIGURATION
5 4 ; #########################################
6 5
7 6 [DEFAULT]
8 7 ; Debug flag sets all loggers to debug, and enables request tracking
9 8 debug = true
10 9
11 10 ; ########################################################################
12 11 ; EMAIL CONFIGURATION
13 12 ; These settings will be used by the RhodeCode mailing system
14 13 ; ########################################################################
15 14
16 15 ; prefix all emails subjects with given prefix, helps filtering out emails
17 16 #email_prefix = [RhodeCode]
18 17
19 18 ; email FROM address all mails will be sent
20 19 #app_email_from = rhodecode-noreply@localhost
21 20
22 21 #smtp_server = mail.server.com
23 22 #smtp_username =
24 23 #smtp_password =
25 24 #smtp_port =
26 25 #smtp_use_tls = false
27 26 #smtp_use_ssl = true
28 27
29 28 [server:main]
30 29 ; COMMON HOST/IP CONFIG, This applies mostly to develop setup,
31 30 ; Host port for gunicorn are controlled by gunicorn_conf.py
32 31 host = 127.0.0.1
33 32 port = 10020
34 33
35 34 ; ##################################################
36 35 ; WAITRESS WSGI SERVER - Recommended for Development
37 36 ; ##################################################
38 37
39 38 ; use server type
40 39 use = egg:waitress#main
41 40
42 41 ; number of worker threads
43 42 threads = 5
44 43
45 44 ; MAX BODY SIZE 100GB
46 45 max_request_body_size = 107374182400
47 46
48 47 ; Use poll instead of select, fixes file descriptors limits problems.
49 48 ; May not work on old windows systems.
50 49 asyncore_use_poll = true
51 50
52 51
53 52 ; ###########################
54 53 ; GUNICORN APPLICATION SERVER
55 54 ; ###########################
56 55
57 56 ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py
58 57
59 58 ; Module to use, this setting shouldn't be changed
60 59 #use = egg:gunicorn#main
61 60
62 61 ; Prefix middleware for RhodeCode.
63 62 ; recommended when using proxy setup.
64 63 ; allows to set RhodeCode under a prefix in server.
65 64 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
66 65 ; And set your prefix like: `prefix = /custom_prefix`
67 66 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
68 67 ; to make your cookies only work on prefix url
69 68 [filter:proxy-prefix]
70 69 use = egg:PasteDeploy#prefix
71 70 prefix = /
72 71
73 72 [app:main]
74 73 ; The %(here)s variable will be replaced with the absolute path of parent directory
75 74 ; of this file
76 75 ; Each option in the app:main can be override by an environmental variable
77 76 ;
78 77 ;To override an option:
79 78 ;
80 79 ;RC_<KeyName>
81 80 ;Everything should be uppercase, . and - should be replaced by _.
82 81 ;For example, if you have these configuration settings:
83 82 ;rc_cache.repo_object.backend = foo
84 83 ;can be overridden by
85 84 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
86 85
87 86 use = egg:rhodecode-enterprise-ce
88 87
89 88 ; enable proxy prefix middleware, defined above
90 89 #filter-with = proxy-prefix
91 90
92 91 ; #############
93 92 ; DEBUG OPTIONS
94 93 ; #############
95 94
96 95 pyramid.reload_templates = true
97 96
98 97 # During development we want to have the debug toolbar enabled
99 98 pyramid.includes =
100 99 pyramid_debugtoolbar
101 100
102 101 debugtoolbar.hosts = 0.0.0.0/0
103 102 debugtoolbar.exclude_prefixes =
104 103 /css
105 104 /fonts
106 105 /images
107 106 /js
108 107
109 108 ## RHODECODE PLUGINS ##
110 109 rhodecode.includes =
111 110 rhodecode.api
112 111
113 112
114 113 # api prefix url
115 114 rhodecode.api.url = /_admin/api
116 115
117 116 ; enable debug style page
118 117 debug_style = true
119 118
120 119 ; #################
121 120 ; END DEBUG OPTIONS
122 121 ; #################
123 122
124 123 ; encryption key used to encrypt social plugin tokens,
125 124 ; remote_urls with credentials etc, if not set it defaults to
126 125 ; `beaker.session.secret`
127 126 #rhodecode.encrypted_values.secret =
128 127
129 128 ; decryption strict mode (enabled by default). It controls if decryption raises
130 129 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
131 130 #rhodecode.encrypted_values.strict = false
132 131
133 132 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
134 133 ; fernet is safer, and we strongly recommend switching to it.
135 134 ; Due to backward compatibility aes is used as default.
136 135 #rhodecode.encrypted_values.algorithm = fernet
137 136
138 137 ; Return gzipped responses from RhodeCode (static files/application)
139 138 gzip_responses = false
140 139
141 140 ; Auto-generate javascript routes file on startup
142 141 generate_js_files = false
143 142
144 143 ; System global default language.
145 144 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
146 145 lang = en
147 146
148 147 ; Perform a full repository scan and import on each server start.
149 148 ; Setting this to true could lead to very long startup time.
150 149 startup.import_repos = false
151 150
152 151 ; URL at which the application is running. This is used for Bootstrapping
153 152 ; requests in context when no web request is available. Used in ishell, or
154 153 ; SSH calls. Set this for events to receive proper url for SSH calls.
155 154 app.base_url = http://rhodecode.local
156 155
157 156 ; Unique application ID. Should be a random unique string for security.
158 157 app_instance_uuid = rc-production
159 158
160 159 ; Cut off limit for large diffs (size in bytes). If overall diff size on
161 160 ; commit, or pull request exceeds this limit this diff will be displayed
162 161 ; partially. E.g 512000 == 512Kb
163 162 cut_off_limit_diff = 512000
164 163
165 164 ; Cut off limit for large files inside diffs (size in bytes). Each individual
166 165 ; file inside diff which exceeds this limit will be displayed partially.
167 166 ; E.g 128000 == 128Kb
168 167 cut_off_limit_file = 128000
169 168
170 169 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
171 170 vcs_full_cache = true
172 171
173 172 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
174 173 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
175 174 force_https = false
176 175
177 176 ; use Strict-Transport-Security headers
178 177 use_htsts = false
179 178
180 179 ; Set to true if your repos are exposed using the dumb protocol
181 180 git_update_server_info = false
182 181
183 182 ; RSS/ATOM feed options
184 183 rss_cut_off_limit = 256000
185 184 rss_items_per_page = 10
186 185 rss_include_diff = false
187 186
188 187 ; gist URL alias, used to create nicer urls for gist. This should be an
189 188 ; url that does rewrites to _admin/gists/{gistid}.
190 189 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
191 190 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
192 191 gist_alias_url =
193 192
194 193 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
195 194 ; used for access.
196 195 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
197 196 ; came from the logged in user who owns this authentication token.
198 197 ; Additionally @TOKEN syntax can be used to bound the view to specific
199 198 ; authentication token. Such view would be only accessible when used together
200 199 ; with this authentication token
201 200 ; list of all views can be found under `/_admin/permissions/auth_token_access`
202 201 ; The list should be "," separated and on a single line.
203 202 ; Most common views to enable:
204 203
205 204 # RepoCommitsView:repo_commit_download
206 205 # RepoCommitsView:repo_commit_patch
207 206 # RepoCommitsView:repo_commit_raw
208 207 # RepoCommitsView:repo_commit_raw@TOKEN
209 208 # RepoFilesView:repo_files_diff
210 209 # RepoFilesView:repo_archivefile
211 210 # RepoFilesView:repo_file_raw
212 211 # GistView:*
213 212 api_access_controllers_whitelist =
214 213
215 214 ; Default encoding used to convert from and to unicode
216 215 ; can be also a comma separated list of encoding in case of mixed encodings
217 216 default_encoding = UTF-8
218 217
219 218 ; instance-id prefix
220 219 ; a prefix key for this instance used for cache invalidation when running
221 220 ; multiple instances of RhodeCode, make sure it's globally unique for
222 221 ; all running RhodeCode instances. Leave empty if you don't use it
223 222 instance_id =
224 223
225 224 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
226 225 ; of an authentication plugin also if it is disabled by its settings.
227 226 ; This could be useful if you are unable to log in to the system due to broken
228 227 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
229 228 ; module to log in again and fix the settings.
230 229 ; Available builtin plugin IDs (hash is part of the ID):
231 230 ; egg:rhodecode-enterprise-ce#rhodecode
232 231 ; egg:rhodecode-enterprise-ce#pam
233 232 ; egg:rhodecode-enterprise-ce#ldap
234 233 ; egg:rhodecode-enterprise-ce#jasig_cas
235 234 ; egg:rhodecode-enterprise-ce#headers
236 235 ; egg:rhodecode-enterprise-ce#crowd
237 236
238 237 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
239 238
240 239 ; Flag to control loading of legacy plugins in py:/path format
241 240 auth_plugin.import_legacy_plugins = true
242 241
243 242 ; alternative return HTTP header for failed authentication. Default HTTP
244 243 ; response is 401 HTTPUnauthorized. Currently HG clients have troubles with
245 244 ; handling that causing a series of failed authentication calls.
246 245 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
247 246 ; This will be served instead of default 401 on bad authentication
248 247 auth_ret_code =
249 248
250 249 ; use special detection method when serving auth_ret_code, instead of serving
251 250 ; ret_code directly, use 401 initially (Which triggers credentials prompt)
252 251 ; and then serve auth_ret_code to clients
253 252 auth_ret_code_detection = false
254 253
255 254 ; locking return code. When repository is locked return this HTTP code. 2XX
256 255 ; codes don't break the transactions while 4XX codes do
257 256 lock_ret_code = 423
258 257
259 258 ; allows to change the repository location in settings page
260 259 allow_repo_location_change = true
261 260
262 261 ; allows to setup custom hooks in settings page
263 262 allow_custom_hooks_settings = true
264 263
265 264 ; Generated license token required for EE edition license.
266 265 ; New generated token value can be found in Admin > settings > license page.
267 266 license_token =
268 267
269 268 ; This flag hides sensitive information on the license page such as token, and license data
270 269 license.hide_license_info = false
271 270
272 271 ; supervisor connection uri, for managing supervisor and logs.
273 272 supervisor.uri =
274 273
275 274 ; supervisord group name/id we only want this RC instance to handle
276 275 supervisor.group_id = dev
277 276
278 277 ; Display extended labs settings
279 278 labs_settings_active = true
280 279
281 280 ; Custom exception store path, defaults to TMPDIR
282 281 ; This is used to store exception from RhodeCode in shared directory
283 282 #exception_tracker.store_path =
284 283
285 284 ; Send email with exception details when it happens
286 285 #exception_tracker.send_email = false
287 286
288 287 ; Comma separated list of recipients for exception emails,
289 288 ; e.g admin@rhodecode.com,devops@rhodecode.com
290 289 ; Can be left empty, then emails will be sent to ALL super-admins
291 290 #exception_tracker.send_email_recipients =
292 291
293 292 ; optional prefix to Add to email Subject
294 293 #exception_tracker.email_prefix = [RHODECODE ERROR]
295 294
296 295 ; File store configuration. This is used to store and serve uploaded files
297 296 file_store.enabled = true
298 297
299 298 ; Storage backend, available options are: local
300 299 file_store.backend = local
301 300
302 301 ; path to store the uploaded binaries
303 302 file_store.storage_path = %(here)s/data/file_store
304 303
305 304 ; Uncomment and set this path to control settings for archive download cache.
306 305 ; Generated repo archives will be cached at this location
307 306 ; and served from the cache during subsequent requests for the same archive of
308 307 ; the repository. This path is important to be shared across filesystems and with
309 308 ; RhodeCode and vcsserver
310 309
311 310 ; Default is $cache_dir/archive_cache if not set
312 311 archive_cache.store_dir = %(here)s/data/archive_cache
313 312
314 313 ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
315 314 archive_cache.cache_size_gb = 10
316 315
317 316 ; By default cache uses sharding technique, this specifies how many shards are there
318 317 archive_cache.cache_shards = 10
319 318
320 319 ; #############
321 320 ; CELERY CONFIG
322 321 ; #############
323 322
324 323 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
325 324
326 325 use_celery = false
327 326
328 327 ; path to store schedule database
329 328 #celerybeat-schedule.path =
330 329
331 330 ; connection url to the message broker (default redis)
332 celery.broker_url = redis://localhost:6379/8
331 celery.broker_url = redis://redis:6379/8
332
333 ; results backend to get results for (default redis)
334 celery.result_backend = redis://redis:6379/8
333 335
334 336 ; rabbitmq example
335 337 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
336 338
337 339 ; maximum tasks to execute before worker restart
338 340 celery.max_tasks_per_child = 20
339 341
340 342 ; tasks will never be sent to the queue, but executed locally instead.
341 343 celery.task_always_eager = false
342 344
343 345 ; #############
344 346 ; DOGPILE CACHE
345 347 ; #############
346 348
347 349 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
348 350 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
349 351 cache_dir = %(here)s/data
350 352
351 353 ; *********************************************
352 354 ; `sql_cache_short` cache for heavy SQL queries
353 355 ; Only supported backend is `memory_lru`
354 356 ; *********************************************
355 357 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
356 358 rc_cache.sql_cache_short.expiration_time = 30
357 359
358 360
359 361 ; *****************************************************
360 362 ; `cache_repo_longterm` cache for repo object instances
361 363 ; Only supported backend is `memory_lru`
362 364 ; *****************************************************
363 365 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
364 366 ; by default we use 30 Days, cache is still invalidated on push
365 367 rc_cache.cache_repo_longterm.expiration_time = 2592000
366 368 ; max items in LRU cache, set to smaller number to save memory, and expire last used caches
367 369 rc_cache.cache_repo_longterm.max_size = 10000
368 370
369 371
370 372 ; *********************************************
371 373 ; `cache_general` cache for general purpose use
372 374 ; for simplicity use rc.file_namespace backend,
373 375 ; for performance and scale use rc.redis
374 376 ; *********************************************
375 377 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
376 378 rc_cache.cache_general.expiration_time = 43200
377 379 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
378 380 #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db
379 381
380 382 ; alternative `cache_general` redis backend with distributed lock
381 383 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
382 384 #rc_cache.cache_general.expiration_time = 300
383 385
384 386 ; redis_expiration_time needs to be greater than expiration_time
385 387 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
386 388
387 389 #rc_cache.cache_general.arguments.host = localhost
388 390 #rc_cache.cache_general.arguments.port = 6379
389 391 #rc_cache.cache_general.arguments.db = 0
390 392 #rc_cache.cache_general.arguments.socket_timeout = 30
391 393 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
392 394 #rc_cache.cache_general.arguments.distributed_lock = true
393 395
394 396 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
395 397 #rc_cache.cache_general.arguments.lock_auto_renewal = true
396 398
397 399 ; *************************************************
398 400 ; `cache_perms` cache for permission tree, auth TTL
399 401 ; for simplicity use rc.file_namespace backend,
400 402 ; for performance and scale use rc.redis
401 403 ; *************************************************
402 404 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
403 405 rc_cache.cache_perms.expiration_time = 3600
404 406 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
405 407 #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db
406 408
407 409 ; alternative `cache_perms` redis backend with distributed lock
408 410 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
409 411 #rc_cache.cache_perms.expiration_time = 300
410 412
411 413 ; redis_expiration_time needs to be greater than expiration_time
412 414 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
413 415
414 416 #rc_cache.cache_perms.arguments.host = localhost
415 417 #rc_cache.cache_perms.arguments.port = 6379
416 418 #rc_cache.cache_perms.arguments.db = 0
417 419 #rc_cache.cache_perms.arguments.socket_timeout = 30
418 420 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
419 421 #rc_cache.cache_perms.arguments.distributed_lock = true
420 422
421 423 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
422 424 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
423 425
424 426 ; ***************************************************
425 427 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
426 428 ; for simplicity use rc.file_namespace backend,
427 429 ; for performance and scale use rc.redis
428 430 ; ***************************************************
429 431 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
430 432 rc_cache.cache_repo.expiration_time = 2592000
431 433 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
432 434 #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db
433 435
434 436 ; alternative `cache_repo` redis backend with distributed lock
435 437 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
436 438 #rc_cache.cache_repo.expiration_time = 2592000
437 439
438 440 ; redis_expiration_time needs to be greater than expiration_time
439 441 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
440 442
441 443 #rc_cache.cache_repo.arguments.host = localhost
442 444 #rc_cache.cache_repo.arguments.port = 6379
443 445 #rc_cache.cache_repo.arguments.db = 1
444 446 #rc_cache.cache_repo.arguments.socket_timeout = 30
445 447 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
446 448 #rc_cache.cache_repo.arguments.distributed_lock = true
447 449
448 450 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
449 451 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
450 452
451 453 ; ##############
452 454 ; BEAKER SESSION
453 455 ; ##############
454 456
455 457 ; beaker.session.type is type of storage options for the logged users sessions. Current allowed
456 458 ; types are file, ext:redis, ext:database, ext:memcached
457 459 ; Fastest ones are ext:redis and ext:database, DO NOT use memory type for session
458 460 beaker.session.type = file
459 461 beaker.session.data_dir = %(here)s/data/sessions
460 462
461 463 ; Redis based sessions
462 464 #beaker.session.type = ext:redis
463 465 #beaker.session.url = redis://127.0.0.1:6379/2
464 466
465 467 ; DB based session, fast, and allows easy management over logged in users
466 468 #beaker.session.type = ext:database
467 469 #beaker.session.table_name = db_session
468 470 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
469 471 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
470 472 #beaker.session.sa.pool_recycle = 3600
471 473 #beaker.session.sa.echo = false
472 474
473 475 beaker.session.key = rhodecode
474 476 beaker.session.secret = develop-rc-uytcxaz
475 477 beaker.session.lock_dir = %(here)s/data/sessions/lock
476 478
477 479 ; Secure encrypted cookie. Requires AES and AES python libraries
478 480 ; you must disable beaker.session.secret to use this
479 481 #beaker.session.encrypt_key = key_for_encryption
480 482 #beaker.session.validate_key = validation_key
481 483
482 484 ; Sets session as invalid (also logging out the user) if it has not been
483 485 ; accessed for given amount of time in seconds
484 486 beaker.session.timeout = 2592000
485 487 beaker.session.httponly = true
486 488
487 489 ; Path to use for the cookie. Set to prefix if you use prefix middleware
488 490 #beaker.session.cookie_path = /custom_prefix
489 491
490 492 ; Set https secure cookie
491 493 beaker.session.secure = false
492 494
493 495 ; default cookie expiration time in seconds, set to `true` to set expire
494 496 ; at browser close
495 497 #beaker.session.cookie_expires = 3600
496 498
497 499 ; #############################
498 500 ; SEARCH INDEXING CONFIGURATION
499 501 ; #############################
500 502
501 503 ; Full text search indexer is available in rhodecode-tools under
502 504 ; `rhodecode-tools index` command
503 505
504 506 ; WHOOSH Backend, doesn't require additional services to run
505 507 ; it works good with few dozen repos
506 508 search.module = rhodecode.lib.index.whoosh
507 509 search.location = %(here)s/data/index
508 510
509 511 ; ####################
510 512 ; CHANNELSTREAM CONFIG
511 513 ; ####################
512 514
513 515 ; channelstream enables persistent connections and live notification
514 516 ; in the system. It's also used by the chat system
515 517
516 518 channelstream.enabled = false
517 519
518 520 ; server address for channelstream server on the backend
519 521 channelstream.server = 127.0.0.1:9800
520 522
521 523 ; location of the channelstream server from outside world
522 524 ; use ws:// for http or wss:// for https. This address needs to be handled
523 525 ; by external HTTP server such as Nginx or Apache
524 526 ; see Nginx/Apache configuration examples in our docs
525 527 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
526 528 channelstream.secret = secret
527 529 channelstream.history.location = %(here)s/channelstream_history
528 530
529 531 ; Internal application path that Javascript uses to connect into.
530 532 ; If you use proxy-prefix the prefix should be added before /_channelstream
531 533 channelstream.proxy_path = /_channelstream
532 534
533 535
534 536 ; ##############################
535 537 ; MAIN RHODECODE DATABASE CONFIG
536 538 ; ##############################
537 539
538 540 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
539 541 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
540 542 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
541 543 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
542 544 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
543 545
544 546 sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
545 547
546 548 ; see sqlalchemy docs for other advanced settings
547 549 ; print the sql statements to output
548 550 sqlalchemy.db1.echo = false
549 551
550 552 ; recycle the connections after this amount of seconds
551 553 sqlalchemy.db1.pool_recycle = 3600
552 554
553 555 ; the number of connections to keep open inside the connection pool.
554 556 ; 0 indicates no limit
555 557 ; the general calculus with gevent is:
556 558 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
557 559 ; then increase pool size + max overflow so that they add up to 500.
558 560 #sqlalchemy.db1.pool_size = 5
559 561
560 562 ; The number of connections to allow in connection pool "overflow", that is
561 563 ; connections that can be opened above and beyond the pool_size setting,
562 564 ; which defaults to five.
563 565 #sqlalchemy.db1.max_overflow = 10
564 566
565 567 ; Connection check ping, used to detect broken database connections
566 568 ; could be enabled to better handle cases if MySQL has gone away errors
567 569 #sqlalchemy.db1.ping_connection = true
568 570
569 571 ; ##########
570 572 ; VCS CONFIG
571 573 ; ##########
572 574 vcs.server.enable = true
573 575 vcs.server = localhost:9900
574 576
575 577 ; Web server connectivity protocol, responsible for web based VCS operations
576 578 ; Available protocols are:
577 579 ; `http` - use http-rpc backend (default)
578 580 vcs.server.protocol = http
579 581
580 582 ; Push/Pull operations protocol, available options are:
581 583 ; `http` - use http-rpc backend (default)
582 584 vcs.scm_app_implementation = http
583 585
584 586 ; Push/Pull operations hooks protocol, available options are:
585 587 ; `http` - use http-rpc backend (default)
586 588 vcs.hooks.protocol = http
587 589
588 590 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
589 591 ; accessible via network.
590 592 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
591 593 vcs.hooks.host = *
592 594
593 595 ; Start VCSServer with this instance as a subprocess, useful for development
594 596 vcs.start_server = false
595 597
596 598 ; List of enabled VCS backends, available options are:
597 599 ; `hg` - mercurial
598 600 ; `git` - git
599 601 ; `svn` - subversion
600 602 vcs.backends = hg, git, svn
601 603
602 604 ; Wait this number of seconds before killing connection to the vcsserver
603 605 vcs.connection_timeout = 3600
604 606
605 607 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
606 608 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
607 609 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
608 610 #vcs.svn.compatible_version = 1.8
609 611
610 612 ; Cache flag to cache vcsserver remote calls locally
611 613 ; It uses cache_region `cache_repo`
612 614 vcs.methods.cache = true
613 615
614 616 ; ####################################################
615 617 ; Subversion proxy support (mod_dav_svn)
616 618 ; Maps RhodeCode repo groups into SVN paths for Apache
617 619 ; ####################################################
618 620
619 621 ; Enable or disable the config file generation.
620 622 svn.proxy.generate_config = false
621 623
622 624 ; Generate config file with `SVNListParentPath` set to `On`.
623 625 svn.proxy.list_parent_path = true
624 626
625 627 ; Set location and file name of generated config file.
626 628 svn.proxy.config_file_path = %(here)s/mod_dav_svn.conf
627 629
628 630 ; alternative mod_dav config template. This needs to be a valid mako template
629 631 ; Example template can be found in the source code:
630 632 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
631 633 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
632 634
633 635 ; Used as a prefix to the `Location` block in the generated config file.
634 636 ; In most cases it should be set to `/`.
635 637 svn.proxy.location_root = /
636 638
637 639 ; Command to reload the mod dav svn configuration on change.
638 640 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
639 641 ; Make sure user who runs RhodeCode process is allowed to reload Apache
640 642 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
641 643
642 644 ; If the timeout expires before the reload command finishes, the command will
643 645 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
644 646 #svn.proxy.reload_timeout = 10
645 647
646 648 ; ####################
647 649 ; SSH Support Settings
648 650 ; ####################
649 651
650 652 ; Defines if a custom authorized_keys file should be created and written on
651 653 ; any change user ssh keys. Setting this to false also disables possibility
652 654 ; of adding SSH keys by users from web interface. Super admins can still
653 655 ; manage SSH Keys.
654 656 ssh.generate_authorized_keyfile = false
655 657
656 658 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
657 659 # ssh.authorized_keys_ssh_opts =
658 660
659 661 ; Path to the authorized_keys file where the generate entries are placed.
660 662 ; It is possible to have multiple key files specified in `sshd_config` e.g.
661 663 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
662 664 ssh.authorized_keys_file_path = ~/.ssh/authorized_keys_rhodecode
663 665
664 666 ; Command to execute the SSH wrapper. The binary is available in the
665 667 ; RhodeCode installation directory.
666 668 ; e.g ~/.rccontrol/community-1/profile/bin/rc-ssh-wrapper
667 669 ssh.wrapper_cmd = ~/.rccontrol/community-1/rc-ssh-wrapper
668 670
669 671 ; Allow shell when executing the ssh-wrapper command
670 672 ssh.wrapper_cmd_allow_shell = false
671 673
672 674 ; Enables logging, and detailed output send back to the client during SSH
673 675 ; operations. Useful for debugging, shouldn't be used in production.
674 676 ssh.enable_debug_logging = true
675 677
676 678 ; Paths to binary executable, by default they are the names, but we can
677 679 ; override them if we want to use a custom one
678 680 ssh.executable.hg = ~/.rccontrol/vcsserver-1/profile/bin/hg
679 681 ssh.executable.git = ~/.rccontrol/vcsserver-1/profile/bin/git
680 682 ssh.executable.svn = ~/.rccontrol/vcsserver-1/profile/bin/svnserve
681 683
682 684 ; Enables SSH key generator web interface. Disabling this still allows users
683 685 ; to add their own keys.
684 686 ssh.enable_ui_key_generator = true
685 687
686 688
687 689 ; #################
688 690 ; APPENLIGHT CONFIG
689 691 ; #################
690 692
691 693 ; Appenlight is tailored to work with RhodeCode, see
692 694 ; http://appenlight.rhodecode.com for details how to obtain an account
693 695
694 696 ; Appenlight integration enabled
695 697 #appenlight = false
696 698
697 699 #appenlight.server_url = https://api.appenlight.com
698 700 #appenlight.api_key = YOUR_API_KEY
699 701 #appenlight.transport_config = https://api.appenlight.com?threaded=1&timeout=5
700 702
701 703 ; used for JS client
702 704 #appenlight.api_public_key = YOUR_API_PUBLIC_KEY
703 705
704 706 ; TWEAK AMOUNT OF INFO SENT HERE
705 707
706 708 ; enables 404 error logging (default False)
707 709 #appenlight.report_404 = false
708 710
709 711 ; time in seconds after request is considered being slow (default 1)
710 712 #appenlight.slow_request_time = 1
711 713
712 714 ; record slow requests in application
713 715 ; (needs to be enabled for slow datastore recording and time tracking)
714 716 #appenlight.slow_requests = true
715 717
716 718 ; enable hooking to application loggers
717 719 #appenlight.logging = true
718 720
719 721 ; minimum log level for log capture
720 722 #ppenlight.logging.level = WARNING
721 723
722 724 ; send logs only from erroneous/slow requests
723 725 ; (saves API quota for intensive logging)
724 726 #appenlight.logging_on_error = false
725 727
726 728 ; list of additional keywords that should be grabbed from environ object
727 729 ; can be string with comma separated list of words in lowercase
728 730 ; (by default client will always send following info:
729 731 ; 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that
730 732 ; start with HTTP* this list be extended with additional keywords here
731 733 #appenlight.environ_keys_whitelist =
732 734
733 735 ; list of keywords that should be blanked from request object
734 736 ; can be string with comma separated list of words in lowercase
735 737 ; (by default client will always blank keys that contain following words
736 738 ; 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf'
737 739 ; this list be extended with additional keywords set here
738 740 #appenlight.request_keys_blacklist =
739 741
740 742 ; list of namespaces that should be ignores when gathering log entries
741 743 ; can be string with comma separated list of namespaces
742 744 ; (by default the client ignores own entries: appenlight_client.client)
743 745 #appenlight.log_namespace_blacklist =
744 746
745 747 ; Statsd client config, this is used to send metrics to statsd
746 748 ; We recommend setting statsd_exported and scrape them using Prometheus
747 749 #statsd.enabled = false
748 750 #statsd.statsd_host = 0.0.0.0
749 751 #statsd.statsd_port = 8125
750 752 #statsd.statsd_prefix =
751 753 #statsd.statsd_ipv6 = false
752 754
753 755 ; configure logging automatically at server startup set to false
754 756 ; to use the below custom logging config.
755 757 ; RC_LOGGING_FORMATTER
756 758 ; RC_LOGGING_LEVEL
757 759 ; env variables can control the settings for logging in case of autoconfigure
758 760
759 761 #logging.autoconfigure = true
760 762
761 763 ; specify your own custom logging config file to configure logging
762 764 #logging.logging_conf_file = /path/to/custom_logging.ini
763 765
764 766 ; Dummy marker to add new entries after.
765 767 ; Add any custom entries below. Please don't remove this marker.
766 768 custom.conf = 1
767 769
768 770
769 771 ; #####################
770 772 ; LOGGING CONFIGURATION
771 773 ; #####################
772 774
773 775 [loggers]
774 776 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper
775 777
776 778 [handlers]
777 779 keys = console, console_sql
778 780
779 781 [formatters]
780 782 keys = generic, json, color_formatter, color_formatter_sql
781 783
782 784 ; #######
783 785 ; LOGGERS
784 786 ; #######
785 787 [logger_root]
786 788 level = NOTSET
787 789 handlers = console
788 790
789 791 [logger_sqlalchemy]
790 792 level = INFO
791 793 handlers = console_sql
792 794 qualname = sqlalchemy.engine
793 795 propagate = 0
794 796
795 797 [logger_beaker]
796 798 level = DEBUG
797 799 handlers =
798 800 qualname = beaker.container
799 801 propagate = 1
800 802
801 803 [logger_rhodecode]
802 804 level = DEBUG
803 805 handlers =
804 806 qualname = rhodecode
805 807 propagate = 1
806 808
807 809 [logger_ssh_wrapper]
808 810 level = DEBUG
809 811 handlers =
810 812 qualname = ssh_wrapper
811 813 propagate = 1
812 814
813 815 [logger_celery]
814 816 level = DEBUG
815 817 handlers =
816 818 qualname = celery
817 819
818 820
819 821 ; ########
820 822 ; HANDLERS
821 823 ; ########
822 824
823 825 [handler_console]
824 826 class = StreamHandler
825 827 args = (sys.stderr, )
826 828 level = DEBUG
827 829 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
828 830 ; This allows sending properly formatted logs to grafana loki or elasticsearch
829 831 formatter = color_formatter
830 832
831 833 [handler_console_sql]
832 834 ; "level = DEBUG" logs SQL queries and results.
833 835 ; "level = INFO" logs SQL queries.
834 836 ; "level = WARN" logs neither. (Recommended for production systems.)
835 837 class = StreamHandler
836 838 args = (sys.stderr, )
837 839 level = WARN
838 840 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
839 841 ; This allows sending properly formatted logs to grafana loki or elasticsearch
840 842 formatter = color_formatter_sql
841 843
842 844 ; ##########
843 845 ; FORMATTERS
844 846 ; ##########
845 847
846 848 [formatter_generic]
847 849 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
848 850 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
849 851 datefmt = %Y-%m-%d %H:%M:%S
850 852
851 853 [formatter_color_formatter]
852 854 class = rhodecode.lib.logging_formatter.ColorFormatter
853 855 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
854 856 datefmt = %Y-%m-%d %H:%M:%S
855 857
856 858 [formatter_color_formatter_sql]
857 859 class = rhodecode.lib.logging_formatter.ColorFormatterSql
858 860 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
859 861 datefmt = %Y-%m-%d %H:%M:%S
860 862
861 863 [formatter_json]
862 864 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
863 865 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
@@ -1,518 +1,520 b''
1 1 """
2 2 Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
3 3 Gunicorn configuration should be managed by .ini files entries of RhodeCode or VCSServer
4 4 """
5 5
6 6 import gc
7 7 import os
8 8 import sys
9 9 import math
10 10 import time
11 11 import threading
12 12 import traceback
13 13 import random
14 14 import socket
15 15 import dataclasses
16 16 from gunicorn.glogging import Logger
17 17
18 18
19 19 def get_workers():
20 20 import multiprocessing
21 21 return multiprocessing.cpu_count() * 2 + 1
22 22
23 23
24 24 bind = "127.0.0.1:10020"
25 25
26 26
27 27 # Error logging output for gunicorn (-) is stdout
28 28 errorlog = '-'
29 29
30 30 # Access logging output for gunicorn (-) is stdout
31 31 accesslog = '-'
32 32
33 33
34 34 # SERVER MECHANICS
35 35 # None == system temp dir
36 36 # worker_tmp_dir is recommended to be set to some tmpfs
37 37 worker_tmp_dir = None
38 38 tmp_upload_dir = None
39 39
40 40 # use re-use port logic
41 41 #reuse_port = True
42 42
43 43 # Custom log format
44 44 #access_log_format = (
45 45 # '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
46 46
47 47 # loki format for easier parsing in grafana
48 48 access_log_format = (
49 49 'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
50 50
51
52 # Sets the number of process workers. More workers means more concurrent connections
53 # RhodeCode can handle at the same time. Each additional worker also it increases
54 # memory usage as each has its own set of caches.
55 # The Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
56 # than 8-10 unless for huge deployments .e.g 700-1000 users.
57 # `instance_id = *` must be set in the [app:main] section below (which is the default)
58 # when using more than 1 worker.
59 workers = 4
60
61 51 # self adjust workers based on CPU count, to use maximum of CPU and not overquota the resources
62 52 # workers = get_workers()
63 53
64 54 # Gunicorn access log level
65 55 loglevel = 'info'
66 56
67 57 # Process name visible in a process list
68 58 proc_name = 'rhodecode_enterprise'
69 59
70 60 # Type of worker class, one of `sync`, `gevent` or `gthread`
71 61 # currently `sync` is the only option allowed for vcsserver and for rhodecode all of 3 are allowed
72 62 # gevent:
73 63 # In this case, the maximum number of concurrent requests is (N workers * X worker_connections)
74 64 # e.g. workers =3 worker_connections=10 = 3*10, 30 concurrent requests can be handled
75 # gtrhead:
65 # gthread:
76 66 # In this case, the maximum number of concurrent requests is (N workers * X threads)
77 67 # e.g. workers = 3 threads=3 = 3*3, 9 concurrent requests can be handled
78 worker_class = 'gevent'
68 worker_class = 'gthread'
69
70 # Sets the number of process workers. More workers means more concurrent connections
71 # RhodeCode can handle at the same time. Each additional worker also it increases
72 # memory usage as each has its own set of caches.
73 # The Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
74 # than 8-10 unless for huge deployments .e.g 700-1000 users.
75 # `instance_id = *` must be set in the [app:main] section below (which is the default)
76 # when using more than 1 worker.
77 workers = 2
78
79 # Threads numbers for worker class gthread
80 threads = 1
79 81
80 82 # The maximum number of simultaneous clients. Valid only for gevent
81 83 # In this case, the maximum number of concurrent requests is (N workers * X worker_connections)
82 84 # e.g workers =3 worker_connections=10 = 3*10, 30 concurrent requests can be handled
83 85 worker_connections = 10
84 86
85 87 # Max number of requests that worker will handle before being gracefully restarted.
86 88 # Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
87 89 max_requests = 2000
88 90 max_requests_jitter = int(max_requests * 0.2) # 20% of max_requests
89 91
90 92 # The maximum number of pending connections.
91 93 # Exceeding this number results in the client getting an error when attempting to connect.
92 94 backlog = 64
93 95
94 96 # The Amount of time a worker can spend with handling a request before it
95 97 # gets killed and restarted. By default, set to 21600 (6hrs)
96 98 # Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
97 99 timeout = 21600
98 100
99 101 # The maximum size of HTTP request line in bytes.
100 102 # 0 for unlimited
101 103 limit_request_line = 0
102 104
103 105 # Limit the number of HTTP headers fields in a request.
104 106 # By default this value is 100 and can't be larger than 32768.
105 107 limit_request_fields = 32768
106 108
107 109 # Limit the allowed size of an HTTP request header field.
108 110 # Value is a positive number or 0.
109 111 # Setting it to 0 will allow unlimited header field sizes.
110 112 limit_request_field_size = 0
111 113
112 114 # Timeout for graceful workers restart.
113 115 # After receiving a restart signal, workers have this much time to finish
114 116 # serving requests. Workers still alive after the timeout (starting from the
115 117 # receipt of the restart signal) are force killed.
116 118 # Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
117 119 graceful_timeout = 21600
118 120
119 121 # The number of seconds to wait for requests on a Keep-Alive connection.
120 122 # Generally set in the 1-5 seconds range.
121 123 keepalive = 2
122 124
123 125 # Maximum memory usage that each worker can use before it will receive a
124 126 # graceful restart signal 0 = memory monitoring is disabled
125 127 # Examples: 268435456 (256MB), 536870912 (512MB)
126 128 # 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
127 129 # Dynamic formula 1024 * 1024 * 256 == 256MBs
128 130 memory_max_usage = 0
129 131
130 132 # How often in seconds to check for memory usage for each gunicorn worker
131 133 memory_usage_check_interval = 60
132 134
133 135 # Threshold value for which we don't recycle worker if GarbageCollection
134 136 # frees up enough resources. Before each restart, we try to run GC on worker
135 137 # in case we get enough free memory after that; restart will not happen.
136 138 memory_usage_recovery_threshold = 0.8
137 139
138 140
139 141 @dataclasses.dataclass
140 142 class MemoryCheckConfig:
141 143 max_usage: int
142 144 check_interval: int
143 145 recovery_threshold: float
144 146
145 147
146 148 def _get_process_rss(pid=None):
147 149 try:
148 150 import psutil
149 151 if pid:
150 152 proc = psutil.Process(pid)
151 153 else:
152 154 proc = psutil.Process()
153 155 return proc.memory_info().rss
154 156 except Exception:
155 157 return None
156 158
157 159
158 160 def _get_config(ini_path):
159 161 import configparser
160 162
161 163 try:
162 164 config = configparser.RawConfigParser()
163 165 config.read(ini_path)
164 166 return config
165 167 except Exception:
166 168 return None
167 169
168 170
169 171 def get_memory_usage_params(config=None):
170 172 # memory spec defaults
171 173 _memory_max_usage = memory_max_usage
172 174 _memory_usage_check_interval = memory_usage_check_interval
173 175 _memory_usage_recovery_threshold = memory_usage_recovery_threshold
174 176
175 177 if config:
176 178 ini_path = os.path.abspath(config)
177 179 conf = _get_config(ini_path)
178 180
179 181 section = 'server:main'
180 182 if conf and conf.has_section(section):
181 183
182 184 if conf.has_option(section, 'memory_max_usage'):
183 185 _memory_max_usage = conf.getint(section, 'memory_max_usage')
184 186
185 187 if conf.has_option(section, 'memory_usage_check_interval'):
186 188 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
187 189
188 190 if conf.has_option(section, 'memory_usage_recovery_threshold'):
189 191 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
190 192
191 193 _memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
192 194 or _memory_max_usage)
193 195 _memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
194 196 or _memory_usage_check_interval)
195 197 _memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
196 198 or _memory_usage_recovery_threshold)
197 199
198 200 return MemoryCheckConfig(_memory_max_usage, _memory_usage_check_interval, _memory_usage_recovery_threshold)
199 201
200 202
201 203 def _time_with_offset(check_interval):
202 204 return time.time() - random.randint(0, check_interval/2.0)
203 205
204 206
205 207 def pre_fork(server, worker):
206 208 pass
207 209
208 210
209 211 def post_fork(server, worker):
210 212
211 213 memory_conf = get_memory_usage_params()
212 214 _memory_max_usage = memory_conf.max_usage
213 215 _memory_usage_check_interval = memory_conf.check_interval
214 216 _memory_usage_recovery_threshold = memory_conf.recovery_threshold
215 217
216 218 worker._memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
217 219 or _memory_max_usage)
218 220 worker._memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
219 221 or _memory_usage_check_interval)
220 222 worker._memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
221 223 or _memory_usage_recovery_threshold)
222 224
223 225 # register memory last check time, with some random offset so we don't recycle all
224 226 # at once
225 227 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
226 228
227 229 if _memory_max_usage:
228 230 server.log.info("pid=[%-10s] WORKER spawned with max memory set at %s", worker.pid,
229 231 _format_data_size(_memory_max_usage))
230 232 else:
231 233 server.log.info("pid=[%-10s] WORKER spawned", worker.pid)
232 234
233 235
234 236 def pre_exec(server):
235 237 server.log.info("Forked child, re-executing.")
236 238
237 239
238 240 def on_starting(server):
239 241 server_lbl = '{} {}'.format(server.proc_name, server.address)
240 242 server.log.info("Server %s is starting.", server_lbl)
241 243 server.log.info('Config:')
242 244 server.log.info(f"\n{server.cfg}")
243 245 server.log.info(get_memory_usage_params())
244 246
245 247
246 248 def when_ready(server):
247 249 server.log.info("Server %s is ready. Spawning workers", server)
248 250
249 251
250 252 def on_reload(server):
251 253 pass
252 254
253 255
254 256 def _format_data_size(size, unit="B", precision=1, binary=True):
255 257 """Format a number using SI units (kilo, mega, etc.).
256 258
257 259 ``size``: The number as a float or int.
258 260
259 261 ``unit``: The unit name in plural form. Examples: "bytes", "B".
260 262
261 263 ``precision``: How many digits to the right of the decimal point. Default
262 264 is 1. 0 suppresses the decimal point.
263 265
264 266 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
265 267 If true, use base-2 binary prefixes (kibi = Ki = 1024).
266 268
267 269 ``full_name``: If false (default), use the prefix abbreviation ("k" or
268 270 "Ki"). If true, use the full prefix ("kilo" or "kibi"). If false,
269 271 use abbreviation ("k" or "Ki").
270 272
271 273 """
272 274
273 275 if not binary:
274 276 base = 1000
275 277 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
276 278 else:
277 279 base = 1024
278 280 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
279 281
280 282 sign = ""
281 283 if size > 0:
282 284 m = int(math.log(size, base))
283 285 elif size < 0:
284 286 sign = "-"
285 287 size = -size
286 288 m = int(math.log(size, base))
287 289 else:
288 290 m = 0
289 291 if m > 8:
290 292 m = 8
291 293
292 294 if m == 0:
293 295 precision = '%.0f'
294 296 else:
295 297 precision = '%%.%df' % precision
296 298
297 299 size = precision % (size / math.pow(base, m))
298 300
299 301 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
300 302
301 303
302 304 def _check_memory_usage(worker):
303 305 _memory_max_usage = worker._memory_max_usage
304 306 if not _memory_max_usage:
305 307 return
306 308
307 309 _memory_usage_check_interval = worker._memory_usage_check_interval
308 310 _memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
309 311
310 312 elapsed = time.time() - worker._last_memory_check_time
311 313 if elapsed > _memory_usage_check_interval:
312 314 mem_usage = _get_process_rss()
313 315 if mem_usage and mem_usage > _memory_max_usage:
314 316 worker.log.info(
315 317 "memory usage %s > %s, forcing gc",
316 318 _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
317 319 # Try to clean it up by forcing a full collection.
318 320 gc.collect()
319 321 mem_usage = _get_process_rss()
320 322 if mem_usage > _memory_usage_recovery_threshold:
321 323 # Didn't clean up enough, we'll have to terminate.
322 324 worker.log.warning(
323 325 "memory usage %s > %s after gc, quitting",
324 326 _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
325 327 # This will cause worker to auto-restart itself
326 328 worker.alive = False
327 329 worker._last_memory_check_time = time.time()
328 330
329 331
330 332 def worker_int(worker):
331 333 worker.log.info("pid=[%-10s] worker received INT or QUIT signal", worker.pid)
332 334
333 335 # get traceback info, when a worker crashes
334 336 def get_thread_id(t_id):
335 337 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
336 338 return id2name.get(t_id, "unknown_thread_id")
337 339
338 340 code = []
339 341 for thread_id, stack in sys._current_frames().items(): # noqa
340 342 code.append(
341 343 "\n# Thread: %s(%d)" % (get_thread_id(thread_id), thread_id))
342 344 for fname, lineno, name, line in traceback.extract_stack(stack):
343 345 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
344 346 if line:
345 347 code.append(" %s" % (line.strip()))
346 348 worker.log.debug("\n".join(code))
347 349
348 350
349 351 def worker_abort(worker):
350 352 worker.log.info("pid=[%-10s] worker received SIGABRT signal", worker.pid)
351 353
352 354
353 355 def worker_exit(server, worker):
354 356 worker.log.info("pid=[%-10s] worker exit", worker.pid)
355 357
356 358
357 359 def child_exit(server, worker):
358 360 worker.log.info("pid=[%-10s] worker child exit", worker.pid)
359 361
360 362
361 363 def pre_request(worker, req):
362 364 worker.start_time = time.time()
363 365 worker.log.debug(
364 366 "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
365 367
366 368
367 369 def post_request(worker, req, environ, resp):
368 370 total_time = time.time() - worker.start_time
369 371 # Gunicorn sometimes has problems with reading the status_code
370 372 status_code = getattr(resp, 'status_code', '')
371 373 worker.log.debug(
372 374 "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
373 375 worker.nr, req.method, req.path, status_code, total_time)
374 376 _check_memory_usage(worker)
375 377
376 378
377 379 def _filter_proxy(ip):
378 380 """
379 381 Passed in IP addresses in HEADERS can be in a special format of multiple
380 382 ips. Those comma separated IPs are passed from various proxies in the
381 383 chain of request processing. The left-most being the original client.
382 384 We only care about the first IP which came from the org. client.
383 385
384 386 :param ip: ip string from headers
385 387 """
386 388 if ',' in ip:
387 389 _ips = ip.split(',')
388 390 _first_ip = _ips[0].strip()
389 391 return _first_ip
390 392 return ip
391 393
392 394
393 395 def _filter_port(ip):
394 396 """
395 397 Removes a port from ip, there are 4 main cases to handle here.
396 398 - ipv4 eg. 127.0.0.1
397 399 - ipv6 eg. ::1
398 400 - ipv4+port eg. 127.0.0.1:8080
399 401 - ipv6+port eg. [::1]:8080
400 402
401 403 :param ip:
402 404 """
403 405 def is_ipv6(ip_addr):
404 406 if hasattr(socket, 'inet_pton'):
405 407 try:
406 408 socket.inet_pton(socket.AF_INET6, ip_addr)
407 409 except socket.error:
408 410 return False
409 411 else:
410 412 return False
411 413 return True
412 414
413 415 if ':' not in ip: # must be ipv4 pure ip
414 416 return ip
415 417
416 418 if '[' in ip and ']' in ip: # ipv6 with port
417 419 return ip.split(']')[0][1:].lower()
418 420
419 421 # must be ipv6 or ipv4 with port
420 422 if is_ipv6(ip):
421 423 return ip
422 424 else:
423 425 ip, _port = ip.split(':')[:2] # means ipv4+port
424 426 return ip
425 427
426 428
427 429 def get_ip_addr(environ):
428 430 proxy_key = 'HTTP_X_REAL_IP'
429 431 proxy_key2 = 'HTTP_X_FORWARDED_FOR'
430 432 def_key = 'REMOTE_ADDR'
431 433
432 434 def _filters(x):
433 435 return _filter_port(_filter_proxy(x))
434 436
435 437 ip = environ.get(proxy_key)
436 438 if ip:
437 439 return _filters(ip)
438 440
439 441 ip = environ.get(proxy_key2)
440 442 if ip:
441 443 return _filters(ip)
442 444
443 445 ip = environ.get(def_key, '0.0.0.0')
444 446 return _filters(ip)
445 447
446 448
447 449 class RhodeCodeLogger(Logger):
448 450 """
449 451 Custom Logger that allows some customization that gunicorn doesn't allow
450 452 """
451 453
452 454 datefmt = r"%Y-%m-%d %H:%M:%S"
453 455
454 456 def __init__(self, cfg):
455 457 Logger.__init__(self, cfg)
456 458
457 459 def now(self):
458 460 """ return date in RhodeCode Log format """
459 461 now = time.time()
460 462 msecs = int((now - int(now)) * 1000)
461 463 return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
462 464
463 465 def atoms(self, resp, req, environ, request_time):
464 466 """ Gets atoms for log formatting.
465 467 """
466 468 status = resp.status
467 469 if isinstance(status, str):
468 470 status = status.split(None, 1)[0]
469 471 atoms = {
470 472 'h': get_ip_addr(environ),
471 473 'l': '-',
472 474 'u': self._get_user(environ) or '-',
473 475 't': self.now(),
474 476 'r': "%s %s %s" % (environ['REQUEST_METHOD'],
475 477 environ['RAW_URI'],
476 478 environ["SERVER_PROTOCOL"]),
477 479 's': status,
478 480 'm': environ.get('REQUEST_METHOD'),
479 481 'U': environ.get('PATH_INFO'),
480 482 'q': environ.get('QUERY_STRING'),
481 483 'H': environ.get('SERVER_PROTOCOL'),
482 484 'b': getattr(resp, 'sent', None) is not None and str(resp.sent) or '-',
483 485 'B': getattr(resp, 'sent', None),
484 486 'f': environ.get('HTTP_REFERER', '-'),
485 487 'a': environ.get('HTTP_USER_AGENT', '-'),
486 488 'T': request_time.seconds,
487 489 'D': (request_time.seconds * 1000000) + request_time.microseconds,
488 490 'M': (request_time.seconds * 1000) + int(request_time.microseconds/1000),
489 491 'L': "%d.%06d" % (request_time.seconds, request_time.microseconds),
490 492 'p': "<%s>" % os.getpid()
491 493 }
492 494
493 495 # add request headers
494 496 if hasattr(req, 'headers'):
495 497 req_headers = req.headers
496 498 else:
497 499 req_headers = req
498 500
499 501 if hasattr(req_headers, "items"):
500 502 req_headers = req_headers.items()
501 503
502 504 atoms.update({"{%s}i" % k.lower(): v for k, v in req_headers})
503 505
504 506 resp_headers = resp.headers
505 507 if hasattr(resp_headers, "items"):
506 508 resp_headers = resp_headers.items()
507 509
508 510 # add response headers
509 511 atoms.update({"{%s}o" % k.lower(): v for k, v in resp_headers})
510 512
511 513 # add environ variables
512 514 environ_variables = environ.items()
513 515 atoms.update({"{%s}e" % k.lower(): v for k, v in environ_variables})
514 516
515 517 return atoms
516 518
517 519
518 520 logger_class = RhodeCodeLogger
@@ -1,813 +1,816 b''
1 1
2 2 ; #########################################
3 3 ; RHODECODE COMMUNITY EDITION CONFIGURATION
4 4 ; #########################################
5 5
6 6 [DEFAULT]
7 7 ; Debug flag sets all loggers to debug, and enables request tracking
8 8 debug = false
9 9
10 10 ; ########################################################################
11 11 ; EMAIL CONFIGURATION
12 12 ; These settings will be used by the RhodeCode mailing system
13 13 ; ########################################################################
14 14
15 15 ; prefix all emails subjects with given prefix, helps filtering out emails
16 16 #email_prefix = [RhodeCode]
17 17
18 18 ; email FROM address all mails will be sent
19 19 #app_email_from = rhodecode-noreply@localhost
20 20
21 21 #smtp_server = mail.server.com
22 22 #smtp_username =
23 23 #smtp_password =
24 24 #smtp_port =
25 25 #smtp_use_tls = false
26 26 #smtp_use_ssl = true
27 27
28 28 [server:main]
29 29 ; COMMON HOST/IP CONFIG, This applies mostly to develop setup,
30 30 ; Host port for gunicorn are controlled by gunicorn_conf.py
31 31 host = 127.0.0.1
32 32 port = 10020
33 33
34 34
35 35 ; ###########################
36 36 ; GUNICORN APPLICATION SERVER
37 37 ; ###########################
38 38
39 39 ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py
40 40
41 41 ; Module to use, this setting shouldn't be changed
42 42 use = egg:gunicorn#main
43 43
44 44 ; Prefix middleware for RhodeCode.
45 45 ; recommended when using proxy setup.
46 46 ; allows to set RhodeCode under a prefix in server.
47 47 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
48 48 ; And set your prefix like: `prefix = /custom_prefix`
49 49 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
50 50 ; to make your cookies only work on prefix url
51 51 [filter:proxy-prefix]
52 52 use = egg:PasteDeploy#prefix
53 53 prefix = /
54 54
55 55 [app:main]
56 56 ; The %(here)s variable will be replaced with the absolute path of parent directory
57 57 ; of this file
58 58 ; Each option in the app:main can be override by an environmental variable
59 59 ;
60 60 ;To override an option:
61 61 ;
62 62 ;RC_<KeyName>
63 63 ;Everything should be uppercase, . and - should be replaced by _.
64 64 ;For example, if you have these configuration settings:
65 65 ;rc_cache.repo_object.backend = foo
66 66 ;can be overridden by
67 67 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
68 68
69 69 use = egg:rhodecode-enterprise-ce
70 70
71 71 ; enable proxy prefix middleware, defined above
72 72 #filter-with = proxy-prefix
73 73
74 74 ; encryption key used to encrypt social plugin tokens,
75 75 ; remote_urls with credentials etc, if not set it defaults to
76 76 ; `beaker.session.secret`
77 77 #rhodecode.encrypted_values.secret =
78 78
79 79 ; decryption strict mode (enabled by default). It controls if decryption raises
80 80 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
81 81 #rhodecode.encrypted_values.strict = false
82 82
83 83 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
84 84 ; fernet is safer, and we strongly recommend switching to it.
85 85 ; Due to backward compatibility aes is used as default.
86 86 #rhodecode.encrypted_values.algorithm = fernet
87 87
88 88 ; Return gzipped responses from RhodeCode (static files/application)
89 89 gzip_responses = false
90 90
91 91 ; Auto-generate javascript routes file on startup
92 92 generate_js_files = false
93 93
94 94 ; System global default language.
95 95 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
96 96 lang = en
97 97
98 98 ; Perform a full repository scan and import on each server start.
99 99 ; Settings this to true could lead to very long startup time.
100 100 startup.import_repos = false
101 101
102 102 ; URL at which the application is running. This is used for Bootstrapping
103 103 ; requests in context when no web request is available. Used in ishell, or
104 104 ; SSH calls. Set this for events to receive proper url for SSH calls.
105 105 app.base_url = http://rhodecode.local
106 106
107 107 ; Unique application ID. Should be a random unique string for security.
108 108 app_instance_uuid = rc-production
109 109
110 110 ; Cut off limit for large diffs (size in bytes). If overall diff size on
111 111 ; commit, or pull request exceeds this limit this diff will be displayed
112 112 ; partially. E.g 512000 == 512Kb
113 113 cut_off_limit_diff = 512000
114 114
115 115 ; Cut off limit for large files inside diffs (size in bytes). Each individual
116 116 ; file inside diff which exceeds this limit will be displayed partially.
117 117 ; E.g 128000 == 128Kb
118 118 cut_off_limit_file = 128000
119 119
120 120 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
121 121 vcs_full_cache = true
122 122
123 123 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
124 124 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
125 125 force_https = false
126 126
127 127 ; use Strict-Transport-Security headers
128 128 use_htsts = false
129 129
130 130 ; Set to true if your repos are exposed using the dumb protocol
131 131 git_update_server_info = false
132 132
133 133 ; RSS/ATOM feed options
134 134 rss_cut_off_limit = 256000
135 135 rss_items_per_page = 10
136 136 rss_include_diff = false
137 137
138 138 ; gist URL alias, used to create nicer urls for gist. This should be an
139 139 ; url that does rewrites to _admin/gists/{gistid}.
140 140 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
141 141 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
142 142 gist_alias_url =
143 143
144 144 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
145 145 ; used for access.
146 146 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
147 147 ; came from the the logged in user who own this authentication token.
148 148 ; Additionally @TOKEN syntax can be used to bound the view to specific
149 149 ; authentication token. Such view would be only accessible when used together
150 150 ; with this authentication token
151 151 ; list of all views can be found under `/_admin/permissions/auth_token_access`
152 152 ; The list should be "," separated and on a single line.
153 153 ; Most common views to enable:
154 154
155 155 # RepoCommitsView:repo_commit_download
156 156 # RepoCommitsView:repo_commit_patch
157 157 # RepoCommitsView:repo_commit_raw
158 158 # RepoCommitsView:repo_commit_raw@TOKEN
159 159 # RepoFilesView:repo_files_diff
160 160 # RepoFilesView:repo_archivefile
161 161 # RepoFilesView:repo_file_raw
162 162 # GistView:*
163 163 api_access_controllers_whitelist =
164 164
165 165 ; Default encoding used to convert from and to unicode
166 166 ; can be also a comma separated list of encoding in case of mixed encodings
167 167 default_encoding = UTF-8
168 168
169 169 ; instance-id prefix
170 170 ; a prefix key for this instance used for cache invalidation when running
171 171 ; multiple instances of RhodeCode, make sure it's globally unique for
172 172 ; all running RhodeCode instances. Leave empty if you don't use it
173 173 instance_id =
174 174
175 175 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
176 176 ; of an authentication plugin even if it is disabled by its settings.
177 177 ; This could be useful if you are unable to log in to the system due to broken
178 178 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
179 179 ; module to log in again and fix the settings.
180 180 ; Available builtin plugin IDs (hash is part of the ID):
181 181 ; egg:rhodecode-enterprise-ce#rhodecode
182 182 ; egg:rhodecode-enterprise-ce#pam
183 183 ; egg:rhodecode-enterprise-ce#ldap
184 184 ; egg:rhodecode-enterprise-ce#jasig_cas
185 185 ; egg:rhodecode-enterprise-ce#headers
186 186 ; egg:rhodecode-enterprise-ce#crowd
187 187
188 188 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
189 189
190 190 ; Flag to control loading of legacy plugins in py:/path format
191 191 auth_plugin.import_legacy_plugins = true
192 192
193 193 ; alternative return HTTP header for failed authentication. Default HTTP
194 194 ; response is 401 HTTPUnauthorized. Currently HG clients have trouble
195 195 ; handling that, causing a series of failed authentication calls.
196 196 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
197 197 ; This will be served instead of default 401 on bad authentication
198 198 auth_ret_code =
199 199
200 200 ; use special detection method when serving auth_ret_code, instead of serving
201 201 ; ret_code directly, use 401 initially (Which triggers credentials prompt)
202 202 ; and then serve auth_ret_code to clients
203 203 auth_ret_code_detection = false
204 204
205 205 ; locking return code. When repository is locked return this HTTP code. 2XX
206 206 ; codes don't break the transactions while 4XX codes do
207 207 lock_ret_code = 423
208 208
209 209 ; allows to change the repository location in settings page
210 210 allow_repo_location_change = true
211 211
212 212 ; allows to setup custom hooks in settings page
213 213 allow_custom_hooks_settings = true
214 214
215 215 ; Generated license token required for EE edition license.
216 216 ; New generated token value can be found in Admin > settings > license page.
217 217 license_token =
218 218
219 219 ; This flag hides sensitive information on the license page such as token, and license data
220 220 license.hide_license_info = false
221 221
222 222 ; supervisor connection uri, for managing supervisor and logs.
223 223 supervisor.uri =
224 224
225 225 ; supervisord group name/id we only want this RC instance to handle
226 226 supervisor.group_id = prod
227 227
228 228 ; Display extended labs settings
229 229 labs_settings_active = true
230 230
231 231 ; Custom exception store path, defaults to TMPDIR
232 232 ; This is used to store exception from RhodeCode in shared directory
233 233 #exception_tracker.store_path =
234 234
235 235 ; Send email with exception details when it happens
236 236 #exception_tracker.send_email = false
237 237
238 238 ; Comma separated list of recipients for exception emails,
239 239 ; e.g admin@rhodecode.com,devops@rhodecode.com
240 240 ; Can be left empty, then emails will be sent to ALL super-admins
241 241 #exception_tracker.send_email_recipients =
242 242
243 243 ; optional prefix to Add to email Subject
244 244 #exception_tracker.email_prefix = [RHODECODE ERROR]
245 245
246 246 ; File store configuration. This is used to store and serve uploaded files
247 247 file_store.enabled = true
248 248
249 249 ; Storage backend, available options are: local
250 250 file_store.backend = local
251 251
252 252 ; path to store the uploaded binaries
253 253 file_store.storage_path = %(here)s/data/file_store
254 254
255 255 ; Uncomment and set this path to control settings for archive download cache.
256 256 ; Generated repo archives will be cached at this location
257 257 ; and served from the cache during subsequent requests for the same archive of
258 258 ; the repository. This path is important to be shared across filesystems and with
259 259 ; RhodeCode and vcsserver
260 260
261 261 ; Default is $cache_dir/archive_cache if not set
262 262 archive_cache.store_dir = %(here)s/data/archive_cache
263 263
264 264 ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
265 265 archive_cache.cache_size_gb = 40
266 266
267 267 ; By default cache uses sharding technique, this specifies how many shards are there
268 268 archive_cache.cache_shards = 4
269 269
270 270 ; #############
271 271 ; CELERY CONFIG
272 272 ; #############
273 273
274 274 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
275 275
276 276 use_celery = false
277 277
278 278 ; path to store schedule database
279 279 #celerybeat-schedule.path =
280 280
281 281 ; connection url to the message broker (default redis)
282 celery.broker_url = redis://localhost:6379/8
282 celery.broker_url = redis://redis:6379/8
283
284 ; results backend to get results for (default redis)
285 celery.result_backend = redis://redis:6379/8
283 286
284 287 ; rabbitmq example
285 288 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
286 289
287 290 ; maximum tasks to execute before worker restart
288 291 celery.max_tasks_per_child = 20
289 292
290 293 ; tasks will never be sent to the queue, but executed locally instead.
291 294 celery.task_always_eager = false
292 295
293 296 ; #############
294 297 ; DOGPILE CACHE
295 298 ; #############
296 299
297 300 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
298 301 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
299 302 cache_dir = %(here)s/data
300 303
301 304 ; *********************************************
302 305 ; `sql_cache_short` cache for heavy SQL queries
303 306 ; Only supported backend is `memory_lru`
304 307 ; *********************************************
305 308 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
306 309 rc_cache.sql_cache_short.expiration_time = 30
307 310
308 311
309 312 ; *****************************************************
310 313 ; `cache_repo_longterm` cache for repo object instances
311 314 ; Only supported backend is `memory_lru`
312 315 ; *****************************************************
313 316 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
314 317 ; by default we use 30 Days, cache is still invalidated on push
315 318 rc_cache.cache_repo_longterm.expiration_time = 2592000
316 319 ; max items in LRU cache, set to smaller number to save memory, and expire last used caches
317 320 rc_cache.cache_repo_longterm.max_size = 10000
318 321
319 322
320 323 ; *********************************************
321 324 ; `cache_general` cache for general purpose use
322 325 ; for simplicity use rc.file_namespace backend,
323 326 ; for performance and scale use rc.redis
324 327 ; *********************************************
325 328 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
326 329 rc_cache.cache_general.expiration_time = 43200
327 330 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
328 331 #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db
329 332
330 333 ; alternative `cache_general` redis backend with distributed lock
331 334 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
332 335 #rc_cache.cache_general.expiration_time = 300
333 336
335 338 ; redis_expiration_time needs to be greater than expiration_time
335 338 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
336 339
337 340 #rc_cache.cache_general.arguments.host = localhost
338 341 #rc_cache.cache_general.arguments.port = 6379
339 342 #rc_cache.cache_general.arguments.db = 0
340 343 #rc_cache.cache_general.arguments.socket_timeout = 30
341 344 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
342 345 #rc_cache.cache_general.arguments.distributed_lock = true
343 346
344 347 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
345 348 #rc_cache.cache_general.arguments.lock_auto_renewal = true
346 349
347 350 ; *************************************************
348 351 ; `cache_perms` cache for permission tree, auth TTL
349 352 ; for simplicity use rc.file_namespace backend,
350 353 ; for performance and scale use rc.redis
351 354 ; *************************************************
352 355 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
353 356 rc_cache.cache_perms.expiration_time = 3600
354 357 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
355 358 #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db
356 359
357 360 ; alternative `cache_perms` redis backend with distributed lock
358 361 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
359 362 #rc_cache.cache_perms.expiration_time = 300
360 363
362 365 ; redis_expiration_time needs to be greater than expiration_time
362 365 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
363 366
364 367 #rc_cache.cache_perms.arguments.host = localhost
365 368 #rc_cache.cache_perms.arguments.port = 6379
366 369 #rc_cache.cache_perms.arguments.db = 0
367 370 #rc_cache.cache_perms.arguments.socket_timeout = 30
368 371 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
369 372 #rc_cache.cache_perms.arguments.distributed_lock = true
370 373
371 374 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
372 375 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
373 376
374 377 ; ***************************************************
375 378 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
376 379 ; for simplicity use rc.file_namespace backend,
377 380 ; for performance and scale use rc.redis
378 381 ; ***************************************************
379 382 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
380 383 rc_cache.cache_repo.expiration_time = 2592000
381 384 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
382 385 #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db
383 386
384 387 ; alternative `cache_repo` redis backend with distributed lock
385 388 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
386 389 #rc_cache.cache_repo.expiration_time = 2592000
387 390
389 392 ; redis_expiration_time needs to be greater than expiration_time
389 392 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
390 393
391 394 #rc_cache.cache_repo.arguments.host = localhost
392 395 #rc_cache.cache_repo.arguments.port = 6379
393 396 #rc_cache.cache_repo.arguments.db = 1
394 397 #rc_cache.cache_repo.arguments.socket_timeout = 30
395 398 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
396 399 #rc_cache.cache_repo.arguments.distributed_lock = true
397 400
398 401 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
399 402 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
400 403
401 404 ; ##############
402 405 ; BEAKER SESSION
403 406 ; ##############
404 407
405 408 ; beaker.session.type is type of storage options for the logged users sessions. Current allowed
406 409 ; types are file, ext:redis, ext:database, ext:memcached
407 410 ; Fastest ones are ext:redis and ext:database, DO NOT use memory type for session
408 411 beaker.session.type = file
409 412 beaker.session.data_dir = %(here)s/data/sessions
410 413
411 414 ; Redis based sessions
412 415 #beaker.session.type = ext:redis
413 416 #beaker.session.url = redis://127.0.0.1:6379/2
414 417
415 418 ; DB based session, fast, and allows easy management over logged in users
416 419 #beaker.session.type = ext:database
417 420 #beaker.session.table_name = db_session
418 421 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
419 422 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
420 423 #beaker.session.sa.pool_recycle = 3600
421 424 #beaker.session.sa.echo = false
422 425
423 426 beaker.session.key = rhodecode
424 427 beaker.session.secret = production-rc-uytcxaz
425 428 beaker.session.lock_dir = %(here)s/data/sessions/lock
426 429
427 430 ; Secure encrypted cookie. Requires AES and AES python libraries
428 431 ; you must disable beaker.session.secret to use this
429 432 #beaker.session.encrypt_key = key_for_encryption
430 433 #beaker.session.validate_key = validation_key
431 434
432 435 ; Sets session as invalid (also logging out user) if it has not been
433 436 ; accessed for given amount of time in seconds
434 437 beaker.session.timeout = 2592000
435 438 beaker.session.httponly = true
436 439
437 440 ; Path to use for the cookie. Set to prefix if you use prefix middleware
438 441 #beaker.session.cookie_path = /custom_prefix
439 442
440 443 ; Set https secure cookie
441 444 beaker.session.secure = false
442 445
443 446 ; default cookie expiration time in seconds, set to `true` to set expire
444 447 ; at browser close
445 448 #beaker.session.cookie_expires = 3600
446 449
447 450 ; #############################
448 451 ; SEARCH INDEXING CONFIGURATION
449 452 ; #############################
450 453
451 454 ; Full text search indexer is available in rhodecode-tools under
452 455 ; `rhodecode-tools index` command
453 456
454 457 ; WHOOSH Backend, doesn't require additional services to run
455 458 ; it works well with a few dozen repos
456 459 search.module = rhodecode.lib.index.whoosh
457 460 search.location = %(here)s/data/index
458 461
459 462 ; ####################
460 463 ; CHANNELSTREAM CONFIG
461 464 ; ####################
462 465
463 466 ; channelstream enables persistent connections and live notification
464 467 ; in the system. It's also used by the chat system
465 468
466 469 channelstream.enabled = false
467 470
468 471 ; server address for channelstream server on the backend
469 472 channelstream.server = 127.0.0.1:9800
470 473
471 474 ; location of the channelstream server from outside world
472 475 ; use ws:// for http or wss:// for https. This address needs to be handled
473 476 ; by external HTTP server such as Nginx or Apache
474 477 ; see Nginx/Apache configuration examples in our docs
475 478 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
476 479 channelstream.secret = secret
477 480 channelstream.history.location = %(here)s/channelstream_history
478 481
479 482 ; Internal application path that Javascript uses to connect into.
480 483 ; If you use proxy-prefix the prefix should be added before /_channelstream
481 484 channelstream.proxy_path = /_channelstream
482 485
483 486
484 487 ; ##############################
485 488 ; MAIN RHODECODE DATABASE CONFIG
486 489 ; ##############################
487 490
488 491 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
489 492 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
490 493 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
491 494 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
492 495 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
493 496
494 497 sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
495 498
496 499 ; see sqlalchemy docs for other advanced settings
497 500 ; print the sql statements to output
498 501 sqlalchemy.db1.echo = false
499 502
500 503 ; recycle the connections after this amount of seconds
501 504 sqlalchemy.db1.pool_recycle = 3600
502 505
503 506 ; the number of connections to keep open inside the connection pool.
504 507 ; 0 indicates no limit
505 508 ; the general calculus with gevent is:
506 509 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
507 510 ; then increase pool size + max overflow so that they add up to 500.
508 511 #sqlalchemy.db1.pool_size = 5
509 512
510 513 ; The number of connections to allow in connection pool "overflow", that is
511 514 ; connections that can be opened above and beyond the pool_size setting,
512 515 ; which defaults to five.
513 516 #sqlalchemy.db1.max_overflow = 10
514 517
515 518 ; Connection check ping, used to detect broken database connections
516 519 ; could be enabled to better handle cases if MySQL has gone away errors
517 520 #sqlalchemy.db1.ping_connection = true
518 521
519 522 ; ##########
520 523 ; VCS CONFIG
521 524 ; ##########
522 525 vcs.server.enable = true
523 526 vcs.server = localhost:9900
524 527
525 528 ; Web server connectivity protocol, responsible for web based VCS operations
526 529 ; Available protocols are:
527 530 ; `http` - use http-rpc backend (default)
528 531 vcs.server.protocol = http
529 532
530 533 ; Push/Pull operations protocol, available options are:
531 534 ; `http` - use http-rpc backend (default)
532 535 vcs.scm_app_implementation = http
533 536
534 537 ; Push/Pull operations hooks protocol, available options are:
535 538 ; `http` - use http-rpc backend (default)
536 539 vcs.hooks.protocol = http
537 540
538 541 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
539 542 ; accessible via network.
540 543 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
541 544 vcs.hooks.host = *
542 545
543 546 ; Start VCSServer with this instance as a subprocess, useful for development
544 547 vcs.start_server = false
545 548
546 549 ; List of enabled VCS backends, available options are:
547 550 ; `hg` - mercurial
548 551 ; `git` - git
549 552 ; `svn` - subversion
550 553 vcs.backends = hg, git, svn
551 554
552 555 ; Wait this number of seconds before killing connection to the vcsserver
553 556 vcs.connection_timeout = 3600
554 557
555 558 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
556 559 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
557 560 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
558 561 #vcs.svn.compatible_version = 1.8
559 562
560 563 ; Cache flag to cache vcsserver remote calls locally
561 564 ; It uses cache_region `cache_repo`
562 565 vcs.methods.cache = true
563 566
564 567 ; ####################################################
565 568 ; Subversion proxy support (mod_dav_svn)
566 569 ; Maps RhodeCode repo groups into SVN paths for Apache
567 570 ; ####################################################
568 571
569 572 ; Enable or disable the config file generation.
570 573 svn.proxy.generate_config = false
571 574
572 575 ; Generate config file with `SVNListParentPath` set to `On`.
573 576 svn.proxy.list_parent_path = true
574 577
575 578 ; Set location and file name of generated config file.
576 579 svn.proxy.config_file_path = %(here)s/mod_dav_svn.conf
577 580
578 581 ; alternative mod_dav config template. This needs to be a valid mako template
579 582 ; Example template can be found in the source code:
580 583 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
581 584 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
582 585
583 586 ; Used as a prefix to the `Location` block in the generated config file.
584 587 ; In most cases it should be set to `/`.
585 588 svn.proxy.location_root = /
586 589
587 590 ; Command to reload the mod dav svn configuration on change.
588 591 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
589 592 ; Make sure user who runs RhodeCode process is allowed to reload Apache
590 593 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
591 594
592 595 ; If the timeout expires before the reload command finishes, the command will
593 596 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
594 597 #svn.proxy.reload_timeout = 10
595 598
596 599 ; ####################
597 600 ; SSH Support Settings
598 601 ; ####################
599 602
600 603 ; Defines if a custom authorized_keys file should be created and written on
601 604 ; any change user ssh keys. Setting this to false also disables possibility
602 605 ; of adding SSH keys by users from web interface. Super admins can still
603 606 ; manage SSH Keys.
604 607 ssh.generate_authorized_keyfile = false
605 608
606 609 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
607 610 # ssh.authorized_keys_ssh_opts =
608 611
609 612 ; Path to the authorized_keys file where the generate entries are placed.
610 613 ; It is possible to have multiple key files specified in `sshd_config` e.g.
611 614 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
612 615 ssh.authorized_keys_file_path = ~/.ssh/authorized_keys_rhodecode
613 616
614 617 ; Command to execute the SSH wrapper. The binary is available in the
615 618 ; RhodeCode installation directory.
616 619 ; e.g ~/.rccontrol/community-1/profile/bin/rc-ssh-wrapper
617 620 ssh.wrapper_cmd = ~/.rccontrol/community-1/rc-ssh-wrapper
618 621
619 622 ; Allow shell when executing the ssh-wrapper command
620 623 ssh.wrapper_cmd_allow_shell = false
621 624
622 625 ; Enables logging, and detailed output send back to the client during SSH
623 626 ; operations. Useful for debugging, shouldn't be used in production.
624 627 ssh.enable_debug_logging = false
625 628
626 629 ; Paths to binary executable, by default they are the names, but we can
627 630 ; override them if we want to use a custom one
628 631 ssh.executable.hg = ~/.rccontrol/vcsserver-1/profile/bin/hg
629 632 ssh.executable.git = ~/.rccontrol/vcsserver-1/profile/bin/git
630 633 ssh.executable.svn = ~/.rccontrol/vcsserver-1/profile/bin/svnserve
631 634
632 635 ; Enables SSH key generator web interface. Disabling this still allows users
633 636 ; to add their own keys.
634 637 ssh.enable_ui_key_generator = true
635 638
636 639
637 640 ; #################
638 641 ; APPENLIGHT CONFIG
639 642 ; #################
640 643
641 644 ; Appenlight is tailored to work with RhodeCode, see
642 645 ; http://appenlight.rhodecode.com for details how to obtain an account
643 646
644 647 ; Appenlight integration enabled
645 648 #appenlight = false
646 649
647 650 #appenlight.server_url = https://api.appenlight.com
648 651 #appenlight.api_key = YOUR_API_KEY
649 652 #appenlight.transport_config = https://api.appenlight.com?threaded=1&timeout=5
650 653
651 654 ; used for JS client
652 655 #appenlight.api_public_key = YOUR_API_PUBLIC_KEY
653 656
654 657 ; TWEAK AMOUNT OF INFO SENT HERE
655 658
656 659 ; enables 404 error logging (default False)
657 660 #appenlight.report_404 = false
658 661
659 662 ; time in seconds after request is considered being slow (default 1)
660 663 #appenlight.slow_request_time = 1
661 664
662 665 ; record slow requests in application
663 666 ; (needs to be enabled for slow datastore recording and time tracking)
664 667 #appenlight.slow_requests = true
665 668
666 669 ; enable hooking to application loggers
667 670 #appenlight.logging = true
668 671
669 672 ; minimum log level for log capture
670 673 #appenlight.logging.level = WARNING
671 674
672 675 ; send logs only from erroneous/slow requests
673 676 ; (saves API quota for intensive logging)
674 677 #appenlight.logging_on_error = false
675 678
676 679 ; list of additional keywords that should be grabbed from environ object
677 680 ; can be string with comma separated list of words in lowercase
678 681 ; (by default client will always send following info:
679 682 ; 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that
680 683 ; start with HTTP*; this list can be extended with additional keywords here)
681 684 #appenlight.environ_keys_whitelist =
682 685
683 686 ; list of keywords that should be blanked from request object
684 687 ; can be string with comma separated list of words in lowercase
685 688 ; (by default client will always blank keys that contain following words
686 689 ; 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf'
687 690 ; this list can be extended with additional keywords set here)
688 691 #appenlight.request_keys_blacklist =
689 692
690 693 ; list of namespaces that should be ignored when gathering log entries
691 694 ; can be string with comma separated list of namespaces
692 695 ; (by default the client ignores own entries: appenlight_client.client)
693 696 #appenlight.log_namespace_blacklist =
694 697
695 698 ; Statsd client config, this is used to send metrics to statsd
696 699 ; We recommend setting statsd_exported and scrape them using Prometheus
697 700 #statsd.enabled = false
698 701 #statsd.statsd_host = 0.0.0.0
699 702 #statsd.statsd_port = 8125
700 703 #statsd.statsd_prefix =
701 704 #statsd.statsd_ipv6 = false
702 705
703 706 ; configure logging automatically at server startup set to false
704 707 ; to use the below custom logging config.
705 708 ; RC_LOGGING_FORMATTER
706 709 ; RC_LOGGING_LEVEL
707 710 ; env variables can control the settings for logging in case of autoconfigure
708 711
709 712 #logging.autoconfigure = true
710 713
711 714 ; specify your own custom logging config file to configure logging
712 715 #logging.logging_conf_file = /path/to/custom_logging.ini
713 716
714 717 ; Dummy marker to add new entries after.
715 718 ; Add any custom entries below. Please don't remove this marker.
716 719 custom.conf = 1
717 720
718 721
719 722 ; #####################
720 723 ; LOGGING CONFIGURATION
721 724 ; #####################
722 725
723 726 [loggers]
724 727 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper
725 728
726 729 [handlers]
727 730 keys = console, console_sql
728 731
729 732 [formatters]
730 733 keys = generic, json, color_formatter, color_formatter_sql
731 734
732 735 ; #######
733 736 ; LOGGERS
734 737 ; #######
735 738 [logger_root]
736 739 level = NOTSET
737 740 handlers = console
738 741
739 742 [logger_sqlalchemy]
740 743 level = INFO
741 744 handlers = console_sql
742 745 qualname = sqlalchemy.engine
743 746 propagate = 0
744 747
745 748 [logger_beaker]
746 749 level = DEBUG
747 750 handlers =
748 751 qualname = beaker.container
749 752 propagate = 1
750 753
751 754 [logger_rhodecode]
752 755 level = DEBUG
753 756 handlers =
754 757 qualname = rhodecode
755 758 propagate = 1
756 759
757 760 [logger_ssh_wrapper]
758 761 level = DEBUG
759 762 handlers =
760 763 qualname = ssh_wrapper
761 764 propagate = 1
762 765
763 766 [logger_celery]
764 767 level = DEBUG
765 768 handlers =
766 769 qualname = celery
767 770
768 771
769 772 ; ########
770 773 ; HANDLERS
771 774 ; ########
772 775
773 776 [handler_console]
774 777 class = StreamHandler
775 778 args = (sys.stderr, )
776 779 level = INFO
777 780 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
778 781 ; This allows sending properly formatted logs to grafana loki or elasticsearch
779 782 formatter = generic
780 783
781 784 [handler_console_sql]
782 785 ; "level = DEBUG" logs SQL queries and results.
783 786 ; "level = INFO" logs SQL queries.
784 787 ; "level = WARN" logs neither. (Recommended for production systems.)
785 788 class = StreamHandler
786 789 args = (sys.stderr, )
787 790 level = WARN
788 791 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
789 792 ; This allows sending properly formatted logs to grafana loki or elasticsearch
790 793 formatter = generic
791 794
792 795 ; ##########
793 796 ; FORMATTERS
794 797 ; ##########
795 798
796 799 [formatter_generic]
797 800 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
798 801 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
799 802 datefmt = %Y-%m-%d %H:%M:%S
800 803
801 804 [formatter_color_formatter]
802 805 class = rhodecode.lib.logging_formatter.ColorFormatter
803 806 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
804 807 datefmt = %Y-%m-%d %H:%M:%S
805 808
806 809 [formatter_color_formatter_sql]
807 810 class = rhodecode.lib.logging_formatter.ColorFormatterSql
808 811 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
809 812 datefmt = %Y-%m-%d %H:%M:%S
810 813
811 814 [formatter_json]
812 815 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
813 816 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
General Comments 0
You need to be logged in to leave comments. Login now