hooks: allow binding to the existing hostname automatically if nothing is explicitly set.
super-admin
r4859:169b0860 default
@@ -1,922 +1,923 b''
1 1 ## -*- coding: utf-8 -*-
2 2
3 3 ; #########################################
4 4 ; RHODECODE COMMUNITY EDITION CONFIGURATION
5 5 ; #########################################
6 6
7 7 [DEFAULT]
8 8 ; Debug flag sets all loggers to debug, and enables request tracking
9 9 debug = true
10 10
11 11 ; ########################################################################
12 12 ; EMAIL CONFIGURATION
13 13 ; These settings will be used by the RhodeCode mailing system
14 14 ; ########################################################################
15 15
16 16 ; prefix all emails subjects with given prefix, helps filtering out emails
17 17 #email_prefix = [RhodeCode]
18 18
19 19 ; email FROM address all mails will be sent
20 20 #app_email_from = rhodecode-noreply@localhost
21 21
22 22 #smtp_server = mail.server.com
23 23 #smtp_username =
24 24 #smtp_password =
25 25 #smtp_port =
26 26 #smtp_use_tls = false
27 27 #smtp_use_ssl = true
28 28
29 29 [server:main]
30 30 ; COMMON HOST/IP CONFIG
31 31 host = 127.0.0.1
32 32 port = 5000
33 33
34 34 ; ##################################################
35 35 ; WAITRESS WSGI SERVER - Recommended for Development
36 36 ; ##################################################
37 37
38 38 ; use server type
39 39 use = egg:waitress#main
40 40
41 41 ; number of worker threads
42 42 threads = 5
43 43
44 44 ; MAX BODY SIZE 100GB
45 45 max_request_body_size = 107374182400
46 46
47 47 ; Use poll instead of select, fixes file descriptors limits problems.
48 48 ; May not work on old windows systems.
49 49 asyncore_use_poll = true
50 50
51 51
52 52 ; ###########################
53 53 ; GUNICORN APPLICATION SERVER
54 54 ; ###########################
55 55
56 56 ; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
57 57
58 58 ; Module to use, this setting shouldn't be changed
59 59 #use = egg:gunicorn#main
60 60
61 61 ; Sets the number of process workers. More workers means more concurrent connections
62 62 ; RhodeCode can handle at the same time. Each additional worker also increases
63 63 ; memory usage, as each has its own set of caches.
64 64 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
65 65 ; than 8-10 unless this is a really big deployment, e.g. 700-1000 users.
66 66 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
67 67 ; when using more than 1 worker.
68 68 #workers = 2
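A quick worked example of the formula in the comment above (purely illustrative, assuming a 4-CPU host): 2 * 4 + 1 = 9 workers, which still sits inside the suggested 8-10 cap, so on such a machine the commented value could plausibly be raised:

    #workers = 9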
69 69
70 70 ; Gunicorn access log level
71 71 #loglevel = info
72 72
73 73 ; Process name visible in process list
74 74 #proc_name = rhodecode
75 75
76 76 ; Type of worker class, one of `sync`, `gevent`
77 77 ; Recommended type is `gevent`
78 78 #worker_class = gevent
79 79
80 80 ; The maximum number of simultaneous clients. Valid only for gevent
81 81 #worker_connections = 10
82 82
83 83 ; Max number of requests that worker will handle before being gracefully restarted.
84 84 ; Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
85 85 #max_requests = 1000
86 86 #max_requests_jitter = 30
87 87
88 88 ; Amount of time a worker can spend handling a request before it
89 89 ; gets killed and restarted. By default set to 21600 (6hrs)
90 90 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
91 91 #timeout = 21600
92 92
93 93 ; The maximum size of HTTP request line in bytes.
94 94 ; 0 for unlimited
95 95 #limit_request_line = 0
96 96
97 97 ; Limit the number of HTTP headers fields in a request.
98 98 ; By default this value is 100 and can't be larger than 32768.
99 99 #limit_request_fields = 32768
100 100
101 101 ; Limit the allowed size of an HTTP request header field.
102 102 ; Value is a positive number or 0.
103 103 ; Setting it to 0 will allow unlimited header field sizes.
104 104 #limit_request_field_size = 0
105 105
106 106 ; Timeout for graceful workers restart.
107 107 ; After receiving a restart signal, workers have this much time to finish
108 108 ; serving requests. Workers still alive after the timeout (starting from the
109 109 ; receipt of the restart signal) are force killed.
110 110 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
111 111 #graceful_timeout = 3600
112 112
113 113 # The number of seconds to wait for requests on a Keep-Alive connection.
114 114 # Generally set in the 1-5 seconds range.
115 115 #keepalive = 2
116 116
117 117 ; Maximum memory usage that each worker can use before it will receive a
118 118 ; graceful restart signal. 0 = memory monitoring is disabled
119 119 ; Examples: 268435456 (256MB), 536870912 (512MB)
120 120 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
121 121 #memory_max_usage = 0
122 122
123 123 ; How often in seconds to check for memory usage for each gunicorn worker
124 124 #memory_usage_check_interval = 60
125 125
126 126 ; Threshold value below which we don't recycle a worker if garbage collection
127 127 ; frees up enough resources. Before each restart we try to run GC on the worker;
128 128 ; if enough memory is freed after that, the restart will not happen.
129 129 #memory_usage_recovery_threshold = 0.8
130 130
131 131
132 132 ; Prefix middleware for RhodeCode.
133 133 ; recommended when using a proxy setup.
134 134 ; Allows serving RhodeCode under a prefix on the server,
135 135 ; e.g. https://server.com/custom_prefix. Enable the `filter-with =` option below as well.
136 136 ; And set your prefix like: `prefix = /custom_prefix`
137 137 ; Be sure to also set beaker.session.cookie_path = /custom_prefix if you need
138 138 ; your cookies to only work on the prefix url
139 139 [filter:proxy-prefix]
140 140 use = egg:PasteDeploy#prefix
141 141 prefix = /
142 142
143 143 [app:main]
144 144 ; The %(here)s variable will be replaced with the absolute path of the parent directory
145 145 ; of this file
146 146 ; Each option in the app:main section can be overridden by an environment variable
147 147 ;
148 148 ;To override an option:
149 149 ;
150 150 ;RC_<KeyName>
151 151 ;Everything should be uppercase, . and - should be replaced by _.
152 152 ;For example, if you have these configuration settings:
153 153 ;rc_cache.repo_object.backend = foo
154 154 ;can be overridden by
155 155 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
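As a hedged illustration of the RC_<KeyName> rule above, applied to two keys that appear further down in this file (the resulting variable names simply follow the stated uppercase/underscore convention and are not confirmed elsewhere in this source):

    export RC_USE_CELERY=true
    export RC_LOCK_RET_CODE=423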
156 156
157 157 use = egg:rhodecode-enterprise-ce
158 158
159 159 ; enable proxy prefix middleware, defined above
160 160 #filter-with = proxy-prefix
161 161
162 162 ; #############
163 163 ; DEBUG OPTIONS
164 164 ; #############
165 165
166 166 pyramid.reload_templates = true
167 167
168 168 # During development we want to have the debug toolbar enabled
169 169 pyramid.includes =
170 170 pyramid_debugtoolbar
171 171
172 172 debugtoolbar.hosts = 0.0.0.0/0
173 173 debugtoolbar.exclude_prefixes =
174 174 /css
175 175 /fonts
176 176 /images
177 177 /js
178 178
179 179 ## RHODECODE PLUGINS ##
180 180 rhodecode.includes =
181 181 rhodecode.api
182 182
183 183
184 184 # api prefix url
185 185 rhodecode.api.url = /_admin/api
186 186
187 187 ; enable debug style page
188 188 debug_style = true
189 189
190 190 ; #################
191 191 ; END DEBUG OPTIONS
192 192 ; #################
193 193
194 194 ; encryption key used to encrypt social plugin tokens,
195 195 ; remote_urls with credentials etc, if not set it defaults to
196 196 ; `beaker.session.secret`
197 197 #rhodecode.encrypted_values.secret =
198 198
199 199 ; decryption strict mode (enabled by default). It controls if decryption raises
200 200 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
201 201 #rhodecode.encrypted_values.strict = false
202 202
203 203 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
204 204 ; fernet is safer, and we strongly recommend switching to it.
205 205 ; Due to backward compatibility aes is used as default.
206 206 #rhodecode.encrypted_values.algorithm = fernet
207 207
208 208 ; Return gzipped responses from RhodeCode (static files/application)
209 209 gzip_responses = false
210 210
211 211 ; Auto-generate javascript routes file on startup
212 212 generate_js_files = false
213 213
214 214 ; System global default language.
215 215 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
216 216 lang = en
217 217
218 218 ; Perform a full repository scan and import on each server start.
219 219 ; Setting this to true could lead to a very long startup time.
220 220 startup.import_repos = false
221 221
222 222 ; Uncomment and set this path to use archive download cache.
223 223 ; Once enabled, generated archives will be cached at this location
224 224 ; and served from the cache during subsequent requests for the same archive of
225 225 ; the repository.
226 226 #archive_cache_dir = /tmp/tarballcache
227 227
228 228 ; URL at which the application is running. This is used for Bootstrapping
229 229 ; requests in context when no web request is available. Used in ishell, or
230 230 ; SSH calls. Set this for events to receive proper url for SSH calls.
231 231 app.base_url = http://rhodecode.local
232 232
233 233 ; Unique application ID. Should be a random unique string for security.
234 234 app_instance_uuid = rc-production
235 235
236 236 ; Cut off limit for large diffs (size in bytes). If overall diff size on
237 237 ; commit, or pull request exceeds this limit this diff will be displayed
238 238 ; partially. E.g 512000 == 512Kb
239 239 cut_off_limit_diff = 512000
240 240
241 241 ; Cut off limit for large files inside diffs (size in bytes). Each individual
242 242 ; file inside diff which exceeds this limit will be displayed partially.
243 243 ; E.g 128000 == 128Kb
244 244 cut_off_limit_file = 128000
245 245
246 246 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
247 247 vcs_full_cache = true
248 248
249 249 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
250 250 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
251 251 force_https = false
252 252
253 253 ; use Strict-Transport-Security headers
254 254 use_htsts = false
255 255
256 256 ; Set to true if your repos are exposed using the dumb protocol
257 257 git_update_server_info = false
258 258
259 259 ; RSS/ATOM feed options
260 260 rss_cut_off_limit = 256000
261 261 rss_items_per_page = 10
262 262 rss_include_diff = false
263 263
264 264 ; gist URL alias, used to create nicer urls for gist. This should be an
265 265 ; url that does rewrites to _admin/gists/{gistid}.
266 266 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
267 267 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
268 268 gist_alias_url =
269 269
270 270 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
271 271 ; used for access.
272 272 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
273 273 ; came from the logged-in user who owns this authentication token.
274 274 ; Additionally, the @TOKEN syntax can be used to bind the view to a specific
275 275 ; authentication token. Such a view would only be accessible when used together
276 276 ; with this authentication token.
277 277 ; list of all views can be found under `/_admin/permissions/auth_token_access`
278 278 ; The list should be "," separated and on a single line.
279 279 ; Most common views to enable:
280 280
281 281 # RepoCommitsView:repo_commit_download
282 282 # RepoCommitsView:repo_commit_patch
283 283 # RepoCommitsView:repo_commit_raw
284 284 # RepoCommitsView:repo_commit_raw@TOKEN
285 285 # RepoFilesView:repo_files_diff
286 286 # RepoFilesView:repo_archivefile
287 287 # RepoFilesView:repo_file_raw
288 288 # GistView:*
289 289 api_access_controllers_whitelist =
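A hedged example of a populated whitelist, built only from view names in the list above and following the single-line, comma-separated form the comment asks for (whether these particular views are safe to expose depends on the deployment):

    #api_access_controllers_whitelist = RepoFilesView:repo_archivefile, RepoFilesView:repo_file_raw, GistView:*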
290 290
291 291 ; Default encoding used to convert from and to unicode
292 292 ; can also be a comma separated list of encodings in case of mixed encodings
293 293 default_encoding = UTF-8
294 294
295 295 ; instance-id prefix
296 296 ; a prefix key for this instance used for cache invalidation when running
297 297 ; multiple instances of RhodeCode, make sure it's globally unique for
298 298 ; all running RhodeCode instances. Leave empty if you don't use it
299 299 instance_id =
300 300
301 301 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
302 302 ; of an authentication plugin even if it is disabled by its settings.
303 303 ; This could be useful if you are unable to log in to the system due to broken
304 304 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
305 305 ; module to log in again and fix the settings.
306 306 ; Available builtin plugin IDs (hash is part of the ID):
307 307 ; egg:rhodecode-enterprise-ce#rhodecode
308 308 ; egg:rhodecode-enterprise-ce#pam
309 309 ; egg:rhodecode-enterprise-ce#ldap
310 310 ; egg:rhodecode-enterprise-ce#jasig_cas
311 311 ; egg:rhodecode-enterprise-ce#headers
312 312 ; egg:rhodecode-enterprise-ce#crowd
313 313
314 314 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
315 315
316 316 ; Flag to control loading of legacy plugins in py:/path format
317 317 auth_plugin.import_legacy_plugins = true
318 318
319 319 ; alternative return HTTP header for failed authentication. Default HTTP
320 320 ; response is 401 HTTPUnauthorized. Currently HG clients have trouble
321 321 ; handling that, causing a series of failed authentication calls.
322 322 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
323 323 ; This will be served instead of default 401 on bad authentication
324 324 auth_ret_code =
325 325
326 326 ; use special detection method when serving auth_ret_code, instead of serving
327 327 ; ret_code directly, use 401 initially (Which triggers credentials prompt)
328 328 ; and then serve auth_ret_code to clients
329 329 auth_ret_code_detection = false
330 330
331 331 ; locking return code. When repository is locked return this HTTP code. 2XX
332 332 ; codes don't break the transactions while 4XX codes do
333 333 lock_ret_code = 423
334 334
335 335 ; allows changing the repository location on the settings page
336 336 allow_repo_location_change = true
337 337
338 338 ; allows setting up custom hooks on the settings page
339 339 allow_custom_hooks_settings = true
340 340
341 341 ; Generated license token required for EE edition license.
342 342 ; New generated token value can be found in Admin > settings > license page.
343 343 license_token =
344 344
345 345 ; This flag hides sensitive information on the license page such as token, and license data
346 346 license.hide_license_info = false
347 347
348 348 ; supervisor connection uri, for managing supervisor and logs.
349 349 supervisor.uri =
350 350
351 351 ; supervisord group name/id we only want this RC instance to handle
352 352 supervisor.group_id = dev
353 353
354 354 ; Display extended labs settings
355 355 labs_settings_active = true
356 356
357 357 ; Custom exception store path, defaults to TMPDIR
358 358 ; This is used to store exceptions from RhodeCode in a shared directory
359 359 #exception_tracker.store_path =
360 360
361 361 ; Send email with exception details when it happens
362 362 #exception_tracker.send_email = false
363 363
364 364 ; Comma separated list of recipients for exception emails,
365 365 ; e.g admin@rhodecode.com,devops@rhodecode.com
366 366 ; Can be left empty, then emails will be sent to ALL super-admins
367 367 #exception_tracker.send_email_recipients =
368 368
369 369 ; optional prefix to add to the email Subject
370 370 #exception_tracker.email_prefix = [RHODECODE ERROR]
371 371
372 372 ; File store configuration. This is used to store and serve uploaded files
373 373 file_store.enabled = true
374 374
375 375 ; Storage backend, available options are: local
376 376 file_store.backend = local
377 377
378 378 ; path to store the uploaded binaries
379 379 file_store.storage_path = %(here)s/data/file_store
380 380
381 381
382 382 ; #############
383 383 ; CELERY CONFIG
384 384 ; #############
385 385
386 386 ; manually run celery: /path/to/celery worker -E --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
387 387
388 388 use_celery = false
389 389
390 390 ; path to store schedule database
391 391 #celerybeat-schedule.path =
392 392
393 393 ; connection url to the message broker (default redis)
394 394 celery.broker_url = redis://localhost:6379/8
395 395
396 396 ; rabbitmq example
397 397 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
398 398
399 399 ; maximum tasks to execute before worker restart
400 400 celery.max_tasks_per_child = 100
401 401
402 402 ; tasks will never be sent to the queue, but executed locally instead.
403 403 celery.task_always_eager = false
404 404
405 405 ; #############
406 406 ; DOGPILE CACHE
407 407 ; #############
408 408
409 409 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
410 410 ; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space
411 411 cache_dir = %(here)s/data
412 412
413 413 ; *********************************************
414 414 ; `sql_cache_short` cache for heavy SQL queries
415 415 ; Only supported backend is `memory_lru`
416 416 ; *********************************************
417 417 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
418 418 rc_cache.sql_cache_short.expiration_time = 30
419 419
420 420
421 421 ; *****************************************************
422 422 ; `cache_repo_longterm` cache for repo object instances
423 423 ; Only supported backend is `memory_lru`
424 424 ; *****************************************************
425 425 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
426 426 ; by default we use 30 Days, cache is still invalidated on push
427 427 rc_cache.cache_repo_longterm.expiration_time = 2592000
428 428 ; max items in LRU cache, set to smaller number to save memory, and expire last used caches
429 429 rc_cache.cache_repo_longterm.max_size = 10000
430 430
431 431
432 432 ; *********************************************
433 433 ; `cache_general` cache for general purpose use
434 434 ; for simplicity use rc.file_namespace backend,
435 435 ; for performance and scale use rc.redis
436 436 ; *********************************************
437 437 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
438 438 rc_cache.cache_general.expiration_time = 43200
439 439 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
440 440 #rc_cache.cache_general.arguments.filename = /tmp/cache_general.db
441 441
442 442 ; alternative `cache_general` redis backend with distributed lock
443 443 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
444 444 #rc_cache.cache_general.expiration_time = 300
445 445
446 446 ; redis_expiration_time needs to be greater than expiration_time
447 447 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
448 448
449 449 #rc_cache.cache_general.arguments.host = localhost
450 450 #rc_cache.cache_general.arguments.port = 6379
451 451 #rc_cache.cache_general.arguments.db = 0
452 452 #rc_cache.cache_general.arguments.socket_timeout = 30
453 453 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
454 454 #rc_cache.cache_general.arguments.distributed_lock = true
455 455
456 456 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
457 457 #rc_cache.cache_general.arguments.lock_auto_renewal = true
458 458
459 459 ; *************************************************
460 460 ; `cache_perms` cache for permission tree, auth TTL
461 461 ; for simplicity use rc.file_namespace backend,
462 462 ; for performance and scale use rc.redis
463 463 ; *************************************************
464 464 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
465 465 rc_cache.cache_perms.expiration_time = 3600
466 466 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
467 467 #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms.db
468 468
469 469 ; alternative `cache_perms` redis backend with distributed lock
470 470 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
471 471 #rc_cache.cache_perms.expiration_time = 300
472 472
473 473 ; redis_expiration_time needs to be greater than expiration_time
474 474 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
475 475
476 476 #rc_cache.cache_perms.arguments.host = localhost
477 477 #rc_cache.cache_perms.arguments.port = 6379
478 478 #rc_cache.cache_perms.arguments.db = 0
479 479 #rc_cache.cache_perms.arguments.socket_timeout = 30
480 480 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
481 481 #rc_cache.cache_perms.arguments.distributed_lock = true
482 482
483 483 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
484 484 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
485 485
486 486 ; ***************************************************
487 487 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
488 488 ; for simplicity use rc.file_namespace backend,
489 489 ; for performance and scale use rc.redis
490 490 ; ***************************************************
491 491 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
492 492 rc_cache.cache_repo.expiration_time = 2592000
493 493 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
494 494 #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo.db
495 495
496 496 ; alternative `cache_repo` redis backend with distributed lock
497 497 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
498 498 #rc_cache.cache_repo.expiration_time = 2592000
499 499
500 500 ; redis_expiration_time needs to be greater than expiration_time
501 501 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
502 502
503 503 #rc_cache.cache_repo.arguments.host = localhost
504 504 #rc_cache.cache_repo.arguments.port = 6379
505 505 #rc_cache.cache_repo.arguments.db = 1
506 506 #rc_cache.cache_repo.arguments.socket_timeout = 30
507 507 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
508 508 #rc_cache.cache_repo.arguments.distributed_lock = true
509 509
510 510 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
511 511 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
512 512
513 513 ; ##############
514 514 ; BEAKER SESSION
515 515 ; ##############
516 516
517 517 ; beaker.session.type is the type of storage used for the logged-in users' sessions. Current allowed
518 518 ; types are file, ext:redis, ext:database, ext:memcached, and memory (default if not specified).
519 519 ; The fastest ones are Redis and ext:database
520 520 beaker.session.type = file
521 521 beaker.session.data_dir = %(here)s/data/sessions
522 522
523 523 ; Redis based sessions
524 524 #beaker.session.type = ext:redis
525 525 #beaker.session.url = redis://127.0.0.1:6379/2
526 526
527 527 ; DB based session, fast, and allows easy management of logged-in users
528 528 #beaker.session.type = ext:database
529 529 #beaker.session.table_name = db_session
530 530 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
531 531 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
532 532 #beaker.session.sa.pool_recycle = 3600
533 533 #beaker.session.sa.echo = false
534 534
535 535 beaker.session.key = rhodecode
536 536 beaker.session.secret = develop-rc-uytcxaz
537 537 beaker.session.lock_dir = %(here)s/data/sessions/lock
538 538
539 539 ; Secure encrypted cookie. Requires AES and AES python libraries.
540 540 ; You must disable beaker.session.secret to use this
541 541 #beaker.session.encrypt_key = key_for_encryption
542 542 #beaker.session.validate_key = validation_key
543 543
544 544 ; Sets session as invalid (also logging out the user) if it has not been
545 545 ; accessed for the given amount of time in seconds
546 546 beaker.session.timeout = 2592000
547 547 beaker.session.httponly = true
548 548
549 549 ; Path to use for the cookie. Set to prefix if you use prefix middleware
550 550 #beaker.session.cookie_path = /custom_prefix
551 551
552 552 ; Set https secure cookie
553 553 beaker.session.secure = false
554 554
555 555 ; default cookie expiration time in seconds, set to `true` to expire
556 556 ; at browser close
557 557 #beaker.session.cookie_expires = 3600
558 558
559 559 ; #############################
560 560 ; SEARCH INDEXING CONFIGURATION
561 561 ; #############################
562 562
563 563 ; Full text search indexer is available in rhodecode-tools under
564 564 ; `rhodecode-tools index` command
565 565
566 566 ; WHOOSH Backend, doesn't require additional services to run
567 567 ; it works well with a few dozen repos
568 568 search.module = rhodecode.lib.index.whoosh
569 569 search.location = %(here)s/data/index
570 570
571 571 ; ####################
572 572 ; CHANNELSTREAM CONFIG
573 573 ; ####################
574 574
575 575 ; channelstream enables persistent connections and live notifications
576 576 ; in the system. It's also used by the chat system
577 577
578 578 channelstream.enabled = false
579 579
580 580 ; server address for channelstream server on the backend
581 581 channelstream.server = 127.0.0.1:9800
582 582
583 583 ; location of the channelstream server from outside world
584 584 ; use ws:// for http or wss:// for https. This address needs to be handled
585 585 ; by external HTTP server such as Nginx or Apache
586 586 ; see Nginx/Apache configuration examples in our docs
587 587 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
588 588 channelstream.secret = secret
589 589 channelstream.history.location = %(here)s/channelstream_history
590 590
591 591 ; Internal application path that Javascript uses to connect to.
592 592 ; If you use proxy-prefix, the prefix should be added before /_channelstream
593 593 channelstream.proxy_path = /_channelstream
594 594
595 595
596 596 ; ##############################
597 597 ; MAIN RHODECODE DATABASE CONFIG
598 598 ; ##############################
599 599
600 600 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
601 601 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
602 602 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
603 603 ; pymysql is an alternative driver for MySQL, use it in case of problems with the default one
604 604 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
605 605
606 606 sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
607 607
608 608 ; see sqlalchemy docs for other advanced settings
609 609 ; print the sql statements to output
610 610 sqlalchemy.db1.echo = false
611 611
612 612 ; recycle the connections after this amount of seconds
613 613 sqlalchemy.db1.pool_recycle = 3600
614 614 sqlalchemy.db1.convert_unicode = true
615 615
616 616 ; the number of connections to keep open inside the connection pool.
617 617 ; 0 indicates no limit
618 618 #sqlalchemy.db1.pool_size = 5
619 619
620 620 ; The number of connections to allow in connection pool "overflow", that is
621 621 ; connections that can be opened above and beyond the pool_size setting,
622 622 ; which defaults to five.
623 623 #sqlalchemy.db1.max_overflow = 10
624 624
625 625 ; Connection check ping, used to detect broken database connections
626 626 ; could be enabled to better handle 'MySQL has gone away' errors
627 627 #sqlalchemy.db1.ping_connection = true
628 628
629 629 ; ##########
630 630 ; VCS CONFIG
631 631 ; ##########
632 632 vcs.server.enable = true
633 633 vcs.server = localhost:9900
634 634
635 635 ; Web server connectivity protocol, responsible for web based VCS operations
636 636 ; Available protocols are:
637 637 ; `http` - use http-rpc backend (default)
638 638 vcs.server.protocol = http
639 639
640 640 ; Push/Pull operations protocol, available options are:
641 641 ; `http` - use http-rpc backend (default)
642 642 vcs.scm_app_implementation = http
643 643
644 644 ; Push/Pull operations hooks protocol, available options are:
645 645 ; `http` - use http-rpc backend (default)
646 646 vcs.hooks.protocol = http
647 647
648 ; Host on which this instance is listening for hooks. If vcsserver is in other location
649 ; this should be adjusted.
650 vcs.hooks.host = 127.0.0.1
648 ; Host on which this instance is listening for hooks. vcsserver will call this host to run pull/push hooks, so it should be
649 ; accessible over the network.
650 ; Use vcs.hooks.host = "*" to bind to the current hostname (useful for Docker)
651 vcs.hooks.host = *
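This replacement is the substance of the commit: instead of pinning the hooks callback address to 127.0.0.1, the wildcard binds to the current hostname so a vcsserver running elsewhere (for example in a Docker container) can reach this instance. If an explicit address is preferred, the removed form shown above can still be used, e.g.:

    #vcs.hooks.host = 127.0.0.1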
651 652
652 653 ; Start VCSServer with this instance as a subprocess, useful for development
653 654 vcs.start_server = false
654 655
655 656 ; List of enabled VCS backends, available options are:
656 657 ; `hg` - mercurial
657 658 ; `git` - git
658 659 ; `svn` - subversion
659 660 vcs.backends = hg, git, svn
660 661
661 662 ; Wait this number of seconds before killing connection to the vcsserver
662 663 vcs.connection_timeout = 3600
663 664
664 665 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
665 666 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
666 667 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
667 668 #vcs.svn.compatible_version = 1.8
668 669
669 670 ; Cache flag to cache vcsserver remote calls locally
670 671 ; It uses cache_region `cache_repo`
671 672 vcs.methods.cache = true
672 673
673 674 ; ####################################################
674 675 ; Subversion proxy support (mod_dav_svn)
675 676 ; Maps RhodeCode repo groups into SVN paths for Apache
676 677 ; ####################################################
677 678
678 679 ; Enable or disable the config file generation.
679 680 svn.proxy.generate_config = false
680 681
681 682 ; Generate config file with `SVNListParentPath` set to `On`.
682 683 svn.proxy.list_parent_path = true
683 684
684 685 ; Set location and file name of generated config file.
685 686 svn.proxy.config_file_path = %(here)s/mod_dav_svn.conf
686 687
687 688 ; alternative mod_dav config template. This needs to be a valid mako template
688 689 ; Example template can be found in the source code:
689 690 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
690 691 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
691 692
692 693 ; Used as a prefix to the `Location` block in the generated config file.
693 694 ; In most cases it should be set to `/`.
694 695 svn.proxy.location_root = /
695 696
696 697 ; Command to reload the mod dav svn configuration on change.
697 698 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
698 699 ; Make sure the user who runs the RhodeCode process is allowed to reload Apache
699 700 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
700 701
701 702 ; If the timeout expires before the reload command finishes, the command will
702 703 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
703 704 #svn.proxy.reload_timeout = 10
704 705
705 706 ; ####################
706 707 ; SSH Support Settings
707 708 ; ####################
708 709
709 710 ; Defines if a custom authorized_keys file should be created and written on
710 711 ; any change of user SSH keys. Setting this to false also disables the possibility
711 712 ; of users adding SSH keys from the web interface. Super admins can still
712 713 ; manage SSH Keys.
713 714 ssh.generate_authorized_keyfile = false
714 715
715 716 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
716 717 # ssh.authorized_keys_ssh_opts =
717 718
718 719 ; Path to the authorized_keys file where the generated entries are placed.
719 720 ; It is possible to have multiple key files specified in `sshd_config` e.g.
720 721 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
721 722 ssh.authorized_keys_file_path = ~/.ssh/authorized_keys_rhodecode
722 723
723 724 ; Command to execute the SSH wrapper. The binary is available in the
724 725 ; RhodeCode installation directory.
725 726 ; e.g ~/.rccontrol/community-1/profile/bin/rc-ssh-wrapper
726 727 ssh.wrapper_cmd = ~/.rccontrol/community-1/rc-ssh-wrapper
727 728
728 729 ; Allow shell when executing the ssh-wrapper command
729 730 ssh.wrapper_cmd_allow_shell = false
730 731
731 732 ; Enables logging and detailed output sent back to the client during SSH
732 733 ; operations. Useful for debugging, shouldn't be used in production.
733 734 ssh.enable_debug_logging = true
734 735
735 736 ; Paths to binary executables; by default these are just the names, but we can
736 737 ; override them if we want to use custom ones
737 738 ssh.executable.hg = ~/.rccontrol/vcsserver-1/profile/bin/hg
738 739 ssh.executable.git = ~/.rccontrol/vcsserver-1/profile/bin/git
739 740 ssh.executable.svn = ~/.rccontrol/vcsserver-1/profile/bin/svnserve
740 741
741 742 ; Enables SSH key generator web interface. Disabling this still allows users
742 743 ; to add their own keys.
743 744 ssh.enable_ui_key_generator = true
744 745
745 746
746 747 ; #################
747 748 ; APPENLIGHT CONFIG
748 749 ; #################
749 750
750 751 ; Appenlight is tailored to work with RhodeCode, see
751 752 ; http://appenlight.rhodecode.com for details on how to obtain an account
752 753
753 754 ; Appenlight integration enabled
754 755 #appenlight = false
755 756
756 757 #appenlight.server_url = https://api.appenlight.com
757 758 #appenlight.api_key = YOUR_API_KEY
758 759 #appenlight.transport_config = https://api.appenlight.com?threaded=1&timeout=5
759 760
760 761 ; used for JS client
761 762 #appenlight.api_public_key = YOUR_API_PUBLIC_KEY
762 763
763 764 ; TWEAK AMOUNT OF INFO SENT HERE
764 765
765 766 ; enables 404 error logging (default False)
766 767 #appenlight.report_404 = false
767 768
768 769 ; time in seconds after which a request is considered slow (default 1)
769 770 #appenlight.slow_request_time = 1
770 771
771 772 ; record slow requests in application
772 773 ; (needs to be enabled for slow datastore recording and time tracking)
773 774 #appenlight.slow_requests = true
774 775
775 776 ; enable hooking to application loggers
776 777 #appenlight.logging = true
777 778
778 779 ; minimum log level for log capture
779 780 #appenlight.logging.level = WARNING
780 781
781 782 ; send logs only from erroneous/slow requests
782 783 ; (saves API quota for intensive logging)
783 784 #appenlight.logging_on_error = false
784 785
785 786 ; list of additional keywords that should be grabbed from the environ object;
786 787 ; can be a string with a comma separated list of words in lowercase
787 788 ; (by default the client will always send the following info:
788 789 ; 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that
789 790 ; start with HTTP*); this list can be extended with additional keywords here
790 791 #appenlight.environ_keys_whitelist =
791 792
792 793 ; list of keywords that should be blanked from the request object;
793 794 ; can be a string with a comma separated list of words in lowercase
794 795 ; (by default the client will always blank keys that contain the following words:
795 796 ; 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf');
796 797 ; this list can be extended with additional keywords set here
797 798 #appenlight.request_keys_blacklist =
798 799
799 800 ; list of namespaces that should be ignored when gathering log entries;
800 801 ; can be a string with a comma separated list of namespaces
801 802 ; (by default the client ignores its own entries: appenlight_client.client)
802 803 #appenlight.log_namespace_blacklist =
803 804
804 805 ; Statsd client config, this is used to send metrics to statsd
805 806 ; We recommend setting up statsd_exporter and scraping the metrics using Prometheus
806 807 #statsd.enabled = false
807 808 #statsd.statsd_host = 0.0.0.0
808 809 #statsd.statsd_port = 8125
809 810 #statsd.statsd_prefix =
810 811 #statsd.statsd_ipv6 = false
811 812
812 813 ; Configure logging automatically at server startup. Set to false
813 814 ; to use the custom logging config below.
814 815 ; RC_LOGGING_FORMATTER
815 816 ; RC_LOGGING_LEVEL
816 817 ; env variables can control the settings for logging in case of autoconfigure
817 818
818 819 #logging.autoconfigure = true
819 820
820 821 ; specify your own custom logging config file to configure logging
821 822 #logging.logging_conf_file = /path/to/custom_logging.ini
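A hedged sketch of how the two environment variables named above might be used when autoconfigure is left enabled (the `json` value is an assumption based on the formatter of that name defined at the end of this file):

    export RC_LOGGING_LEVEL=DEBUG
    export RC_LOGGING_FORMATTER=json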
822 823
823 824 ; Dummy marker to add new entries after.
824 825 ; Add any custom entries below. Please don't remove this marker.
825 826 custom.conf = 1
826 827
827 828
828 829 ; #####################
829 830 ; LOGGING CONFIGURATION
830 831 ; #####################
831 832
832 833 [loggers]
833 834 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper
834 835
835 836 [handlers]
836 837 keys = console, console_sql
837 838
838 839 [formatters]
839 840 keys = generic, json, color_formatter, color_formatter_sql
840 841
841 842 ; #######
842 843 ; LOGGERS
843 844 ; #######
844 845 [logger_root]
845 846 level = NOTSET
846 847 handlers = console
847 848
848 849 [logger_sqlalchemy]
849 850 level = INFO
850 851 handlers = console_sql
851 852 qualname = sqlalchemy.engine
852 853 propagate = 0
853 854
854 855 [logger_beaker]
855 856 level = DEBUG
856 857 handlers =
857 858 qualname = beaker.container
858 859 propagate = 1
859 860
860 861 [logger_rhodecode]
861 862 level = DEBUG
862 863 handlers =
863 864 qualname = rhodecode
864 865 propagate = 1
865 866
866 867 [logger_ssh_wrapper]
867 868 level = DEBUG
868 869 handlers =
869 870 qualname = ssh_wrapper
870 871 propagate = 1
871 872
872 873 [logger_celery]
873 874 level = DEBUG
874 875 handlers =
875 876 qualname = celery
876 877
877 878
878 879 ; ########
879 880 ; HANDLERS
880 881 ; ########
881 882
882 883 [handler_console]
883 884 class = StreamHandler
884 885 args = (sys.stderr, )
885 886 level = DEBUG
886 887 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
887 888 ; This allows sending properly formatted logs to grafana loki or elasticsearch
888 889 formatter = color_formatter
889 890
890 891 [handler_console_sql]
891 892 ; "level = DEBUG" logs SQL queries and results.
892 893 ; "level = INFO" logs SQL queries.
893 894 ; "level = WARN" logs neither. (Recommended for production systems.)
894 895 class = StreamHandler
895 896 args = (sys.stderr, )
896 897 level = WARN
897 898 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
898 899 ; This allows sending properly formatted logs to grafana loki or elasticsearch
899 900 formatter = color_formatter_sql
900 901
901 902 ; ##########
902 903 ; FORMATTERS
903 904 ; ##########
904 905
905 906 [formatter_generic]
906 907 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
907 908 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
908 909 datefmt = %Y-%m-%d %H:%M:%S
909 910
910 911 [formatter_color_formatter]
911 912 class = rhodecode.lib.logging_formatter.ColorFormatter
912 913 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
913 914 datefmt = %Y-%m-%d %H:%M:%S
914 915
915 916 [formatter_color_formatter_sql]
916 917 class = rhodecode.lib.logging_formatter.ColorFormatterSql
917 918 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
918 919 datefmt = %Y-%m-%d %H:%M:%S
919 920
920 921 [formatter_json]
921 922 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
922 923 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
@@ -1,873 +1,874 b''
1 1 ## -*- coding: utf-8 -*-
2 2
3 3 ; #########################################
4 4 ; RHODECODE COMMUNITY EDITION CONFIGURATION
5 5 ; #########################################
6 6
7 7 [DEFAULT]
8 8 ; Debug flag sets all loggers to debug, and enables request tracking
9 9 debug = false
10 10
11 11 ; ########################################################################
12 12 ; EMAIL CONFIGURATION
13 13 ; These settings will be used by the RhodeCode mailing system
14 14 ; ########################################################################
15 15
16 16 ; prefix all emails subjects with given prefix, helps filtering out emails
17 17 #email_prefix = [RhodeCode]
18 18
19 19 ; email FROM address all mails will be sent
20 20 #app_email_from = rhodecode-noreply@localhost
21 21
22 22 #smtp_server = mail.server.com
23 23 #smtp_username =
24 24 #smtp_password =
25 25 #smtp_port =
26 26 #smtp_use_tls = false
27 27 #smtp_use_ssl = true
28 28
29 29 [server:main]
30 30 ; COMMON HOST/IP CONFIG
31 31 host = 127.0.0.1
32 32 port = 5000
33 33
34 34
35 35 ; ###########################
36 36 ; GUNICORN APPLICATION SERVER
37 37 ; ###########################
38 38
39 39 ; run with gunicorn --paste rhodecode.ini
40 40
41 41 ; Module to use, this setting shouldn't be changed
42 42 use = egg:gunicorn#main
43 43
44 44 ; Sets the number of process workers. More workers means more concurrent connections
45 45 ; RhodeCode can handle at the same time. Each additional worker also increases
46 46 ; memory usage, as each has its own set of caches.
47 47 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
48 48 ; than 8-10 unless this is a really big deployment, e.g. 700-1000 users.
49 49 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
50 50 ; when using more than 1 worker.
51 51 workers = 2
52 52
53 53 ; Gunicorn access log level
54 54 loglevel = info
55 55
56 56 ; Process name visible in process list
57 57 proc_name = rhodecode
58 58
59 59 ; Type of worker class, one of `sync`, `gevent`
60 60 ; Recommended type is `gevent`
61 61 worker_class = gevent
62 62
63 63 ; The maximum number of simultaneous clients per worker. Valid only for gevent
64 64 worker_connections = 10
65 65
66 66 ; Max number of requests that worker will handle before being gracefully restarted.
67 67 ; Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
68 68 max_requests = 1000
69 69 max_requests_jitter = 30
70 70
71 71 ; Amount of time a worker can spend handling a request before it
72 72 ; gets killed and restarted. By default set to 21600 (6hrs)
73 73 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
74 74 timeout = 21600
75 75
76 76 ; The maximum size of HTTP request line in bytes.
77 77 ; 0 for unlimited
78 78 limit_request_line = 0
79 79
80 80 ; Limit the number of HTTP headers fields in a request.
81 81 ; By default this value is 100 and can't be larger than 32768.
82 82 limit_request_fields = 32768
83 83
84 84 ; Limit the allowed size of an HTTP request header field.
85 85 ; Value is a positive number or 0.
86 86 ; Setting it to 0 will allow unlimited header field sizes.
87 87 limit_request_field_size = 0
88 88
89 89 ; Timeout for graceful workers restart.
90 90 ; After receiving a restart signal, workers have this much time to finish
91 91 ; serving requests. Workers still alive after the timeout (starting from the
92 92 ; receipt of the restart signal) are force killed.
93 93 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
94 94 graceful_timeout = 3600
95 95
96 96 # The number of seconds to wait for requests on a Keep-Alive connection.
97 97 # Generally set in the 1-5 seconds range.
98 98 keepalive = 2
99 99
100 100 ; Maximum memory usage that each worker can use before it will receive a
101 101 ; graceful restart signal. 0 = memory monitoring is disabled
102 102 ; Examples: 268435456 (256MB), 536870912 (512MB)
103 103 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
104 104 memory_max_usage = 0
105 105
106 106 ; How often in seconds to check for memory usage for each gunicorn worker
107 107 memory_usage_check_interval = 60
108 108
109 109 ; Threshold value below which we don't recycle a worker if garbage collection
110 110 ; frees up enough resources. Before each restart we try to run GC on the worker;
111 111 ; if enough memory is freed after that, the restart will not happen.
112 112 memory_usage_recovery_threshold = 0.8
113 113
114 114
115 115 ; Prefix middleware for RhodeCode.
116 116 ; recommended when using a proxy setup.
117 117 ; Allows serving RhodeCode under a prefix on the server,
118 118 ; e.g. https://server.com/custom_prefix. Enable the `filter-with =` option below as well.
119 119 ; And set your prefix like: `prefix = /custom_prefix`
120 120 ; Be sure to also set beaker.session.cookie_path = /custom_prefix if you need
121 121 ; your cookies to only work on the prefix url
122 122 [filter:proxy-prefix]
123 123 use = egg:PasteDeploy#prefix
124 124 prefix = /
125 125
126 126 [app:main]
127 127 ; The %(here)s variable will be replaced with the absolute path of the parent directory
128 128 ; of this file
129 129 ; Each option in the app:main section can be overridden by an environment variable
130 130 ;
131 131 ;To override an option:
132 132 ;
133 133 ;RC_<KeyName>
134 134 ;Everything should be uppercase, . and - should be replaced by _.
135 135 ;For example, if you have these configuration settings:
136 136 ;rc_cache.repo_object.backend = foo
137 137 ;can be overridden by
138 138 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
139 139
140 140 use = egg:rhodecode-enterprise-ce
141 141
142 142 ; enable proxy prefix middleware, defined above
143 143 #filter-with = proxy-prefix
144 144
145 145 ; encryption key used to encrypt social plugin tokens,
146 146 ; remote_urls with credentials etc, if not set it defaults to
147 147 ; `beaker.session.secret`
148 148 #rhodecode.encrypted_values.secret =
149 149
150 150 ; decryption strict mode (enabled by default). It controls if decryption raises
151 151 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
152 152 #rhodecode.encrypted_values.strict = false
153 153
154 154 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
155 155 ; fernet is safer, and we strongly recommend switching to it.
156 156 ; Due to backward compatibility aes is used as default.
157 157 #rhodecode.encrypted_values.algorithm = fernet
158 158
159 159 ; Return gzipped responses from RhodeCode (static files/application)
160 160 gzip_responses = false
161 161
162 162 ; Auto-generate javascript routes file on startup
163 163 generate_js_files = false
164 164
165 165 ; System global default language.
166 166 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
167 167 lang = en
168 168
169 169 ; Perform a full repository scan and import on each server start.
170 170 ; Setting this to true could lead to a very long startup time.
171 171 startup.import_repos = false
172 172
173 173 ; Uncomment and set this path to use archive download cache.
174 174 ; Once enabled, generated archives will be cached at this location
175 175 ; and served from the cache during subsequent requests for the same archive of
176 176 ; the repository.
177 177 #archive_cache_dir = /tmp/tarballcache
178 178
179 179 ; URL at which the application is running. This is used for Bootstrapping
180 180 ; requests in context when no web request is available. Used in ishell, or
181 181 ; SSH calls. Set this for events to receive proper url for SSH calls.
182 182 app.base_url = http://rhodecode.local
183 183
184 184 ; Unique application ID. Should be a random unique string for security.
185 185 app_instance_uuid = rc-production
186 186
187 187 ; Cut off limit for large diffs (size in bytes). If overall diff size on
188 188 ; commit, or pull request exceeds this limit this diff will be displayed
189 189 ; partially. E.g 512000 == 512Kb
190 190 cut_off_limit_diff = 512000
191 191
192 192 ; Cut off limit for large files inside diffs (size in bytes). Each individual
193 193 ; file inside diff which exceeds this limit will be displayed partially.
194 194 ; E.g 128000 == 128Kb
195 195 cut_off_limit_file = 128000
196 196
197 197 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
198 198 vcs_full_cache = true
199 199
200 200 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
201 201 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
202 202 force_https = false
203 203
204 204 ; use Strict-Transport-Security headers
205 205 use_htsts = false
206 206
207 207 ; Set to true if your repos are exposed using the dumb protocol
208 208 git_update_server_info = false
209 209
210 210 ; RSS/ATOM feed options
211 211 rss_cut_off_limit = 256000
212 212 rss_items_per_page = 10
213 213 rss_include_diff = false
214 214
215 215 ; gist URL alias, used to create nicer urls for gist. This should be an
216 216 ; url that does rewrites to _admin/gists/{gistid}.
217 217 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
218 218 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
219 219 gist_alias_url =
220 220
221 221 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
222 222 ; used for access.
223 223 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
224 224 ; came from the logged-in user who owns this authentication token.
225 225 ; Additionally, the @TOKEN syntax can be used to bind the view to a specific
226 226 ; authentication token. Such a view would only be accessible when used together
227 227 ; with this authentication token.
228 228 ; list of all views can be found under `/_admin/permissions/auth_token_access`
229 229 ; The list should be "," separated and on a single line.
230 230 ; Most common views to enable:
231 231
232 232 # RepoCommitsView:repo_commit_download
233 233 # RepoCommitsView:repo_commit_patch
234 234 # RepoCommitsView:repo_commit_raw
235 235 # RepoCommitsView:repo_commit_raw@TOKEN
236 236 # RepoFilesView:repo_files_diff
237 237 # RepoFilesView:repo_archivefile
238 238 # RepoFilesView:repo_file_raw
239 239 # GistView:*
240 240 api_access_controllers_whitelist =
241 241
242 242 ; Default encoding used to convert from and to unicode
243 243 ; can also be a comma separated list of encodings in case of mixed encodings
244 244 default_encoding = UTF-8
245 245
246 246 ; instance-id prefix
247 247 ; a prefix key for this instance used for cache invalidation when running
248 248 ; multiple instances of RhodeCode, make sure it's globally unique for
249 249 ; all running RhodeCode instances. Leave empty if you don't use it
250 250 instance_id =
251 251
252 252 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
253 253 ; of an authentication plugin even if it is disabled by its settings.
254 254 ; This could be useful if you are unable to log in to the system due to broken
255 255 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
256 256 ; module to log in again and fix the settings.
257 257 ; Available builtin plugin IDs (hash is part of the ID):
258 258 ; egg:rhodecode-enterprise-ce#rhodecode
259 259 ; egg:rhodecode-enterprise-ce#pam
260 260 ; egg:rhodecode-enterprise-ce#ldap
261 261 ; egg:rhodecode-enterprise-ce#jasig_cas
262 262 ; egg:rhodecode-enterprise-ce#headers
263 263 ; egg:rhodecode-enterprise-ce#crowd
264 264
265 265 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
266 266
267 267 ; Flag to control loading of legacy plugins in py:/path format
268 268 auth_plugin.import_legacy_plugins = true
269 269
270 270 ; alternative return HTTP header for failed authentication. Default HTTP
271 271 ; response is 401 HTTPUnauthorized. Currently HG clients have trouble
272 272 ; handling that, causing a series of failed authentication calls.
273 273 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
274 274 ; This will be served instead of default 401 on bad authentication
275 275 auth_ret_code =
276 276
277 277 ; use special detection method when serving auth_ret_code, instead of serving
278 278 ; ret_code directly, use 401 initially (Which triggers credentials prompt)
279 279 ; and then serve auth_ret_code to clients
280 280 auth_ret_code_detection = false
281 281
282 282 ; locking return code. When repository is locked return this HTTP code. 2XX
283 283 ; codes don't break the transactions while 4XX codes do
284 284 lock_ret_code = 423
285 285
286 286 ; allows changing the repository location on the settings page
287 287 allow_repo_location_change = true
288 288
289 289 ; allows setting up custom hooks on the settings page
290 290 allow_custom_hooks_settings = true
291 291
292 292 ; Generated license token required for EE edition license.
293 293 ; New generated token value can be found in Admin > settings > license page.
294 294 license_token =
295 295
296 296 ; This flag hides sensitive information on the license page such as token, and license data
297 297 license.hide_license_info = false
298 298
299 299 ; supervisor connection uri, for managing supervisor and logs.
300 300 supervisor.uri =
301 301
302 302 ; supervisord group name/id we only want this RC instance to handle
303 303 supervisor.group_id = prod
304 304
305 305 ; Display extended labs settings
306 306 labs_settings_active = true
307 307
308 308 ; Custom exception store path, defaults to TMPDIR
309 309 ; This is used to store exceptions from RhodeCode in a shared directory
310 310 #exception_tracker.store_path =
311 311
312 312 ; Send email with exception details when it happens
313 313 #exception_tracker.send_email = false
314 314
315 315 ; Comma separated list of recipients for exception emails,
316 316 ; e.g admin@rhodecode.com,devops@rhodecode.com
317 317 ; Can be left empty, then emails will be sent to ALL super-admins
318 318 #exception_tracker.send_email_recipients =
319 319
320 320 ; optional prefix to add to the email Subject
321 321 #exception_tracker.email_prefix = [RHODECODE ERROR]
322 322
323 323 ; File store configuration. This is used to store and serve uploaded files
324 324 file_store.enabled = true
325 325
326 326 ; Storage backend, available options are: local
327 327 file_store.backend = local
328 328
329 329 ; path to store the uploaded binaries
330 330 file_store.storage_path = %(here)s/data/file_store
331 331
332 332
333 333 ; #############
334 334 ; CELERY CONFIG
335 335 ; #############
336 336
337 337 ; manually run celery: /path/to/celery worker -E --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
338 338
339 339 use_celery = false
340 340
341 341 ; path to store schedule database
342 342 #celerybeat-schedule.path =
343 343
344 344 ; connection url to the message broker (default redis)
345 345 celery.broker_url = redis://localhost:6379/8
346 346
347 347 ; rabbitmq example
348 348 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
349 349
350 350 ; maximum tasks to execute before worker restart
351 351 celery.max_tasks_per_child = 100
352 352
353 353 ; tasks will never be sent to the queue, but executed locally instead.
354 354 celery.task_always_eager = false
355 355
356 356 ; #############
357 357 ; DOGPILE CACHE
358 358 ; #############
359 359
360 360 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
361 361 ; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space
362 362 cache_dir = %(here)s/data
363 363
364 364 ; *********************************************
365 365 ; `sql_cache_short` cache for heavy SQL queries
366 366 ; Only supported backend is `memory_lru`
367 367 ; *********************************************
368 368 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
369 369 rc_cache.sql_cache_short.expiration_time = 30
370 370
371 371
372 372 ; *****************************************************
373 373 ; `cache_repo_longterm` cache for repo object instances
374 374 ; Only supported backend is `memory_lru`
375 375 ; *****************************************************
376 376 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
377 377 ; by default we use 30 Days, cache is still invalidated on push
378 378 rc_cache.cache_repo_longterm.expiration_time = 2592000
379 379 ; max items in LRU cache, set to smaller number to save memory, and expire last used caches
380 380 rc_cache.cache_repo_longterm.max_size = 10000
381 381
382 382
383 383 ; *********************************************
384 384 ; `cache_general` cache for general purpose use
385 385 ; for simplicity use rc.file_namespace backend,
386 386 ; for performance and scale use rc.redis
387 387 ; *********************************************
388 388 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
389 389 rc_cache.cache_general.expiration_time = 43200
390 390 ; file cache store path. Defaults to the `cache_dir =` value, or tempdir if neither is set
391 391 #rc_cache.cache_general.arguments.filename = /tmp/cache_general.db
392 392
393 393 ; alternative `cache_general` redis backend with distributed lock
394 394 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
395 395 #rc_cache.cache_general.expiration_time = 300
396 396
397 397 ; redis_expiration_time needs to be greater than expiration_time
398 398 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
399 399
400 400 #rc_cache.cache_general.arguments.host = localhost
401 401 #rc_cache.cache_general.arguments.port = 6379
402 402 #rc_cache.cache_general.arguments.db = 0
403 403 #rc_cache.cache_general.arguments.socket_timeout = 30
404 404 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
405 405 #rc_cache.cache_general.arguments.distributed_lock = true
406 406
407 407 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
408 408 #rc_cache.cache_general.arguments.lock_auto_renewal = true
409 409
410 410 ; *************************************************
411 411 ; `cache_perms` cache for permission tree, auth TTL
412 412 ; for simplicity use rc.file_namespace backend,
413 413 ; for performance and scale use rc.redis
414 414 ; *************************************************
415 415 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
416 416 rc_cache.cache_perms.expiration_time = 3600
417 417 ; file cache store path. Defaults to the `cache_dir =` value, or tempdir if neither is set
418 418 #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms.db
419 419
420 420 ; alternative `cache_perms` redis backend with distributed lock
421 421 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
422 422 #rc_cache.cache_perms.expiration_time = 300
423 423
424 424 ; redis_expiration_time needs to be greater than expiration_time
425 425 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
426 426
427 427 #rc_cache.cache_perms.arguments.host = localhost
428 428 #rc_cache.cache_perms.arguments.port = 6379
429 429 #rc_cache.cache_perms.arguments.db = 0
430 430 #rc_cache.cache_perms.arguments.socket_timeout = 30
431 431 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
432 432 #rc_cache.cache_perms.arguments.distributed_lock = true
433 433
434 434 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
435 435 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
436 436
437 437 ; ***************************************************
438 438 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
439 439 ; for simplicity use rc.file_namespace backend,
440 440 ; for performance and scale use rc.redis
441 441 ; ***************************************************
442 442 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
443 443 rc_cache.cache_repo.expiration_time = 2592000
444 444 ; file cache store path. Defaults to the `cache_dir =` value, or tempdir if neither is set
445 445 #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo.db
446 446
447 447 ; alternative `cache_repo` redis backend with distributed lock
448 448 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
449 449 #rc_cache.cache_repo.expiration_time = 2592000
450 450
451 451 ; redis_expiration_time needs to be greater than expiration_time
452 452 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
453 453
454 454 #rc_cache.cache_repo.arguments.host = localhost
455 455 #rc_cache.cache_repo.arguments.port = 6379
456 456 #rc_cache.cache_repo.arguments.db = 1
457 457 #rc_cache.cache_repo.arguments.socket_timeout = 30
458 458 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
459 459 #rc_cache.cache_repo.arguments.distributed_lock = true
460 460
461 461 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
462 462 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
463 463
464 464 ; ##############
465 465 ; BEAKER SESSION
466 466 ; ##############
467 467
468 468 ; beaker.session.type is the type of storage used for the logged-in users' sessions. Currently allowed
469 469 ; types are file, ext:redis, ext:database, ext:memcached, and memory (default if not specified).
470 470 ; The fastest ones are Redis (ext:redis) and ext:database
471 471 beaker.session.type = file
472 472 beaker.session.data_dir = %(here)s/data/sessions
473 473
474 474 ; Redis based sessions
475 475 #beaker.session.type = ext:redis
476 476 #beaker.session.url = redis://127.0.0.1:6379/2
477 477
478 478 ; DB based session, fast, and allows easy management of logged-in users
479 479 #beaker.session.type = ext:database
480 480 #beaker.session.table_name = db_session
481 481 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
482 482 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
483 483 #beaker.session.sa.pool_recycle = 3600
484 484 #beaker.session.sa.echo = false
485 485
486 486 beaker.session.key = rhodecode
487 487 beaker.session.secret = production-rc-uytcxaz
488 488 beaker.session.lock_dir = %(here)s/data/sessions/lock
489 489
490 490 ; Secure encrypted cookie. Requires AES and AES python libraries
491 491 ; you must disable beaker.session.secret to use this
492 492 #beaker.session.encrypt_key = key_for_encryption
493 493 #beaker.session.validate_key = validation_key
494 494
495 495 ; Sets the session as invalid (also logging out the user) if it has not been
496 496 ; accessed for the given amount of time in seconds
497 497 beaker.session.timeout = 2592000
498 498 beaker.session.httponly = true
499 499
500 500 ; Path to use for the cookie. Set to prefix if you use prefix middleware
501 501 #beaker.session.cookie_path = /custom_prefix
502 502
503 503 ; Set https secure cookie
504 504 beaker.session.secure = false
505 505
506 506 ; default cookie expiration time in seconds; set to `true` to expire
507 507 ; at browser close
508 508 #beaker.session.cookie_expires = 3600
509 509
510 510 ; #############################
511 511 ; SEARCH INDEXING CONFIGURATION
512 512 ; #############################
513 513
514 514 ; Full text search indexer is available in rhodecode-tools under
515 515 ; `rhodecode-tools index` command
516 516
517 517 ; WHOOSH Backend, doesn't require additional services to run
518 518 ; it works well with a few dozen repos
519 519 search.module = rhodecode.lib.index.whoosh
520 520 search.location = %(here)s/data/index
521 521
522 522 ; ####################
523 523 ; CHANNELSTREAM CONFIG
524 524 ; ####################
525 525
526 526 ; channelstream enables persistent connections and live notification
527 527 ; in the system. It's also used by the chat system
528 528
529 529 channelstream.enabled = false
530 530
531 531 ; server address for channelstream server on the backend
532 532 channelstream.server = 127.0.0.1:9800
533 533
534 534 ; location of the channelstream server from outside world
535 535 ; use ws:// for http or wss:// for https. This address needs to be handled
536 536 ; by an external HTTP server such as Nginx or Apache
537 537 ; see Nginx/Apache configuration examples in our docs
538 538 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
539 539 channelstream.secret = secret
540 540 channelstream.history.location = %(here)s/channelstream_history
541 541
542 542 ; Internal application path that Javascript uses to connect to.
543 543 ; If you use proxy-prefix the prefix should be added before /_channelstream
544 544 channelstream.proxy_path = /_channelstream
545 545
546 546
547 547 ; ##############################
548 548 ; MAIN RHODECODE DATABASE CONFIG
549 549 ; ##############################
550 550
551 551 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
552 552 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
553 553 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
554 554 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
555 555 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
556 556
557 557 sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
558 558
559 559 ; see sqlalchemy docs for other advanced settings
560 560 ; print the sql statements to output
561 561 sqlalchemy.db1.echo = false
562 562
563 563 ; recycle the connections after this amount of seconds
564 564 sqlalchemy.db1.pool_recycle = 3600
565 565 sqlalchemy.db1.convert_unicode = true
566 566
567 567 ; the number of connections to keep open inside the connection pool.
568 568 ; 0 indicates no limit
569 569 #sqlalchemy.db1.pool_size = 5
570 570
571 571 ; The number of connections to allow in connection pool "overflow", that is
572 572 ; connections that can be opened above and beyond the pool_size setting,
573 573 ; which defaults to five.
574 574 #sqlalchemy.db1.max_overflow = 10
575 575
576 576 ; Connection check ping, used to detect broken database connections
577 577 ; can be enabled to better handle 'MySQL has gone away' errors
578 578 #sqlalchemy.db1.ping_connection = true
579 579
580 580 ; ##########
581 581 ; VCS CONFIG
582 582 ; ##########
583 583 vcs.server.enable = true
584 584 vcs.server = localhost:9900
585 585
586 586 ; Web server connectivity protocol, responsible for web based VCS operations
587 587 ; Available protocols are:
588 588 ; `http` - use http-rpc backend (default)
589 589 vcs.server.protocol = http
590 590
591 591 ; Push/Pull operations protocol, available options are:
592 592 ; `http` - use http-rpc backend (default)
593 593 vcs.scm_app_implementation = http
594 594
595 595 ; Push/Pull operations hooks protocol, available options are:
596 596 ; `http` - use http-rpc backend (default)
597 597 vcs.hooks.protocol = http
598 598
599 ; Host on which this instance is listening for hooks. If vcsserver is in other location
600 ; this should be adjusted.
601 vcs.hooks.host = 127.0.0.1
599 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
600 ; accessible over the network.
601 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
602 vcs.hooks.host = *
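Roughly, the `*` value is resolved by the hooks callback daemon to the machine's hostname before binding, so a vcsserver running on another host or in another container can reach the callback endpoint over the network. A minimal sketch of that resolution, mirroring the `get_hostname` helper added to rhodecode/lib/hooks_daemon.py further below:

import socket

def resolve_hooks_host(configured_host):
    # '*' (or an empty value) means: bind to the current hostname instead
    # of loopback, so the vcsserver can call back into this instance
    if not configured_host or configured_host == '*':
        return socket.gethostname() or '127.0.0.1'
    return configured_host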
602 603
603 604 ; Start VCSServer with this instance as a subprocess, useful for development
604 605 vcs.start_server = false
605 606
606 607 ; List of enabled VCS backends, available options are:
607 608 ; `hg` - mercurial
608 609 ; `git` - git
609 610 ; `svn` - subversion
610 611 vcs.backends = hg, git, svn
611 612
612 613 ; Wait this number of seconds before killing connection to the vcsserver
613 614 vcs.connection_timeout = 3600
614 615
615 616 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
616 617 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
617 618 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
618 619 #vcs.svn.compatible_version = 1.8
619 620
620 621 ; Cache flag to cache vcsserver remote calls locally
621 622 ; It uses cache_region `cache_repo`
622 623 vcs.methods.cache = true
623 624
624 625 ; ####################################################
625 626 ; Subversion proxy support (mod_dav_svn)
626 627 ; Maps RhodeCode repo groups into SVN paths for Apache
627 628 ; ####################################################
628 629
629 630 ; Enable or disable the config file generation.
630 631 svn.proxy.generate_config = false
631 632
632 633 ; Generate config file with `SVNListParentPath` set to `On`.
633 634 svn.proxy.list_parent_path = true
634 635
635 636 ; Set location and file name of generated config file.
636 637 svn.proxy.config_file_path = %(here)s/mod_dav_svn.conf
637 638
638 639 ; alternative mod_dav config template. This needs to be a valid mako template
639 640 ; Example template can be found in the source code:
640 641 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
641 642 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
642 643
643 644 ; Used as a prefix to the `Location` block in the generated config file.
644 645 ; In most cases it should be set to `/`.
645 646 svn.proxy.location_root = /
646 647
647 648 ; Command to reload the mod dav svn configuration on change.
648 649 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
649 650 ; Make sure user who runs RhodeCode process is allowed to reload Apache
650 651 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
651 652
652 653 ; If the timeout expires before the reload command finishes, the command will
653 654 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
654 655 #svn.proxy.reload_timeout = 10
655 656
656 657 ; ####################
657 658 ; SSH Support Settings
658 659 ; ####################
659 660
660 661 ; Defines if a custom authorized_keys file should be created and written on
661 662 ; any change of user SSH keys. Setting this to false also disables the possibility
662 663 ; of users adding SSH keys from the web interface. Super admins can still
663 664 ; manage SSH keys.
664 665 ssh.generate_authorized_keyfile = false
665 666
666 667 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
667 668 # ssh.authorized_keys_ssh_opts =
668 669
669 670 ; Path to the authorized_keys file where the generated entries are placed.
670 671 ; It is possible to have multiple key files specified in `sshd_config` e.g.
671 672 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
672 673 ssh.authorized_keys_file_path = ~/.ssh/authorized_keys_rhodecode
673 674
674 675 ; Command to execute the SSH wrapper. The binary is available in the
675 676 ; RhodeCode installation directory.
676 677 ; e.g ~/.rccontrol/community-1/profile/bin/rc-ssh-wrapper
677 678 ssh.wrapper_cmd = ~/.rccontrol/community-1/rc-ssh-wrapper
678 679
679 680 ; Allow shell when executing the ssh-wrapper command
680 681 ssh.wrapper_cmd_allow_shell = false
681 682
682 683 ; Enables logging, and detailed output sent back to the client during SSH
683 684 ; operations. Useful for debugging, shouldn't be used in production.
684 685 ssh.enable_debug_logging = false
685 686
686 687 ; Paths to binary executables; by default they are just the binary names, but we can
687 688 ; override them if we want to use custom ones
688 689 ssh.executable.hg = ~/.rccontrol/vcsserver-1/profile/bin/hg
689 690 ssh.executable.git = ~/.rccontrol/vcsserver-1/profile/bin/git
690 691 ssh.executable.svn = ~/.rccontrol/vcsserver-1/profile/bin/svnserve
691 692
692 693 ; Enables SSH key generator web interface. Disabling this still allows users
693 694 ; to add their own keys.
694 695 ssh.enable_ui_key_generator = true
695 696
696 697
697 698 ; #################
698 699 ; APPENLIGHT CONFIG
699 700 ; #################
700 701
701 702 ; Appenlight is tailored to work with RhodeCode, see
702 703 ; http://appenlight.rhodecode.com for details how to obtain an account
703 704
704 705 ; Appenlight integration enabled
705 706 #appenlight = false
706 707
707 708 #appenlight.server_url = https://api.appenlight.com
708 709 #appenlight.api_key = YOUR_API_KEY
709 710 #appenlight.transport_config = https://api.appenlight.com?threaded=1&timeout=5
710 711
711 712 ; used for JS client
712 713 #appenlight.api_public_key = YOUR_API_PUBLIC_KEY
713 714
714 715 ; TWEAK AMOUNT OF INFO SENT HERE
715 716
716 717 ; enables 404 error logging (default False)
717 718 #appenlight.report_404 = false
718 719
719 720 ; time in seconds after which a request is considered slow (default 1)
720 721 #appenlight.slow_request_time = 1
721 722
722 723 ; record slow requests in application
723 724 ; (needs to be enabled for slow datastore recording and time tracking)
724 725 #appenlight.slow_requests = true
725 726
726 727 ; enable hooking to application loggers
727 728 #appenlight.logging = true
728 729
729 730 ; minimum log level for log capture
730 731 #appenlight.logging.level = WARNING
731 732
732 733 ; send logs only from erroneous/slow requests
733 734 ; (saves API quota for intensive logging)
734 735 #appenlight.logging_on_error = false
735 736
736 737 ; list of additional keywords that should be grabbed from environ object
737 738 ; can be string with comma separated list of words in lowercase
738 739 ; (by default the client will always send the following info:
739 740 ; 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that
740 741 ; start with HTTP*); this list can be extended with additional keywords here
741 742 #appenlight.environ_keys_whitelist =
742 743
743 744 ; list of keywords that should be blanked from request object
744 745 ; can be string with comma separated list of words in lowercase
745 746 ; (by default the client will always blank keys that contain the following words:
746 747 ; 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf');
747 748 ; this list can be extended with additional keywords set here
748 749 #appenlight.request_keys_blacklist =
749 750
750 751 ; list of namespaces that should be ignored when gathering log entries
751 752 ; can be string with comma separated list of namespaces
752 753 ; (by default the client ignores own entries: appenlight_client.client)
753 754 #appenlight.log_namespace_blacklist =
754 755
755 756 ; Statsd client config, this is used to send metrics to statsd
756 757 ; We recommend setting up statsd_exporter and scraping the metrics using Prometheus
757 758 #statsd.enabled = false
758 759 #statsd.statsd_host = 0.0.0.0
759 760 #statsd.statsd_port = 8125
760 761 #statsd.statsd_prefix =
761 762 #statsd.statsd_ipv6 = false
762 763
763 764 ; configure logging automatically at server startup; set to false
764 765 ; to use the custom logging config below.
765 766 ; RC_LOGGING_FORMATTER
766 767 ; RC_LOGGING_LEVEL
767 768 ; env variables can control the settings for logging in case of autoconfigure
768 769
769 770 #logging.autoconfigure = true
770 771
771 772 ; specify your own custom logging config file to configure logging
772 773 #logging.logging_conf_file = /path/to/custom_logging.ini
773 774
774 775 ; Dummy marker to add new entries after.
775 776 ; Add any custom entries below. Please don't remove this marker.
776 777 custom.conf = 1
777 778
778 779
779 780 ; #####################
780 781 ; LOGGING CONFIGURATION
781 782 ; #####################
782 783
783 784 [loggers]
784 785 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper
785 786
786 787 [handlers]
787 788 keys = console, console_sql
788 789
789 790 [formatters]
790 791 keys = generic, json, color_formatter, color_formatter_sql
791 792
792 793 ; #######
793 794 ; LOGGERS
794 795 ; #######
795 796 [logger_root]
796 797 level = NOTSET
797 798 handlers = console
798 799
799 800 [logger_sqlalchemy]
800 801 level = INFO
801 802 handlers = console_sql
802 803 qualname = sqlalchemy.engine
803 804 propagate = 0
804 805
805 806 [logger_beaker]
806 807 level = DEBUG
807 808 handlers =
808 809 qualname = beaker.container
809 810 propagate = 1
810 811
811 812 [logger_rhodecode]
812 813 level = DEBUG
813 814 handlers =
814 815 qualname = rhodecode
815 816 propagate = 1
816 817
817 818 [logger_ssh_wrapper]
818 819 level = DEBUG
819 820 handlers =
820 821 qualname = ssh_wrapper
821 822 propagate = 1
822 823
823 824 [logger_celery]
824 825 level = DEBUG
825 826 handlers =
826 827 qualname = celery
827 828
828 829
829 830 ; ########
830 831 ; HANDLERS
831 832 ; ########
832 833
833 834 [handler_console]
834 835 class = StreamHandler
835 836 args = (sys.stderr, )
836 837 level = INFO
837 838 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
838 839 ; This allows sending properly formatted logs to grafana loki or elasticsearch
839 840 formatter = generic
840 841
841 842 [handler_console_sql]
842 843 ; "level = DEBUG" logs SQL queries and results.
843 844 ; "level = INFO" logs SQL queries.
844 845 ; "level = WARN" logs neither. (Recommended for production systems.)
845 846 class = StreamHandler
846 847 args = (sys.stderr, )
847 848 level = WARN
848 849 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
849 850 ; This allows sending properly formatted logs to grafana loki or elasticsearch
850 851 formatter = generic
851 852
852 853 ; ##########
853 854 ; FORMATTERS
854 855 ; ##########
855 856
856 857 [formatter_generic]
857 858 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
858 859 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
859 860 datefmt = %Y-%m-%d %H:%M:%S
860 861
861 862 [formatter_color_formatter]
862 863 class = rhodecode.lib.logging_formatter.ColorFormatter
863 864 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
864 865 datefmt = %Y-%m-%d %H:%M:%S
865 866
866 867 [formatter_color_formatter_sql]
867 868 class = rhodecode.lib.logging_formatter.ColorFormatterSql
868 869 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
869 870 datefmt = %Y-%m-%d %H:%M:%S
870 871
871 872 [formatter_json]
872 873 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
873 874 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
@@ -1,345 +1,350 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2020 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 import os
22 22 import time
23 23 import logging
24 24 import tempfile
25 25 import traceback
26 26 import threading
27 27 import socket
28 28
29 29 from BaseHTTPServer import BaseHTTPRequestHandler
30 30 from SocketServer import TCPServer
31 31
32 32 import rhodecode
33 33 from rhodecode.lib.exceptions import HTTPLockedRC, HTTPBranchProtected
34 34 from rhodecode.model import meta
35 35 from rhodecode.lib.base import bootstrap_request, bootstrap_config
36 36 from rhodecode.lib import hooks_base
37 37 from rhodecode.lib.utils2 import AttributeDict
38 38 from rhodecode.lib.ext_json import json
39 39 from rhodecode.lib import rc_cache
40 40
41 41 log = logging.getLogger(__name__)
42 42
43 43
44 44 class HooksHttpHandler(BaseHTTPRequestHandler):
45 45
46 46 def do_POST(self):
47 47 method, extras = self._read_request()
48 48 txn_id = getattr(self.server, 'txn_id', None)
49 49 if txn_id:
50 50 log.debug('Computing TXN_ID based on `%s`:`%s`',
51 51 extras['repository'], extras['txn_id'])
52 52 computed_txn_id = rc_cache.utils.compute_key_from_params(
53 53 extras['repository'], extras['txn_id'])
54 54 if txn_id != computed_txn_id:
55 55 raise Exception(
56 56 'TXN ID fail: expected {} got {} instead'.format(
57 57 txn_id, computed_txn_id))
58 58
59 59 try:
60 60 result = self._call_hook(method, extras)
61 61 except Exception as e:
62 62 exc_tb = traceback.format_exc()
63 63 result = {
64 64 'exception': e.__class__.__name__,
65 65 'exception_traceback': exc_tb,
66 66 'exception_args': e.args
67 67 }
68 68 self._write_response(result)
69 69
70 70 def _read_request(self):
71 71 length = int(self.headers['Content-Length'])
72 72 body = self.rfile.read(length).decode('utf-8')
73 73 data = json.loads(body)
74 74 return data['method'], data['extras']
75 75
76 76 def _write_response(self, result):
77 77 self.send_response(200)
78 78 self.send_header("Content-type", "text/json")
79 79 self.end_headers()
80 80 self.wfile.write(json.dumps(result))
81 81
82 82 def _call_hook(self, method, extras):
83 83 hooks = Hooks()
84 84 try:
85 85 result = getattr(hooks, method)(extras)
86 86 finally:
87 87 meta.Session.remove()
88 88 return result
89 89
90 90 def log_message(self, format, *args):
91 91 """
92 92 This is an overridden method of BaseHTTPRequestHandler which logs using the
93 93 logging library instead of writing directly to stderr.
94 94 """
95 95
96 96 message = format % args
97 97
98 98 log.debug(
99 99 "%s - - [%s] %s", self.client_address[0],
100 100 self.log_date_time_string(), message)
101 101
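For illustration, callers talk to this handler with a plain HTTP POST whose JSON body has the shape {"method": ..., "extras": ...}; a minimal Python 2 client sketch (the real callers live on the vcsserver side, so this is an assumption about usage, not their actual code) could look like:

import json
import urllib2

def call_hook(hooks_uri, method, extras):
    # hooks_uri is the 'host:port' string exposed by the callback daemon
    payload = json.dumps({'method': method, 'extras': extras})
    request = urllib2.Request(
        'http://{}/'.format(hooks_uri), payload,
        {'Content-Type': 'text/json'})
    # the handler always answers 200 with a JSON body, including on errors
    return json.loads(urllib2.urlopen(request).read())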
102 102
103 103 class DummyHooksCallbackDaemon(object):
104 104 hooks_uri = ''
105 105
106 106 def __init__(self):
107 107 self.hooks_module = Hooks.__module__
108 108
109 109 def __enter__(self):
110 110 log.debug('Running `%s` callback daemon', self.__class__.__name__)
111 111 return self
112 112
113 113 def __exit__(self, exc_type, exc_val, exc_tb):
114 114 log.debug('Exiting `%s` callback daemon', self.__class__.__name__)
115 115
116 116
117 117 class ThreadedHookCallbackDaemon(object):
118 118
119 119 _callback_thread = None
120 120 _daemon = None
121 121 _done = False
122 122
123 123 def __init__(self, txn_id=None, host=None, port=None):
124 124 self._prepare(txn_id=txn_id, host=host, port=port)
125 125
126 126 def __enter__(self):
127 127 log.debug('Running `%s` callback daemon', self.__class__.__name__)
128 128 self._run()
129 129 return self
130 130
131 131 def __exit__(self, exc_type, exc_val, exc_tb):
132 132 log.debug('Exiting `%s` callback daemon', self.__class__.__name__)
133 133 self._stop()
134 134
135 135 def _prepare(self, txn_id=None, host=None, port=None):
136 136 raise NotImplementedError()
137 137
138 138 def _run(self):
139 139 raise NotImplementedError()
140 140
141 141 def _stop(self):
142 142 raise NotImplementedError()
143 143
144 144
145 145 class HttpHooksCallbackDaemon(ThreadedHookCallbackDaemon):
146 146 """
147 147 Context manager which will run a callback daemon in a background thread.
148 148 """
149 149
150 150 hooks_uri = None
151 151
152 152 # From Python docs: Polling reduces our responsiveness to a shutdown
153 153 # request and wastes cpu at all other times.
154 154 POLL_INTERVAL = 0.01
155 155
156 def get_hostname(self):
157 return socket.gethostname() or '127.0.0.1'
158
156 159 def get_available_port(self):
157 160 mysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
158 mysocket.bind(('127.0.0.1', 0))
161 mysocket.bind((self.get_hostname(), 0))
159 162 port = mysocket.getsockname()[1]
160 163 mysocket.close()
161 164 del mysocket
162 165 return port
163 166
164 167 def _prepare(self, txn_id=None, host=None, port=None):
168 if not host or host == "*":
169 host = self.get_hostname()
170 if not port:
171 port = self.get_available_port()
165 172
166 host = host or '127.0.0.1'
167 port = port or self.get_available_port()
168 173 server_address = (host, port)
169 174 self.hooks_uri = '{}:{}'.format(host, port)
170 175 self.txn_id = txn_id
171 176 self._done = False
172 177
173 178 log.debug(
174 179 "Preparing HTTP callback daemon at `%s` and registering hook object: %s",
175 180 self.hooks_uri, HooksHttpHandler)
176 181
177 182 self._daemon = TCPServer(server_address, HooksHttpHandler)
178 183 # inject transaction_id for later verification
179 184 self._daemon.txn_id = self.txn_id
180 185
181 186 def _run(self):
182 187 log.debug("Running event loop of callback daemon in background thread")
183 188 callback_thread = threading.Thread(
184 189 target=self._daemon.serve_forever,
185 190 kwargs={'poll_interval': self.POLL_INTERVAL})
186 191 callback_thread.daemon = True
187 192 callback_thread.start()
188 193 self._callback_thread = callback_thread
189 194
190 195 def _stop(self):
191 196 log.debug("Waiting for background thread to finish.")
192 197 self._daemon.shutdown()
193 198 self._callback_thread.join()
194 199 self._daemon = None
195 200 self._callback_thread = None
196 201 if self.txn_id:
197 202 txn_id_file = get_txn_id_data_path(self.txn_id)
198 203 log.debug('Cleaning up TXN ID %s', txn_id_file)
199 204 if os.path.isfile(txn_id_file):
200 205 os.remove(txn_id_file)
201 206
202 207 log.debug("Background thread done.")
203 208
204 209
205 210 def get_txn_id_data_path(txn_id):
206 211 import rhodecode
207 212
208 213 root = rhodecode.CONFIG.get('cache_dir') or tempfile.gettempdir()
209 214 final_dir = os.path.join(root, 'svn_txn_id')
210 215
211 216 if not os.path.isdir(final_dir):
212 217 os.makedirs(final_dir)
213 218 return os.path.join(final_dir, 'rc_txn_id_{}'.format(txn_id))
214 219
215 220
216 221 def store_txn_id_data(txn_id, data_dict):
217 222 if not txn_id:
218 223 log.warning('Cannot store txn_id because it is empty')
219 224 return
220 225
221 226 path = get_txn_id_data_path(txn_id)
222 227 try:
223 228 with open(path, 'wb') as f:
224 229 f.write(json.dumps(data_dict))
225 230 except Exception:
226 231 log.exception('Failed to write txn_id metadata')
227 232
228 233
229 234 def get_txn_id_from_store(txn_id):
230 235 """
231 236 Reads txn_id from store and if present returns the data for callback manager
232 237 """
233 238 path = get_txn_id_data_path(txn_id)
234 239 try:
235 240 with open(path, 'rb') as f:
236 241 return json.loads(f.read())
237 242 except Exception:
238 243 return {}
239 244
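A small round-trip illustration of the txn_id store defined above (the txn_id value and the stored dict are made up for the example; 'port' is the key prepare_callback_daemon actually reads back):

store_txn_id_data('example-txn-id', {'port': 9191})
assert get_txn_id_from_store('example-txn-id') == {'port': 9191}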
240 245
241 246 def prepare_callback_daemon(extras, protocol, host, use_direct_calls, txn_id=None):
242 247 txn_details = get_txn_id_from_store(txn_id)
243 248 port = txn_details.get('port', 0)
244 249 if use_direct_calls:
245 250 callback_daemon = DummyHooksCallbackDaemon()
246 251 extras['hooks_module'] = callback_daemon.hooks_module
247 252 else:
248 253 if protocol == 'http':
249 254 callback_daemon = HttpHooksCallbackDaemon(
250 255 txn_id=txn_id, host=host, port=port)
251 256 else:
252 257 log.error('Unsupported callback daemon protocol "%s"', protocol)
253 258 raise Exception('Unsupported callback daemon protocol.')
254 259
255 260 extras['hooks_uri'] = callback_daemon.hooks_uri
256 261 extras['hooks_protocol'] = protocol
257 262 extras['time'] = time.time()
258 263
259 264 # register txn_id
260 265 extras['txn_id'] = txn_id
261 266 log.debug('Prepared a callback daemon: %s at url `%s`',
262 267 callback_daemon.__class__.__name__, callback_daemon.hooks_uri)
263 268 return callback_daemon, extras
264 269
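A hedged usage sketch of prepare_callback_daemon (the extras keys beyond the ones the function sets itself, and run_vcs_operation, are illustrative placeholders, not RhodeCode's actual call site):

extras = {'server_url': 'http://rhodecode.local', 'username': 'admin'}
callback_daemon, extras = prepare_callback_daemon(
    extras, protocol='http', host='*', use_direct_calls=False)
with callback_daemon:
    # while the daemon is running, extras['hooks_uri'] points at the local
    # HTTP endpoint the vcsserver calls back into during the operation
    run_vcs_operation(extras)  # hypothetical VCS push/pull invocation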
265 270
266 271 class Hooks(object):
267 272 """
268 273 Exposes the hooks for remote call backs
269 274 """
270 275
271 276 def repo_size(self, extras):
272 277 log.debug("Called repo_size of %s object", self)
273 278 return self._call_hook(hooks_base.repo_size, extras)
274 279
275 280 def pre_pull(self, extras):
276 281 log.debug("Called pre_pull of %s object", self)
277 282 return self._call_hook(hooks_base.pre_pull, extras)
278 283
279 284 def post_pull(self, extras):
280 285 log.debug("Called post_pull of %s object", self)
281 286 return self._call_hook(hooks_base.post_pull, extras)
282 287
283 288 def pre_push(self, extras):
284 289 log.debug("Called pre_push of %s object", self)
285 290 return self._call_hook(hooks_base.pre_push, extras)
286 291
287 292 def post_push(self, extras):
288 293 log.debug("Called post_push of %s object", self)
289 294 return self._call_hook(hooks_base.post_push, extras)
290 295
291 296 def _call_hook(self, hook, extras):
292 297 extras = AttributeDict(extras)
293 298 server_url = extras['server_url']
294 299 request = bootstrap_request(application_url=server_url)
295 300
296 301 bootstrap_config(request) # inject routes and other interfaces
297 302
298 303 # inject the user for usage in hooks
299 304 request.user = AttributeDict({'username': extras.username,
300 305 'ip_addr': extras.ip,
301 306 'user_id': extras.user_id})
302 307
303 308 extras.request = request
304 309
305 310 try:
306 311 result = hook(extras)
307 312 if result is None:
308 313 raise Exception(
309 314 'Failed to obtain hook result from func: {}'.format(hook))
310 315 except HTTPBranchProtected as handled_error:
311 316 # These special cases don't need error reporting. It's a case of a
312 317 # locked repo or a protected branch
313 318 result = AttributeDict({
314 319 'status': handled_error.code,
315 320 'output': handled_error.explanation
316 321 })
317 322 except (HTTPLockedRC, Exception) as error:
318 323 # locked needs different handling since we need to also
319 324 # handle PULL operations
320 325 exc_tb = ''
321 326 if not isinstance(error, HTTPLockedRC):
322 327 exc_tb = traceback.format_exc()
323 328 log.exception('Exception when handling hook %s', hook)
324 329 error_args = error.args
325 330 return {
326 331 'status': 128,
327 332 'output': '',
328 333 'exception': type(error).__name__,
329 334 'exception_traceback': exc_tb,
330 335 'exception_args': error_args,
331 336 }
332 337 finally:
333 338 meta.Session.remove()
334 339
335 340 log.debug('Got hook call response %s', result)
336 341 return {
337 342 'status': result.status,
338 343 'output': result.output,
339 344 }
340 345
341 346 def __enter__(self):
342 347 return self
343 348
344 349 def __exit__(self, exc_type, exc_val, exc_tb):
345 350 pass
@@ -1,332 +1,342 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2020 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 import json
22 22 import logging
23 23 from StringIO import StringIO
24 24
25 25 import mock
26 26 import pytest
27 27
28 28 from rhodecode.lib import hooks_daemon
29 29 from rhodecode.tests.utils import assert_message_in_log
30 30
31 31
32 32 class TestDummyHooksCallbackDaemon(object):
33 33 def test_hooks_module_path_set_properly(self):
34 34 daemon = hooks_daemon.DummyHooksCallbackDaemon()
35 35 assert daemon.hooks_module == 'rhodecode.lib.hooks_daemon'
36 36
37 37 def test_logs_entering_the_hook(self):
38 38 daemon = hooks_daemon.DummyHooksCallbackDaemon()
39 39 with mock.patch.object(hooks_daemon.log, 'debug') as log_mock:
40 40 with daemon as return_value:
41 41 log_mock.assert_called_once_with(
42 42 'Running `%s` callback daemon', 'DummyHooksCallbackDaemon')
43 43 assert return_value == daemon
44 44
45 45 def test_logs_exiting_the_hook(self):
46 46 daemon = hooks_daemon.DummyHooksCallbackDaemon()
47 47 with mock.patch.object(hooks_daemon.log, 'debug') as log_mock:
48 48 with daemon:
49 49 pass
50 50 log_mock.assert_called_with(
51 51 'Exiting `%s` callback daemon', 'DummyHooksCallbackDaemon')
52 52
53 53
54 54 class TestHooks(object):
55 55 def test_hooks_can_be_used_as_a_context_processor(self):
56 56 hooks = hooks_daemon.Hooks()
57 57 with hooks as return_value:
58 58 pass
59 59 assert hooks == return_value
60 60
61 61
62 62 class TestHooksHttpHandler(object):
63 63 def test_read_request_parses_method_name_and_arguments(self):
64 64 data = {
65 65 'method': 'test',
66 66 'extras': {
67 67 'param1': 1,
68 68 'param2': 'a'
69 69 }
70 70 }
71 71 request = self._generate_post_request(data)
72 72 hooks_patcher = mock.patch.object(
73 73 hooks_daemon.Hooks, data['method'], create=True, return_value=1)
74 74
75 75 with hooks_patcher as hooks_mock:
76 76 MockServer(hooks_daemon.HooksHttpHandler, request)
77 77
78 78 hooks_mock.assert_called_once_with(data['extras'])
79 79
80 80 def test_hooks_serialized_result_is_returned(self):
81 81 request = self._generate_post_request({})
82 82 rpc_method = 'test'
83 83 hook_result = {
84 84 'first': 'one',
85 85 'second': 2
86 86 }
87 87 read_patcher = mock.patch.object(
88 88 hooks_daemon.HooksHttpHandler, '_read_request',
89 89 return_value=(rpc_method, {}))
90 90 hooks_patcher = mock.patch.object(
91 91 hooks_daemon.Hooks, rpc_method, create=True,
92 92 return_value=hook_result)
93 93
94 94 with read_patcher, hooks_patcher:
95 95 server = MockServer(hooks_daemon.HooksHttpHandler, request)
96 96
97 97 expected_result = json.dumps(hook_result)
98 98 assert server.request.output_stream.buflist[-1] == expected_result
99 99
100 100 def test_exception_is_returned_in_response(self):
101 101 request = self._generate_post_request({})
102 102 rpc_method = 'test'
103 103 read_patcher = mock.patch.object(
104 104 hooks_daemon.HooksHttpHandler, '_read_request',
105 105 return_value=(rpc_method, {}))
106 106 hooks_patcher = mock.patch.object(
107 107 hooks_daemon.Hooks, rpc_method, create=True,
108 108 side_effect=Exception('Test exception'))
109 109
110 110 with read_patcher, hooks_patcher:
111 111 server = MockServer(hooks_daemon.HooksHttpHandler, request)
112 112
113 113 org_exc = json.loads(server.request.output_stream.buflist[-1])
114 114 expected_result = {
115 115 'exception': 'Exception',
116 116 'exception_traceback': org_exc['exception_traceback'],
117 117 'exception_args': ['Test exception']
118 118 }
119 119 assert org_exc == expected_result
120 120
121 121 def test_log_message_writes_to_debug_log(self, caplog):
122 122 ip_port = ('0.0.0.0', 8888)
123 123 handler = hooks_daemon.HooksHttpHandler(
124 124 MockRequest('POST /'), ip_port, mock.Mock())
125 125 fake_date = '1/Nov/2015 00:00:00'
126 126 date_patcher = mock.patch.object(
127 127 handler, 'log_date_time_string', return_value=fake_date)
128 128 with date_patcher, caplog.at_level(logging.DEBUG):
129 129 handler.log_message('Some message %d, %s', 123, 'string')
130 130
131 131 expected_message = '{} - - [{}] Some message 123, string'.format(
132 132 ip_port[0], fake_date)
133 133 assert_message_in_log(
134 134 caplog.records, expected_message,
135 135 levelno=logging.DEBUG, module='hooks_daemon')
136 136
137 137 def _generate_post_request(self, data):
138 138 payload = json.dumps(data)
139 139 return 'POST / HTTP/1.0\nContent-Length: {}\n\n{}'.format(
140 140 len(payload), payload)
141 141
142 142
143 143 class ThreadedHookCallbackDaemon(object):
144 144 def test_constructor_calls_prepare(self):
145 145 prepare_daemon_patcher = mock.patch.object(
146 146 hooks_daemon.ThreadedHookCallbackDaemon, '_prepare')
147 147 with prepare_daemon_patcher as prepare_daemon_mock:
148 148 hooks_daemon.ThreadedHookCallbackDaemon()
149 149 prepare_daemon_mock.assert_called_once_with()
150 150
151 151 def test_run_is_called_on_context_start(self):
152 152 patchers = mock.patch.multiple(
153 153 hooks_daemon.ThreadedHookCallbackDaemon,
154 154 _run=mock.DEFAULT, _prepare=mock.DEFAULT, __exit__=mock.DEFAULT)
155 155
156 156 with patchers as mocks:
157 157 daemon = hooks_daemon.ThreadedHookCallbackDaemon()
158 158 with daemon as daemon_context:
159 159 pass
160 160 mocks['_run'].assert_called_once_with()
161 161 assert daemon_context == daemon
162 162
163 163 def test_stop_is_called_on_context_exit(self):
164 164 patchers = mock.patch.multiple(
165 165 hooks_daemon.ThreadedHookCallbackDaemon,
166 166 _run=mock.DEFAULT, _prepare=mock.DEFAULT, _stop=mock.DEFAULT)
167 167
168 168 with patchers as mocks:
169 169 daemon = hooks_daemon.ThreadedHookCallbackDaemon()
170 170 with daemon as daemon_context:
171 171 assert mocks['_stop'].call_count == 0
172 172
173 173 mocks['_stop'].assert_called_once_with()
174 174 assert daemon_context == daemon
175 175
176 176
177 177 class TestHttpHooksCallbackDaemon(object):
178 def test_hooks_callback_generates_new_port(self, caplog):
179 with caplog.at_level(logging.DEBUG):
180 daemon = hooks_daemon.HttpHooksCallbackDaemon(host='127.0.0.1', port=8881)
181 assert daemon._daemon.server_address == ('127.0.0.1', 8881)
182
183 with caplog.at_level(logging.DEBUG):
184 daemon = hooks_daemon.HttpHooksCallbackDaemon(host=None, port=None)
185 assert daemon._daemon.server_address[1] in range(0, 66000)
186 assert daemon._daemon.server_address[0] != '127.0.0.1'
187
178 188 def test_prepare_inits_daemon_variable(self, tcp_server, caplog):
179 189 with self._tcp_patcher(tcp_server), caplog.at_level(logging.DEBUG):
180 daemon = hooks_daemon.HttpHooksCallbackDaemon()
190 daemon = hooks_daemon.HttpHooksCallbackDaemon(host='127.0.0.1', port=8881)
181 191 assert daemon._daemon == tcp_server
182 192
183 193 _, port = tcp_server.server_address
184 194 expected_uri = '{}:{}'.format('127.0.0.1', port)
185 195 msg = 'Preparing HTTP callback daemon at `{}` and ' \
186 'registering hook object'.format(expected_uri)
196 'registering hook object: rhodecode.lib.hooks_daemon.HooksHttpHandler'.format(expected_uri)
187 197 assert_message_in_log(
188 198 caplog.records, msg, levelno=logging.DEBUG, module='hooks_daemon')
189 199
190 200 def test_prepare_inits_hooks_uri_and_logs_it(
191 201 self, tcp_server, caplog):
192 202 with self._tcp_patcher(tcp_server), caplog.at_level(logging.DEBUG):
193 daemon = hooks_daemon.HttpHooksCallbackDaemon()
203 daemon = hooks_daemon.HttpHooksCallbackDaemon(host='127.0.0.1', port=8881)
194 204
195 205 _, port = tcp_server.server_address
196 206 expected_uri = '{}:{}'.format('127.0.0.1', port)
197 207 assert daemon.hooks_uri == expected_uri
198 208
199 209 msg = 'Preparing HTTP callback daemon at `{}` and ' \
200 'registering hook object'.format(expected_uri)
210 'registering hook object: rhodecode.lib.hooks_daemon.HooksHttpHandler'.format(expected_uri)
201 211 assert_message_in_log(
202 212 caplog.records, msg,
203 213 levelno=logging.DEBUG, module='hooks_daemon')
204 214
205 215 def test_run_creates_a_thread(self, tcp_server):
206 216 thread = mock.Mock()
207 217
208 218 with self._tcp_patcher(tcp_server):
209 219 daemon = hooks_daemon.HttpHooksCallbackDaemon()
210 220
211 221 with self._thread_patcher(thread) as thread_mock:
212 222 daemon._run()
213 223
214 224 thread_mock.assert_called_once_with(
215 225 target=tcp_server.serve_forever,
216 226 kwargs={'poll_interval': daemon.POLL_INTERVAL})
217 227 assert thread.daemon is True
218 228 thread.start.assert_called_once_with()
219 229
220 230 def test_run_logs(self, tcp_server, caplog):
221 231
222 232 with self._tcp_patcher(tcp_server):
223 233 daemon = hooks_daemon.HttpHooksCallbackDaemon()
224 234
225 235 with self._thread_patcher(mock.Mock()), caplog.at_level(logging.DEBUG):
226 236 daemon._run()
227 237
228 238 assert_message_in_log(
229 239 caplog.records,
230 240 'Running event loop of callback daemon in background thread',
231 241 levelno=logging.DEBUG, module='hooks_daemon')
232 242
233 243 def test_stop_cleans_up_the_connection(self, tcp_server, caplog):
234 244 thread = mock.Mock()
235 245
236 246 with self._tcp_patcher(tcp_server):
237 247 daemon = hooks_daemon.HttpHooksCallbackDaemon()
238 248
239 249 with self._thread_patcher(thread), caplog.at_level(logging.DEBUG):
240 250 with daemon:
241 251 assert daemon._daemon == tcp_server
242 252 assert daemon._callback_thread == thread
243 253
244 254 assert daemon._daemon is None
245 255 assert daemon._callback_thread is None
246 256 tcp_server.shutdown.assert_called_with()
247 257 thread.join.assert_called_once_with()
248 258
249 259 assert_message_in_log(
250 260 caplog.records, 'Waiting for background thread to finish.',
251 261 levelno=logging.DEBUG, module='hooks_daemon')
252 262
253 263 def _tcp_patcher(self, tcp_server):
254 264 return mock.patch.object(
255 265 hooks_daemon, 'TCPServer', return_value=tcp_server)
256 266
257 267 def _thread_patcher(self, thread):
258 268 return mock.patch.object(
259 269 hooks_daemon.threading, 'Thread', return_value=thread)
260 270
261 271
262 272 class TestPrepareHooksDaemon(object):
263 273 @pytest.mark.parametrize('protocol', ('http',))
264 274 def test_returns_dummy_hooks_callback_daemon_when_using_direct_calls(
265 275 self, protocol):
266 276 expected_extras = {'extra1': 'value1'}
267 277 callback, extras = hooks_daemon.prepare_callback_daemon(
268 278 expected_extras.copy(), protocol=protocol,
269 279 host='127.0.0.1', use_direct_calls=True)
270 280 assert isinstance(callback, hooks_daemon.DummyHooksCallbackDaemon)
271 281 expected_extras['hooks_module'] = 'rhodecode.lib.hooks_daemon'
272 282 expected_extras['time'] = extras['time']
273 283 assert 'extra1' in extras
274 284
275 285 @pytest.mark.parametrize('protocol, expected_class', (
276 286 ('http', hooks_daemon.HttpHooksCallbackDaemon),
277 287 ))
278 288 def test_returns_real_hooks_callback_daemon_when_protocol_is_specified(
279 289 self, protocol, expected_class):
280 290 expected_extras = {
281 291 'extra1': 'value1',
282 292 'txn_id': 'txnid2',
283 293 'hooks_protocol': protocol.lower()
284 294 }
285 295 callback, extras = hooks_daemon.prepare_callback_daemon(
286 296 expected_extras.copy(), protocol=protocol, host='127.0.0.1',
287 297 use_direct_calls=False,
288 298 txn_id='txnid2')
289 299 assert isinstance(callback, expected_class)
290 300 extras.pop('hooks_uri')
291 301 expected_extras['time'] = extras['time']
292 302 assert extras == expected_extras
293 303
294 304 @pytest.mark.parametrize('protocol', (
295 305 'invalid',
296 306 'Http',
297 307 'HTTP',
298 308 ))
299 309 def test_raises_on_invalid_protocol(self, protocol):
300 310 expected_extras = {
301 311 'extra1': 'value1',
302 312 'hooks_protocol': protocol.lower()
303 313 }
304 314 with pytest.raises(Exception):
305 315 callback, extras = hooks_daemon.prepare_callback_daemon(
306 316 expected_extras.copy(),
307 317 protocol=protocol, host='127.0.0.1',
308 318 use_direct_calls=False)
309 319
310 320
311 321 class MockRequest(object):
312 322 def __init__(self, request):
313 323 self.request = request
314 324 self.input_stream = StringIO(b'{}'.format(self.request))
315 325 self.output_stream = StringIO()
316 326
317 327 def makefile(self, mode, *args, **kwargs):
318 328 return self.output_stream if mode == 'wb' else self.input_stream
319 329
320 330
321 331 class MockServer(object):
322 332 def __init__(self, Handler, request):
323 333 ip_port = ('0.0.0.0', 8888)
324 334 self.request = MockRequest(request)
325 335 self.handler = Handler(self.request, ip_port, self)
326 336
327 337
328 338 @pytest.fixture()
329 339 def tcp_server():
330 340 server = mock.Mock()
331 341 server.server_address = ('127.0.0.1', 8881)
332 342 return server
@@ -1,717 +1,717 b''
1 1
2 2 ; #########################################
3 3 ; RHODECODE COMMUNITY EDITION CONFIGURATION
4 4 ; #########################################
5 5
6 6 [DEFAULT]
7 7 ; Debug flag sets all loggers to debug, and enables request tracking
8 8 debug = true
9 9
10 10 ; ########################################################################
11 11 ; EMAIL CONFIGURATION
12 12 ; These settings will be used by the RhodeCode mailing system
13 13 ; ########################################################################
14 14
15 15 ; prefix all emails subjects with given prefix, helps filtering out emails
16 16 #email_prefix = [RhodeCode]
17 17
18 18 ; email FROM address all mails will be sent
19 19 #app_email_from = rhodecode-noreply@localhost
20 20
21 21 #smtp_server = mail.server.com
22 22 #smtp_username =
23 23 #smtp_password =
24 24 #smtp_port =
25 25 #smtp_use_tls = false
26 26 #smtp_use_ssl = true
27 27
28 28 [server:main]
29 29 ; COMMON HOST/IP CONFIG
30 30 host = 0.0.0.0
31 31 port = 5000
32 32
33 33
34 34 ; ###########################
35 35 ; GUNICORN APPLICATION SERVER
36 36 ; ###########################
37 37
38 38 ; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
39 39
40 40 ; Module to use, this setting shouldn't be changed
41 41 use = egg:gunicorn#main
42 42
43 43 ; Sets the number of process workers. More workers means more concurrent connections
44 44 ; RhodeCode can handle at the same time. Each additional worker also increases
45 45 ; memory usage as each has its own set of caches.
46 46 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
47 47 ; than 8-10 unless for really big deployments, e.g. 700-1000 users.
48 48 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
49 49 ; when using more than 1 worker.
50 50 #workers = 2
51 51
52 52 ; Gunicorn access log level
53 53 #loglevel = info
54 54
55 55 ; Process name visible in process list
56 56 #proc_name = rhodecode
57 57
58 58 ; Type of worker class, one of `sync`, `gevent`
59 59 ; Recommended type is `gevent`
60 60 #worker_class = gevent
61 61
62 62 ; The maximum number of simultaneous clients per worker. Valid only for gevent
63 63 #worker_connections = 10
64 64
65 65 ; Max number of requests that worker will handle before being gracefully restarted.
66 66 ; Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
67 67 #max_requests = 1000
68 68 #max_requests_jitter = 30
69 69
70 70 ; Amount of time a worker can spend with handling a request before it
71 71 ; gets killed and restarted. By default set to 21600 (6hrs)
72 72 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
73 73 #timeout = 21600
74 74
75 75 ; The maximum size of HTTP request line in bytes.
76 76 ; 0 for unlimited
77 77 #limit_request_line = 0
78 78
79 79
80 80 ; Prefix middleware for RhodeCode.
81 81 ; recommended when using proxy setup.
82 82 ; allows to set RhodeCode under a prefix in server.
83 83 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
84 84 ; And set your prefix like: `prefix = /custom_prefix`
85 85 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
86 86 ; to make your cookies only work on prefix url
87 87 [filter:proxy-prefix]
88 88 use = egg:PasteDeploy#prefix
89 89 prefix = /
90 90
91 91 [app:main]
92 92 ; The %(here)s variable will be replaced with the absolute path of parent directory
93 93 ; of this file
94 94 ; Each option in the app:main section can be overridden by an environment variable
95 95 ;
96 96 ;To override an option:
97 97 ;
98 98 ;RC_<KeyName>
99 99 ;Everything should be uppercase, . and - should be replaced by _.
100 100 ;For example, if you have these configuration settings:
101 101 ;rc_cache.repo_object.backend = foo
102 102 ;can be overridden by
103 103 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
104 104
105 105 is_test = True
106 106 use = egg:rhodecode-enterprise-ce
107 107
108 108 ; enable proxy prefix middleware, defined above
109 109 #filter-with = proxy-prefix
110 110
111 111
112 112 ## RHODECODE PLUGINS ##
113 113 rhodecode.includes = rhodecode.api
114 114
115 115 # api prefix url
116 116 rhodecode.api.url = /_admin/api
117 117
118 118
119 119 ## END RHODECODE PLUGINS ##
120 120
121 121 ## encryption key used to encrypt social plugin tokens,
122 122 ## remote_urls with credentials etc, if not set it defaults to
123 123 ## `beaker.session.secret`
124 124 #rhodecode.encrypted_values.secret =
125 125
126 126 ; decryption strict mode (enabled by default). It controls if decryption raises
127 127 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
128 128 #rhodecode.encrypted_values.strict = false
129 129
130 130 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
131 131 ; fernet is safer, and we strongly recommend switching to it.
132 132 ; Due to backward compatibility aes is used as default.
133 133 #rhodecode.encrypted_values.algorithm = fernet
134 134
135 135 ; Return gzipped responses from RhodeCode (static files/application)
136 136 gzip_responses = false
137 137
138 138 ; Auto-generate javascript routes file on startup
139 139 generate_js_files = false
140 140
141 141 ; System global default language.
142 142 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
143 143 lang = en
144 144
145 145 ; Perform a full repository scan and import on each server start.
146 146 ; Setting this to true could lead to a very long startup time.
147 147 startup.import_repos = true
148 148
149 149 ; Uncomment and set this path to use archive download cache.
150 150 ; Once enabled, generated archives will be cached at this location
151 151 ; and served from the cache during subsequent requests for the same archive of
152 152 ; the repository.
153 153 #archive_cache_dir = /tmp/tarballcache
154 154
155 155 ; URL at which the application is running. This is used for Bootstrapping
156 156 ; requests in context when no web request is available. Used in ishell, or
157 157 ; SSH calls. Set this for events to receive proper url for SSH calls.
158 158 app.base_url = http://rhodecode.local
159 159
160 160 ; Unique application ID. Should be a random unique string for security.
161 161 app_instance_uuid = rc-production
162 162
163 163 ## cut off limit for large diffs (size in bytes)
164 164 cut_off_limit_diff = 1024000
165 165 cut_off_limit_file = 256000
166 166
167 167 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
168 168 vcs_full_cache = false
169 169
170 170 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
171 171 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
172 172 force_https = false
173 173
174 174 ; use Strict-Transport-Security headers
175 175 use_htsts = false
176 176
177 177 ; Set to true if your repos are exposed using the dumb protocol
178 178 git_update_server_info = false
179 179
180 180 ; RSS/ATOM feed options
181 181 rss_cut_off_limit = 256000
182 182 rss_items_per_page = 10
183 183 rss_include_diff = false
184 184
185 185 ; gist URL alias, used to create nicer urls for gist. This should be an
186 186 ; url that does rewrites to _admin/gists/{gistid}.
187 187 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
188 188 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
189 189 gist_alias_url =
190 190
191 191 ; List of views (using glob pattern syntax) that AUTH TOKENS can be
192 192 ; used to access.
193 193 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
194 194 ; came from the logged-in user who owns this authentication token.
195 195 ; Additionally, the @TOKEN syntax can be used to bind the view to a specific
196 196 ; authentication token. Such a view would only be accessible when used together
197 197 ; with this authentication token
198 198 ; list of all views can be found under `/_admin/permissions/auth_token_access`
199 199 ; The list should be "," separated and on a single line.
200 200 ; Most common views to enable:
201 201
202 202 # RepoCommitsView:repo_commit_download
203 203 # RepoCommitsView:repo_commit_patch
204 204 # RepoCommitsView:repo_commit_raw
205 205 # RepoCommitsView:repo_commit_raw@TOKEN
206 206 # RepoFilesView:repo_files_diff
207 207 # RepoFilesView:repo_archivefile
208 208 # RepoFilesView:repo_file_raw
209 209 # GistView:*
210 210 api_access_controllers_whitelist =
211 211
212 212 ; Default encoding used to convert from and to unicode
213 213 ; can also be a comma separated list of encodings in case of mixed encodings
214 214 default_encoding = UTF-8
215 215
216 216 ; instance-id prefix
217 217 ; a prefix key for this instance used for cache invalidation when running
218 218 ; multiple instances of RhodeCode, make sure it's globally unique for
219 219 ; all running RhodeCode instances. Leave empty if you don't use it
220 220 instance_id =
221 221
222 222 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
223 223 ; of an authentication plugin even if it is disabled by its settings.
224 224 ; This could be useful if you are unable to log in to the system due to broken
225 225 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
226 226 ; module to log in again and fix the settings.
227 227 ; Available builtin plugin IDs (hash is part of the ID):
228 228 ; egg:rhodecode-enterprise-ce#rhodecode
229 229 ; egg:rhodecode-enterprise-ce#pam
230 230 ; egg:rhodecode-enterprise-ce#ldap
231 231 ; egg:rhodecode-enterprise-ce#jasig_cas
232 232 ; egg:rhodecode-enterprise-ce#headers
233 233 ; egg:rhodecode-enterprise-ce#crowd
234 234
235 235 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
236 236
237 237 ; Flag to control loading of legacy plugins in py:/path format
238 238 auth_plugin.import_legacy_plugins = true
239 239
240 240 ; Alternative HTTP response code for failed authentication. The default HTTP
241 241 ; response is 401 HTTPUnauthorized. Currently HG clients have trouble
242 242 ; handling that, causing a series of failed authentication calls.
243 243 ; Set this variable to 403 to return HTTPForbidden, or to any other HTTP code.
244 244 ; This will be served instead of the default 401 on bad authentication
245 245 auth_ret_code =
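; A commented, illustrative example based on the note above:
#auth_ret_code = 403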
246 246
247 247 ; Use a special detection method when serving auth_ret_code: instead of serving
248 248 ; the ret_code directly, use 401 initially (which triggers the credentials prompt)
249 249 ; and then serve auth_ret_code to clients
250 250 auth_ret_code_detection = false
251 251
252 252 ; Locking return code. When a repository is locked, return this HTTP code. 2XX
253 253 ; codes don't break the transactions, while 4XX codes do
254 254 lock_ret_code = 423
255 255
256 256 ; Allows changing the repository location on the settings page
257 257 allow_repo_location_change = true
258 258
259 259 ; Allows setting up custom hooks on the settings page
260 260 allow_custom_hooks_settings = true
261 261
262 262 ; Generated license token; go to the license page in RhodeCode settings to obtain
263 263 ; a new token
264 264 license_token = abra-cada-bra1-rce3
265 265
266 266 ; Supervisor connection URI, for managing supervisor and logs.
267 267 supervisor.uri =
268 268 ; supervisord group name/id that this RC instance should handle
269 269 supervisor.group_id = dev
270 270
271 271 ; Display extended labs settings
272 272 labs_settings_active = true
273 273
274 274 ; Custom exception store path, defaults to TMPDIR
275 275 ; This is used to store exception from RhodeCode in shared directory
276 276 #exception_tracker.store_path =
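; A commented, illustrative example; the path is an assumption, any shared
; writable directory works:
#exception_tracker.store_path = %(here)s/data/exceptions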
277 277
278 278 ; Send email with exception details when it happens
279 279 #exception_tracker.send_email = false
280 280
281 281 ; Comma separated list of recipients for exception emails,
282 282 ; e.g admin@rhodecode.com,devops@rhodecode.com
283 283 ; Can be left empty, then emails will be sent to ALL super-admins
284 284 #exception_tracker.send_email_recipients =
285 285
286 286 ; Optional prefix to add to the email subject
287 287 #exception_tracker.email_prefix = [RHODECODE ERROR]
288 288
289 289 ; File store configuration. This is used to store and serve uploaded files
290 290 file_store.enabled = true
291 291
292 292 ; Storage backend, available options are: local
293 293 file_store.backend = local
294 294
295 295 ; path to store the uploaded binaries
296 296 file_store.storage_path = %(here)s/data/file_store
297 297
298 298
299 299 ; #############
300 300 ; CELERY CONFIG
301 301 ; #############
302 302
303 303 ; manually run celery: /path/to/celery worker -E --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
304 304
305 305 use_celery = false
306 306
307 307 ; path to store schedule database
308 308 #celerybeat-schedule.path =
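; A commented, illustrative example; the path is an assumption, any writable
; location works:
#celerybeat-schedule.path = %(here)s/data/celerybeat-schedule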
309 309
310 310 ; connection url to the message broker (default redis)
311 311 celery.broker_url = redis://localhost:6379/8
312 312
313 313 ; rabbitmq example
314 314 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
315 315
316 316 ; maximum tasks to execute before worker restart
317 317 celery.max_tasks_per_child = 100
318 318
319 319 ; tasks will never be sent to the queue, but executed locally instead.
320 320 celery.task_always_eager = false
321 321
322 322 ; #############
323 323 ; DOGPILE CACHE
324 324 ; #############
325 325
326 326 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
327 327 ; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space
328 328 cache_dir = %(here)s/data
329 329
330 330 ; Locking and default file storage for Beaker. Putting this into a ramdisk
331 331 ; can boost performance, e.g. %(here)s/data_ramdisk/cache/beaker_data
332 332 beaker.cache.data_dir = %(here)s/rc/data/cache/beaker_data
333 333 beaker.cache.lock_dir = %(here)s/rc/data/cache/beaker_lock
334 334
335 335 beaker.cache.regions = long_term
336 336
337 337 beaker.cache.long_term.type = memory
338 338 beaker.cache.long_term.expire = 36000
339 339 beaker.cache.long_term.key_length = 256
340 340
341 341
342 342 ; #############
343 343 ; DOGPILE CACHE
344 344 ; #############
345 345
346 346 ; Permission tree cache settings
347 347 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
348 348 rc_cache.cache_perms.expiration_time = 0
349 349 rc_cache.cache_perms.arguments.filename = /tmp/rc_cache_1
350 350
351 351
352 352 ; Cache settings for SQL queries
353 353 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
354 354 rc_cache.sql_cache_short.expiration_time = 0
355 355
356 356
357 357 ; ##############
358 358 ; BEAKER SESSION
359 359 ; ##############
360 360
361 361 ; beaker.session.type is the type of storage used for logged-in users' sessions. Currently allowed
362 362 ; types are file, ext:redis, ext:database, ext:memcached, and memory (default if not specified).
363 363 ; The fastest ones are Redis and ext:database
364 364 beaker.session.type = file
365 365 beaker.session.data_dir = %(here)s/rc/data/sessions/data
366 366
367 367 ; Redis based sessions
368 368 #beaker.session.type = ext:redis
369 369 #beaker.session.url = redis://127.0.0.1:6379/2
370 370
371 371 ; DB based session, fast, and allows easy management over logged in users
372 372 #beaker.session.type = ext:database
373 373 #beaker.session.table_name = db_session
374 374 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
375 375 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
376 376 #beaker.session.sa.pool_recycle = 3600
377 377 #beaker.session.sa.echo = false
378 378
379 379 beaker.session.key = rhodecode
380 380 beaker.session.secret = test-rc-uytcxaz
381 381 beaker.session.lock_dir = %(here)s/rc/data/sessions/lock
382 382
383 383 ; Secure encrypted cookie. Requires the AES and AES python libraries.
384 384 ; You must disable beaker.session.secret to use this
385 385 #beaker.session.encrypt_key = key_for_encryption
386 386 #beaker.session.validate_key = validation_key
387 387
388 388 ; Sets the session as invalid (also logging out the user) if it has not been
389 389 ; accessed for the given amount of time in seconds
390 390 beaker.session.timeout = 2592000
391 391 beaker.session.httponly = true
392 392
393 393 ; Path to use for the cookie. Set to prefix if you use prefix middleware
394 394 #beaker.session.cookie_path = /custom_prefix
395 395
396 396 ; Set https secure cookie
397 397 beaker.session.secure = false
398 398
399 399 ; Auto-save the session so that an explicit .save() call is not needed
400 400 beaker.session.auto = false
401 401
402 402 ; Default cookie expiration time in seconds; set to `true` to expire
403 403 ; at browser close
404 404 #beaker.session.cookie_expires = 3600
405 405
406 406 ; #############################
407 407 ; SEARCH INDEXING CONFIGURATION
408 408 ; #############################
409 409
410 410 ; Full text search indexer is available in rhodecode-tools under
411 411 ; `rhodecode-tools index` command
412 412
413 413 ; WHOOSH backend, doesn't require additional services to run,
414 414 ; it works well with a few dozen repos
415 415 search.module = rhodecode.lib.index.whoosh
416 416 search.location = %(here)s/data/index
417 417
418 418 ; ####################
419 419 ; CHANNELSTREAM CONFIG
420 420 ; ####################
421 421
422 422 ; Channelstream enables persistent connections and live notifications
423 423 ; in the system. It's also used by the chat system
424 424
425 425 channelstream.enabled = false
426 426
427 427 ; server address for channelstream server on the backend
428 428 channelstream.server = 127.0.0.1:9800
429 429
430 430 ; location of the channelstream server from outside world
431 431 ; use ws:// for http or wss:// for https. This address needs to be handled
432 432 ; by external HTTP server such as Nginx or Apache
433 433 ; see Nginx/Apache configuration examples in our docs
434 434 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
435 435 channelstream.secret = secret
436 436 channelstream.history.location = %(here)s/channelstream_history
437 437
438 438 ; Internal application path that JavaScript uses to connect to.
439 439 ; If you use proxy-prefix the prefix should be added before /_channelstream
440 440 channelstream.proxy_path = /_channelstream
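; A commented, illustrative example for a proxy-prefix setup; the prefix value
; is an assumption and should match your proxy configuration:
#channelstream.proxy_path = /custom_prefix/_channelstream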
441 441
442 442
443 443 ; ##############################
444 444 ; MAIN RHODECODE DATABASE CONFIG
445 445 ; ##############################
446 446
447 447 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
448 448 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
449 449 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
450 450 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
451 451 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
452 452
453 453 sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode_test.db?timeout=30
454 454
455 455 ; see sqlalchemy docs for other advanced settings
456 456 ; print the sql statements to output
457 457 sqlalchemy.db1.echo = false
458 458
459 459 ; recycle the connections after this amount of seconds
460 460 sqlalchemy.db1.pool_recycle = 3600
461 461 sqlalchemy.db1.convert_unicode = true
462 462
463 463 ; the number of connections to keep open inside the connection pool.
464 464 ; 0 indicates no limit
465 465 #sqlalchemy.db1.pool_size = 5
466 466
467 467 ; The number of connections to allow in connection pool "overflow", that is
468 468 ; connections that can be opened above and beyond the pool_size setting,
469 469 ; which defaults to five.
470 470 #sqlalchemy.db1.max_overflow = 10
471 471
472 472 ; Connection check ping, used to detect broken database connections
473 473 ; could be enabled to better handle "MySQL has gone away" errors
474 474 #sqlalchemy.db1.ping_connection = true
475 475
476 476 ; ##########
477 477 ; VCS CONFIG
478 478 ; ##########
479 479 vcs.server.enable = true
480 480 vcs.server = localhost:9901
481 481
482 482 ; Web server connectivity protocol, responsible for web based VCS operations
483 483 ; Available protocols are:
484 484 ; `http` - use http-rpc backend (default)
485 485 vcs.server.protocol = http
486 486
487 487 ; Push/Pull operations protocol, available options are:
488 488 ; `http` - use http-rpc backend (default)
489 489 vcs.scm_app_implementation = http
490 490
491 491 ; Push/Pull operations hooks protocol, available options are:
492 492 ; `http` - use http-rpc backend (default)
493 493 vcs.hooks.protocol = http
494 494
495 495 ; Host on which this instance is listening for hooks. If the vcsserver is in another location,
496 496 ; this should be adjusted.
497 vcs.hooks.host = 127.0.0.1
497 vcs.hooks.host = *
498 498
499 499 ; Start VCSServer with this instance as a subprocess, useful for development
500 500 vcs.start_server = false
501 501
502 502 ; List of enabled VCS backends, available options are:
503 503 ; `hg` - mercurial
504 504 ; `git` - git
505 505 ; `svn` - subversion
506 506 vcs.backends = hg, git, svn
507 507
508 508 ; Wait this number of seconds before killing connection to the vcsserver
509 509 vcs.connection_timeout = 3600
510 510
511 511 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
512 512 ; Set a numeric version for your current SVN, e.g. 1.8 or 1.12
513 513 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
514 514 #vcs.svn.compatible_version = 1.8
515 515
516 516 ; Cache flag to cache vcsserver remote calls locally
517 517 ; It uses cache_region `cache_repo`
518 518 vcs.methods.cache = false
519 519
520 520 ; ####################################################
521 521 ; Subversion proxy support (mod_dav_svn)
522 522 ; Maps RhodeCode repo groups into SVN paths for Apache
523 523 ; ####################################################
524 524
525 525 ; Enable or disable the config file generation.
526 526 svn.proxy.generate_config = false
527 527
528 528 ; Generate config file with `SVNListParentPath` set to `On`.
529 529 svn.proxy.list_parent_path = true
530 530
531 531 ; Set location and file name of generated config file.
532 532 svn.proxy.config_file_path = %(here)s/mod_dav_svn.conf
533 533
534 534 ; alternative mod_dav config template. This needs to be a valid mako template
535 535 ; Example template can be found in the source code:
536 536 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
537 537 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
538 538
539 539 ; Used as a prefix to the `Location` block in the generated config file.
540 540 ; In most cases it should be set to `/`.
541 541 svn.proxy.location_root = /
542 542
543 543 ; Command to reload the mod dav svn configuration on change.
544 544 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
545 545 ; Make sure the user who runs the RhodeCode process is allowed to reload Apache
546 546 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
547 547
548 548 ; If the timeout expires before the reload command finishes, the command will
549 549 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
550 550 #svn.proxy.reload_timeout = 10
551 551
552 552 ; ####################
553 553 ; SSH Support Settings
554 554 ; ####################
555 555
556 556 ; Defines if a custom authorized_keys file should be created and written on
557 557 ; any change of user SSH keys. Setting this to false also disables the possibility
558 558 ; of users adding SSH keys from the web interface. Super admins can still
559 559 ; manage SSH keys.
560 560 ssh.generate_authorized_keyfile = true
561 561
562 562 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
563 563 # ssh.authorized_keys_ssh_opts =
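; A commented example that simply spells out the default mentioned above:
#ssh.authorized_keys_ssh_opts = no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding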
564 564
565 565 ; Path to the authorized_keys file where the generated entries are placed.
566 566 ; It is possible to have multiple key files specified in `sshd_config` e.g.
567 567 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
568 568 ssh.authorized_keys_file_path = %(here)s/rc/authorized_keys_rhodecode
569 569
570 570 ; Command to execute the SSH wrapper. The binary is available in the
571 571 ; RhodeCode installation directory.
572 572 ; e.g ~/.rccontrol/community-1/profile/bin/rc-ssh-wrapper
573 573 ssh.wrapper_cmd = ~/.rccontrol/community-1/rc-ssh-wrapper
574 574
575 575 ; Allow shell when executing the ssh-wrapper command
576 576 ssh.wrapper_cmd_allow_shell = false
577 577
578 578 ; Enables logging and detailed output sent back to the client during SSH
579 579 ; operations. Useful for debugging, shouldn't be used in production.
580 580 ssh.enable_debug_logging = false
581 581
582 582 ; Paths to the binary executables; by default these are just the binary names, but we can
583 583 ; override them if we want to use custom ones
584 584 ssh.executable.hg = ~/.rccontrol/vcsserver-1/profile/bin/hg
585 585 ssh.executable.git = ~/.rccontrol/vcsserver-1/profile/bin/git
586 586 ssh.executable.svn = ~/.rccontrol/vcsserver-1/profile/bin/svnserve
587 587
588 588 ; Enables SSH key generator web interface. Disabling this still allows users
589 589 ; to add their own keys.
590 590 ssh.enable_ui_key_generator = true
591 591
592 592 ; Statsd client config, this is used to send metrics to statsd.
593 593 ; We recommend setting up statsd_exporter and scraping the metrics with Prometheus
594 594 #statsd.enabled = false
595 595 #statsd.statsd_host = 0.0.0.0
596 596 #statsd.statsd_port = 8125
597 597 #statsd.statsd_prefix =
598 598 #statsd.statsd_ipv6 = false
599 599
600 600 ; Configure logging automatically at server startup; set to false
601 601 ; to use the custom logging config below.
602 602 ; RC_LOGGING_FORMATTER
603 603 ; RC_LOGGING_LEVEL
604 604 ; env variables can control the logging settings in case of autoconfigure
605 605
606 606 logging.autoconfigure = false
607 607
608 608 ; specify your own custom logging config file to configure logging
609 609 #logging.logging_conf_file = /path/to/custom_logging.ini
610 610
611 611 ; Dummy marker to add new entries after.
612 612 ; Add any custom entries below. Please don't remove this marker.
613 613 custom.conf = 1
614 614
615 615
616 616 ; #####################
617 617 ; LOGGING CONFIGURATION
618 618 ; #####################
619 619
620 620 [loggers]
621 621 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper
622 622
623 623 [handlers]
624 624 keys = console, console_sql
625 625
626 626 [formatters]
627 627 keys = generic, json, color_formatter, color_formatter_sql
628 628
629 629 ; #######
630 630 ; LOGGERS
631 631 ; #######
632 632 [logger_root]
633 633 level = NOTSET
634 634 handlers = console
635 635
636 636 [logger_routes]
637 637 level = DEBUG
638 638 handlers =
639 639 qualname = routes.middleware
640 640 ## "level = DEBUG" logs the route matched and routing variables.
641 641 propagate = 1
642 642
643 643 [logger_sqlalchemy]
644 644 level = INFO
645 645 handlers = console_sql
646 646 qualname = sqlalchemy.engine
647 647 propagate = 0
648 648
649 649 [logger_beaker]
650 650 level = DEBUG
651 651 handlers =
652 652 qualname = beaker.container
653 653 propagate = 1
654 654
655 655 [logger_rhodecode]
656 656 level = DEBUG
657 657 handlers =
658 658 qualname = rhodecode
659 659 propagate = 1
660 660
661 661 [logger_ssh_wrapper]
662 662 level = DEBUG
663 663 handlers =
664 664 qualname = ssh_wrapper
665 665 propagate = 1
666 666
667 667 [logger_celery]
668 668 level = DEBUG
669 669 handlers =
670 670 qualname = celery
671 671
672 672
673 673 ; ########
674 674 ; HANDLERS
675 675 ; ########
676 676
677 677 [handler_console]
678 678 class = StreamHandler
679 679 args = (sys.stderr, )
680 680 level = DEBUG
681 681 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
682 682 ; This allows sending properly formatted logs to grafana loki or elasticsearch
683 683 formatter = generic
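; A commented, illustrative alternative following the note above:
#formatter = json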
684 684
685 685 [handler_console_sql]
686 686 ; "level = DEBUG" logs SQL queries and results.
687 687 ; "level = INFO" logs SQL queries.
688 688 ; "level = WARN" logs neither. (Recommended for production systems.)
689 689 class = StreamHandler
690 690 args = (sys.stderr, )
691 691 level = WARN
692 692 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
693 693 ; This allows sending properly formatted logs to grafana loki or elasticsearch
694 694 formatter = generic
695 695
696 696 ; ##########
697 697 ; FORMATTERS
698 698 ; ##########
699 699
700 700 [formatter_generic]
701 701 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
702 702 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
703 703 datefmt = %Y-%m-%d %H:%M:%S
704 704
705 705 [formatter_color_formatter]
706 706 class = rhodecode.lib.logging_formatter.ColorFormatter
707 707 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
708 708 datefmt = %Y-%m-%d %H:%M:%S
709 709
710 710 [formatter_color_formatter_sql]
711 711 class = rhodecode.lib.logging_formatter.ColorFormatterSql
712 712 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
713 713 datefmt = %Y-%m-%d %H:%M:%S
714 714
715 715 [formatter_json]
716 716 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
717 717 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter