##// END OF EJS Templates
feat(redis): added ability to specify a redis key prefix for all cache regions of dogpile that use redis backend
super-admin -
r5582:15d02bff default
parent child Browse files
Show More
@@ -1,915 +1,927 b''
1 1
2 2 ; #########################################
3 3 ; RHODECODE COMMUNITY EDITION CONFIGURATION
4 4 ; #########################################
5 5
6 6 [DEFAULT]
7 7 ; Debug flag sets all loggers to debug, and enables request tracking
8 8 debug = true
9 9
10 10 ; ########################################################################
11 11 ; EMAIL CONFIGURATION
12 12 ; These settings will be used by the RhodeCode mailing system
13 13 ; ########################################################################
14 14
15 15 ; prefix all emails subjects with given prefix, helps filtering out emails
16 16 #email_prefix = [RhodeCode]
17 17
18 18 ; email FROM address all mails will be sent
19 19 #app_email_from = rhodecode-noreply@localhost
20 20
21 21 #smtp_server = mail.server.com
22 22 #smtp_username =
23 23 #smtp_password =
24 24 #smtp_port =
25 25 #smtp_use_tls = false
26 26 #smtp_use_ssl = true
27 27
28 28 [server:main]
29 29 ; COMMON HOST/IP CONFIG, This applies mostly to develop setup,
30 30 ; Host port for gunicorn are controlled by gunicorn_conf.py
31 31 host = 127.0.0.1
32 32 port = 10020
33 33
34 34
35 35 ; ###########################
36 36 ; GUNICORN APPLICATION SERVER
37 37 ; ###########################
38 38
39 39 ; run with gunicorn --config gunicorn_conf.py --paste rhodecode.ini
40 40
41 41 ; Module to use, this setting shouldn't be changed
42 42 use = egg:gunicorn#main
43 43
44 44 ; Prefix middleware for RhodeCode.
45 45 ; recommended when using proxy setup.
46 46 ; allows to set RhodeCode under a prefix in server.
47 47 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
48 48 ; And set your prefix like: `prefix = /custom_prefix`
49 49 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
50 50 ; to make your cookies only work on prefix url
51 51 [filter:proxy-prefix]
52 52 use = egg:PasteDeploy#prefix
53 53 prefix = /
54 54
55 55 [app:main]
56 56 ; The %(here)s variable will be replaced with the absolute path of parent directory
57 57 ; of this file
58 58 ; Each option in the app:main can be override by an environmental variable
59 59 ;
60 60 ;To override an option:
61 61 ;
62 62 ;RC_<KeyName>
63 63 ;Everything should be uppercase, . and - should be replaced by _.
64 64 ;For example, if you have these configuration settings:
65 65 ;rc_cache.repo_object.backend = foo
66 66 ;can be overridden by
67 67 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
68 68
69 69 use = egg:rhodecode-enterprise-ce
70 70
71 71 ; enable proxy prefix middleware, defined above
72 72 #filter-with = proxy-prefix
73 73
74 74 ; control if environmental variables to be expanded into the .ini settings
75 75 #rhodecode.env_expand = true
76 76
77 77 ; #############
78 78 ; DEBUG OPTIONS
79 79 ; #############
80 80
81 81 pyramid.reload_templates = true
82 82
83 83 # During development we want to have the debug toolbar enabled
84 84 pyramid.includes =
85 85 pyramid_debugtoolbar
86 86
87 87 debugtoolbar.hosts = 0.0.0.0/0
88 88 debugtoolbar.exclude_prefixes =
89 89 /css
90 90 /fonts
91 91 /images
92 92 /js
93 93
94 94 ## RHODECODE PLUGINS ##
95 95 rhodecode.includes =
96 96 rhodecode.api
97 97
98 98
99 99 # api prefix url
100 100 rhodecode.api.url = /_admin/api
101 101
102 102 ; enable debug style page
103 103 debug_style = true
104 104
105 105 ; #################
106 106 ; END DEBUG OPTIONS
107 107 ; #################
108 108
109 109 ; encryption key used to encrypt social plugin tokens,
110 110 ; remote_urls with credentials etc, if not set it defaults to
111 111 ; `beaker.session.secret`
112 112 #rhodecode.encrypted_values.secret =
113 113
114 114 ; decryption strict mode (enabled by default). It controls if decryption raises
115 115 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
116 116 #rhodecode.encrypted_values.strict = false
117 117
118 118 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
119 119 ; fernet is safer, and we strongly recommend switching to it.
120 120 ; Due to backward compatibility aes is used as default.
121 121 #rhodecode.encrypted_values.algorithm = fernet
122 122
123 123 ; Return gzipped responses from RhodeCode (static files/application)
124 124 gzip_responses = false
125 125
126 126 ; Auto-generate javascript routes file on startup
127 127 generate_js_files = false
128 128
129 129 ; System global default language.
130 130 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
131 131 lang = en
132 132
133 133 ; Perform a full repository scan and import on each server start.
134 134 ; Setting this to true could lead to very long startup time.
135 135 startup.import_repos = false
136 136
137 137 ; URL at which the application is running. This is used for Bootstrapping
138 138 ; requests in context when no web request is available. Used in ishell, or
139 139 ; SSH calls. Set this for events to receive proper url for SSH calls.
140 140 app.base_url = http://rhodecode.local
141 141
142 142 ; Host at which the Service API is running.
143 143 app.service_api.host = http://rhodecode.local:10020
144 144
145 145 ; Secret for Service API authentication.
146 146 app.service_api.token =
147 147
148 148 ; Unique application ID. Should be a random unique string for security.
149 149 app_instance_uuid = rc-production
150 150
151 151 ; Cut off limit for large diffs (size in bytes). If overall diff size on
152 152 ; commit, or pull request exceeds this limit this diff will be displayed
153 153 ; partially. E.g 512000 == 512Kb
154 154 cut_off_limit_diff = 512000
155 155
156 156 ; Cut off limit for large files inside diffs (size in bytes). Each individual
157 157 ; file inside diff which exceeds this limit will be displayed partially.
158 158 ; E.g 128000 == 128Kb
159 159 cut_off_limit_file = 128000
160 160
161 161 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
162 162 vcs_full_cache = true
163 163
164 164 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
165 165 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
166 166 force_https = false
167 167
168 168 ; use Strict-Transport-Security headers
169 169 use_htsts = false
170 170
171 171 ; Set to true if your repos are exposed using the dumb protocol
172 172 git_update_server_info = false
173 173
174 174 ; RSS/ATOM feed options
175 175 rss_cut_off_limit = 256000
176 176 rss_items_per_page = 10
177 177 rss_include_diff = false
178 178
179 179 ; gist URL alias, used to create nicer urls for gist. This should be an
180 180 ; url that does rewrites to _admin/gists/{gistid}.
181 181 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
182 182 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
183 183 gist_alias_url =
184 184
185 185 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
186 186 ; used for access.
187 187 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
188 188 ; came from the logged in user who owns this authentication token.
189 189 ; Additionally @TOKEN syntax can be used to bound the view to specific
190 190 ; authentication token. Such view would be only accessible when used together
191 191 ; with this authentication token
192 192 ; list of all views can be found under `/_admin/permissions/auth_token_access`
193 193 ; The list should be "," separated and on a single line.
194 194 ; Most common views to enable:
195 195
196 196 # RepoCommitsView:repo_commit_download
197 197 # RepoCommitsView:repo_commit_patch
198 198 # RepoCommitsView:repo_commit_raw
199 199 # RepoCommitsView:repo_commit_raw@TOKEN
200 200 # RepoFilesView:repo_files_diff
201 201 # RepoFilesView:repo_archivefile
202 202 # RepoFilesView:repo_file_raw
203 203 # GistView:*
204 204 api_access_controllers_whitelist =
205 205
206 206 ; Default encoding used to convert from and to unicode
207 207 ; can be also a comma separated list of encoding in case of mixed encodings
208 208 default_encoding = UTF-8
209 209
210 210 ; instance-id prefix
211 211 ; a prefix key for this instance used for cache invalidation when running
212 212 ; multiple instances of RhodeCode, make sure it's globally unique for
213 213 ; all running RhodeCode instances. Leave empty if you don't use it
214 214 instance_id =
215 215
216 216 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
217 217 ; of an authentication plugin also if it is disabled by its settings.
218 218 ; This could be useful if you are unable to log in to the system due to broken
219 219 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
220 220 ; module to log in again and fix the settings.
221 221 ; Available builtin plugin IDs (hash is part of the ID):
222 222 ; egg:rhodecode-enterprise-ce#rhodecode
223 223 ; egg:rhodecode-enterprise-ce#pam
224 224 ; egg:rhodecode-enterprise-ce#ldap
225 225 ; egg:rhodecode-enterprise-ce#jasig_cas
226 226 ; egg:rhodecode-enterprise-ce#headers
227 227 ; egg:rhodecode-enterprise-ce#crowd
228 228
229 229 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
230 230
231 231 ; Flag to control loading of legacy plugins in py:/path format
232 232 auth_plugin.import_legacy_plugins = true
233 233
234 234 ; alternative return HTTP header for failed authentication. Default HTTP
235 235 ; response is 401 HTTPUnauthorized. Currently HG clients have troubles with
236 236 ; handling that causing a series of failed authentication calls.
237 237 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
238 238 ; This will be served instead of default 401 on bad authentication
239 239 auth_ret_code =
240 240
241 241 ; use special detection method when serving auth_ret_code, instead of serving
242 242 ; ret_code directly, use 401 initially (Which triggers credentials prompt)
243 243 ; and then serve auth_ret_code to clients
244 244 auth_ret_code_detection = false
245 245
246 246 ; locking return code. When repository is locked return this HTTP code. 2XX
247 247 ; codes don't break the transactions while 4XX codes do
248 248 lock_ret_code = 423
249 249
250 250 ; Filesystem location where repositories should be stored
251 251 repo_store.path = /var/opt/rhodecode_repo_store
252 252
253 253 ; allows to setup custom hooks in settings page
254 254 allow_custom_hooks_settings = true
255 255
256 256 ; Generated license token required for EE edition license.
257 257 ; New generated token value can be found in Admin > settings > license page.
258 258 license_token =
259 259
260 260 ; This flag hides sensitive information on the license page such as token, and license data
261 261 license.hide_license_info = false
262 262
263 263 ; Import EE license from this license path
264 264 #license.import_path = %(here)s/rhodecode_enterprise.license
265 265
266 266 ; import license 'if-missing' or 'force' (always override)
267 267 ; if-missing means apply license if it doesn't exist. 'force' option always overrides it
268 268 license.import_path_mode = if-missing
269 269
270 270 ; supervisor connection uri, for managing supervisor and logs.
271 271 supervisor.uri =
272 272
273 273 ; supervisord group name/id we only want this RC instance to handle
274 274 supervisor.group_id = dev
275 275
276 276 ; Display extended labs settings
277 277 labs_settings_active = true
278 278
279 279 ; Custom exception store path, defaults to TMPDIR
280 280 ; This is used to store exception from RhodeCode in shared directory
281 281 #exception_tracker.store_path =
282 282
283 283 ; Send email with exception details when it happens
284 284 #exception_tracker.send_email = false
285 285
286 286 ; Comma separated list of recipients for exception emails,
287 287 ; e.g admin@rhodecode.com,devops@rhodecode.com
288 288 ; Can be left empty, then emails will be sent to ALL super-admins
289 289 #exception_tracker.send_email_recipients =
290 290
291 291 ; optional prefix to Add to email Subject
292 292 #exception_tracker.email_prefix = [RHODECODE ERROR]
293 293
294 294 ; NOTE: this setting IS DEPRECATED:
295 295 ; file_store backend is always enabled
296 296 #file_store.enabled = true
297 297
298 298 ; NOTE: this setting IS DEPRECATED:
299 299 ; file_store.backend = X -> use `file_store.backend.type = filesystem_v2` instead
300 300 ; Storage backend, available options are: local
301 301 #file_store.backend = local
302 302
303 303 ; NOTE: this setting IS DEPRECATED:
304 304 ; file_store.storage_path = X -> use `file_store.filesystem_v2.storage_path = X` instead
305 305 ; path to store the uploaded binaries and artifacts
306 306 #file_store.storage_path = /var/opt/rhodecode_data/file_store
307 307
308 308 ; Artifacts file-store, is used to store comment attachments and artifacts uploads.
309 309 ; file_store backend type: filesystem_v1, filesystem_v2 or objectstore (s3-based) are available as options
310 310 ; filesystem_v1 is backwards compat with pre 5.1 storage changes
311 311 ; new installations should choose filesystem_v2 or objectstore (s3-based), pick filesystem when migrating from
312 312 ; previous installations to keep the artifacts without a need of migration
313 313 #file_store.backend.type = filesystem_v2
314 314
315 315 ; filesystem options...
316 316 #file_store.filesystem_v1.storage_path = /var/opt/rhodecode_data/artifacts_file_store
317 317
318 318 ; filesystem_v2 options...
319 319 #file_store.filesystem_v2.storage_path = /var/opt/rhodecode_data/artifacts_file_store
320 320 #file_store.filesystem_v2.shards = 8
321 321
322 322 ; objectstore options...
323 323 ; url for s3 compatible storage that allows to upload artifacts
324 324 ; e.g http://minio:9000
325 325 #file_store.backend.type = objectstore
326 326 #file_store.objectstore.url = http://s3-minio:9000
327 327
328 328 ; a top-level bucket to put all other shards in
329 329 ; objects will be stored in rhodecode-file-store/shard-N based on the bucket_shards number
330 330 #file_store.objectstore.bucket = rhodecode-file-store
331 331
332 332 ; number of sharded buckets to create to distribute archives across
333 333 ; default is 8 shards
334 334 #file_store.objectstore.bucket_shards = 8
335 335
336 336 ; key for s3 auth
337 337 #file_store.objectstore.key = s3admin
338 338
339 339 ; secret for s3 auth
340 340 #file_store.objectstore.secret = s3secret4
341 341
342 342 ; region for s3 storage
343 343 #file_store.objectstore.region = eu-central-1
344 344
345 345 ; Redis url to acquire/check generation of archives locks
346 346 archive_cache.locking.url = redis://redis:6379/1
347 347
348 348 ; Storage backend, only 'filesystem' and 'objectstore' are available now
349 349 archive_cache.backend.type = filesystem
350 350
351 351 ; url for s3 compatible storage that allows to upload artifacts
352 352 ; e.g http://minio:9000
353 353 archive_cache.objectstore.url = http://s3-minio:9000
354 354
355 355 ; key for s3 auth
356 356 archive_cache.objectstore.key = key
357 357
358 358 ; secret for s3 auth
359 359 archive_cache.objectstore.secret = secret
360 360
361 361 ; region for s3 storage
362 362 archive_cache.objectstore.region = eu-central-1
363 363
364 364 ; number of sharded buckets to create to distribute archives across
365 365 ; default is 8 shards
366 366 archive_cache.objectstore.bucket_shards = 8
367 367
368 368 ; a top-level bucket to put all other shards in
369 369 ; objects will be stored in rhodecode-archive-cache/shard-N based on the bucket_shards number
370 370 archive_cache.objectstore.bucket = rhodecode-archive-cache
371 371
372 372 ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
373 373 archive_cache.objectstore.retry = false
374 374
375 375 ; number of seconds to wait for next try using retry
376 376 archive_cache.objectstore.retry_backoff = 1
377 377
378 378 ; how many tries to do a retry fetch from this backend
379 379 archive_cache.objectstore.retry_attempts = 10
380 380
381 381 ; Default is $cache_dir/archive_cache if not set
382 382 ; Generated repo archives will be cached at this location
383 383 ; and served from the cache during subsequent requests for the same archive of
384 384 ; the repository. This path is important to be shared across filesystems and with
385 385 ; RhodeCode and vcsserver
386 386 archive_cache.filesystem.store_dir = /var/opt/rhodecode_data/archive_cache
387 387
388 388 ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
389 389 archive_cache.filesystem.cache_size_gb = 1
390 390
391 391 ; Eviction policy used to clear out after cache_size_gb limit is reached
392 392 archive_cache.filesystem.eviction_policy = least-recently-stored
393 393
394 394 ; By default cache uses sharding technique, this specifies how many shards are there
395 395 ; default is 8 shards
396 396 archive_cache.filesystem.cache_shards = 8
397 397
398 398 ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
399 399 archive_cache.filesystem.retry = false
400 400
401 401 ; number of seconds to wait for next try using retry
402 402 archive_cache.filesystem.retry_backoff = 1
403 403
404 404 ; how many tries to do a retry fetch from this backend
405 405 archive_cache.filesystem.retry_attempts = 10
406 406
407 407
408 408 ; #############
409 409 ; CELERY CONFIG
410 410 ; #############
411 411
412 412 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
413 413
414 414 use_celery = true
415 415
416 416 ; path to store schedule database
417 417 #celerybeat-schedule.path =
418 418
419 419 ; connection url to the message broker (default redis)
420 420 celery.broker_url = redis://redis:6379/8
421 421
422 422 ; results backend to get results for (default redis)
423 423 celery.result_backend = redis://redis:6379/8
424 424
425 425 ; rabbitmq example
426 426 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
427 427
428 428 ; maximum tasks to execute before worker restart
429 429 celery.max_tasks_per_child = 20
430 430
431 431 ; tasks will never be sent to the queue, but executed locally instead.
432 432 celery.task_always_eager = false
433 433
434 434 ; #############
435 435 ; DOGPILE CACHE
436 436 ; #############
437 437
438 438 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
439 439 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
440 440 cache_dir = /var/opt/rhodecode_data
441 441
442 442 ; *********************************************
443 443 ; `sql_cache_short` cache for heavy SQL queries
444 444 ; Only supported backend is `memory_lru`
445 445 ; *********************************************
446 446 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
447 447 rc_cache.sql_cache_short.expiration_time = 30
448 448
449 449
450 450 ; *****************************************************
451 451 ; `cache_repo_longterm` cache for repo object instances
452 452 ; Only supported backend is `memory_lru`
453 453 ; *****************************************************
454 454 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
455 455 ; by default we use 30 Days, cache is still invalidated on push
456 456 rc_cache.cache_repo_longterm.expiration_time = 2592000
457 457 ; max items in LRU cache, set to smaller number to save memory, and expire last used caches
458 458 rc_cache.cache_repo_longterm.max_size = 10000
459 459
460 460
461 461 ; *********************************************
462 462 ; `cache_general` cache for general purpose use
463 463 ; for simplicity use rc.file_namespace backend,
464 464 ; for performance and scale use rc.redis
465 465 ; *********************************************
466 466 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
467 467 rc_cache.cache_general.expiration_time = 43200
468 468 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
469 469 #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db
470 470
471 471 ; alternative `cache_general` redis backend with distributed lock
472 472 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
473 473 #rc_cache.cache_general.expiration_time = 300
474 474
475 475 ; redis_expiration_time needs to be greater than expiration_time
476 476 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
477 477
478 478 #rc_cache.cache_general.arguments.host = localhost
479 479 #rc_cache.cache_general.arguments.port = 6379
480 480 #rc_cache.cache_general.arguments.db = 0
481 481 #rc_cache.cache_general.arguments.socket_timeout = 30
482 482 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
483 483 #rc_cache.cache_general.arguments.distributed_lock = true
484 484
485 485 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
486 486 #rc_cache.cache_general.arguments.lock_auto_renewal = true
487 487
488 ; prefix for redis keys used for this cache backend, the final key is constructed using {custom-prefix}{key}
489 #rc_cache.cache_general.arguments.key_prefix = custom-prefix-
490
491
488 492 ; *************************************************
489 493 ; `cache_perms` cache for permission tree, auth TTL
490 494 ; for simplicity use rc.file_namespace backend,
491 495 ; for performance and scale use rc.redis
492 496 ; *************************************************
493 497 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
494 498 rc_cache.cache_perms.expiration_time = 3600
495 499 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
496 500 #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db
497 501
498 502 ; alternative `cache_perms` redis backend with distributed lock
499 503 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
500 504 #rc_cache.cache_perms.expiration_time = 300
501 505
502 506 ; redis_expiration_time needs to be greater than expiration_time
503 507 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
504 508
505 509 #rc_cache.cache_perms.arguments.host = localhost
506 510 #rc_cache.cache_perms.arguments.port = 6379
507 511 #rc_cache.cache_perms.arguments.db = 0
508 512 #rc_cache.cache_perms.arguments.socket_timeout = 30
509 513 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
510 514 #rc_cache.cache_perms.arguments.distributed_lock = true
511 515
512 516 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
513 517 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
514 518
519 ; prefix for redis keys used for this cache backend, the final key is constructed using {custom-prefix}{key}
520 #rc_cache.cache_perms.arguments.key_prefix = custom-prefix-
521
522
515 523 ; ***************************************************
516 524 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
517 525 ; for simplicity use rc.file_namespace backend,
518 526 ; for performance and scale use rc.redis
519 527 ; ***************************************************
520 528 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
521 529 rc_cache.cache_repo.expiration_time = 2592000
522 530 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
523 531 #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db
524 532
525 533 ; alternative `cache_repo` redis backend with distributed lock
526 534 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
527 535 #rc_cache.cache_repo.expiration_time = 2592000
528 536
529 537 ; redis_expiration_time needs to be greater than expiration_time
530 538 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
531 539
532 540 #rc_cache.cache_repo.arguments.host = localhost
533 541 #rc_cache.cache_repo.arguments.port = 6379
534 542 #rc_cache.cache_repo.arguments.db = 1
535 543 #rc_cache.cache_repo.arguments.socket_timeout = 30
536 544 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
537 545 #rc_cache.cache_repo.arguments.distributed_lock = true
538 546
539 547 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
540 548 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
541 549
550 ; prefix for redis keys used for this cache backend, the final key is constructed using {custom-prefix}{key}
551 #rc_cache.cache_repo.arguments.key_prefix = custom-prefix-
552
553
542 554 ; ##############
543 555 ; BEAKER SESSION
544 556 ; ##############
545 557
546 558 ; beaker.session.type is type of storage options for the logged users sessions. Current allowed
547 559 ; types are file, ext:redis, ext:database, ext:memcached
548 560 ; Fastest ones are ext:redis and ext:database, DO NOT use memory type for session
549 561 #beaker.session.type = file
550 562 #beaker.session.data_dir = /var/opt/rhodecode_data/sessions
551 563
552 564 ; Redis based sessions
553 565 beaker.session.type = ext:redis
554 566 beaker.session.url = redis://redis:6379/2
555 567
556 568 ; DB based session, fast, and allows easy management over logged in users
557 569 #beaker.session.type = ext:database
558 570 #beaker.session.table_name = db_session
559 571 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
560 572 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
561 573 #beaker.session.sa.pool_recycle = 3600
562 574 #beaker.session.sa.echo = false
563 575
564 576 beaker.session.key = rhodecode
565 577 beaker.session.secret = develop-rc-uytcxaz
566 578 beaker.session.lock_dir = /data_ramdisk/lock
567 579
568 580 ; Secure encrypted cookie. Requires AES and AES python libraries
569 581 ; you must disable beaker.session.secret to use this
570 582 #beaker.session.encrypt_key = key_for_encryption
571 583 #beaker.session.validate_key = validation_key
572 584
573 585 ; Sets session as invalid (also logging out user) if it has not been
574 586 ; accessed for given amount of time in seconds
575 587 beaker.session.timeout = 2592000
576 588 beaker.session.httponly = true
577 589
578 590 ; Path to use for the cookie. Set to prefix if you use prefix middleware
579 591 #beaker.session.cookie_path = /custom_prefix
580 592
581 593 ; Set https secure cookie
582 594 beaker.session.secure = false
583 595
584 596 ; default cookie expiration time in seconds, set to `true` to set expire
585 597 ; at browser close
586 598 #beaker.session.cookie_expires = 3600
587 599
588 600 ; #############################
589 601 ; SEARCH INDEXING CONFIGURATION
590 602 ; #############################
591 603
592 604 ; Full text search indexer is available in rhodecode-tools under
593 605 ; `rhodecode-tools index` command
594 606
595 607 ; WHOOSH Backend, doesn't require additional services to run
596 608 ; it works good with few dozen repos
597 609 search.module = rhodecode.lib.index.whoosh
598 610 search.location = %(here)s/data/index
599 611
600 612 ; ####################
601 613 ; CHANNELSTREAM CONFIG
602 614 ; ####################
603 615
604 616 ; channelstream enables persistent connections and live notification
605 617 ; in the system. It's also used by the chat system
606 618
607 619 channelstream.enabled = true
608 620
609 621 ; server address for channelstream server on the backend
610 622 channelstream.server = channelstream:9800
611 623
612 624 ; location of the channelstream server from outside world
613 625 ; use ws:// for http or wss:// for https. This address needs to be handled
614 626 ; by external HTTP server such as Nginx or Apache
615 627 ; see Nginx/Apache configuration examples in our docs
616 628 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
617 629 channelstream.secret = ENV_GENERATED
618 630 channelstream.history.location = /var/opt/rhodecode_data/channelstream_history
619 631
620 632 ; Internal application path that Javascript uses to connect into.
621 633 ; If you use proxy-prefix the prefix should be added before /_channelstream
622 634 channelstream.proxy_path = /_channelstream
623 635
624 636
625 637 ; ##############################
626 638 ; MAIN RHODECODE DATABASE CONFIG
627 639 ; ##############################
628 640
629 641 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
630 642 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
631 643 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
632 644 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
633 645 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
634 646
635 647 sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
636 648
637 649 ; see sqlalchemy docs for other advanced settings
638 650 ; print the sql statements to output
639 651 sqlalchemy.db1.echo = false
640 652
641 653 ; recycle the connections after this amount of seconds
642 654 sqlalchemy.db1.pool_recycle = 3600
643 655
644 656 ; the number of connections to keep open inside the connection pool.
645 657 ; 0 indicates no limit
646 658 ; the general calculus with gevent is:
647 659 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
648 660 ; then increase pool size + max overflow so that they add up to 500.
649 661 #sqlalchemy.db1.pool_size = 5
650 662
651 663 ; The number of connections to allow in connection pool "overflow", that is
652 664 ; connections that can be opened above and beyond the pool_size setting,
653 665 ; which defaults to five.
654 666 #sqlalchemy.db1.max_overflow = 10
655 667
656 668 ; Connection check ping, used to detect broken database connections
657 669 ; could be enabled to better handle cases if MySQL has gone away errors
658 670 #sqlalchemy.db1.ping_connection = true
659 671
660 672 ; ##########
661 673 ; VCS CONFIG
662 674 ; ##########
663 675 vcs.server.enable = true
664 676 vcs.server = vcsserver:10010
665 677
666 678 ; Web server connectivity protocol, responsible for web based VCS operations
667 679 ; Available protocols are:
668 680 ; `http` - use http-rpc backend (default)
669 681 vcs.server.protocol = http
670 682
671 683 ; Push/Pull operations protocol, available options are:
672 684 ; `http` - use http-rpc backend (default)
673 685 vcs.scm_app_implementation = http
674 686
675 687 ; Push/Pull operations hooks protocol, available options are:
676 688 ; `http` - use http-rpc backend (default)
677 689 ; `celery` - use celery based hooks
678 690 #DEPRECATED:vcs.hooks.protocol = http
679 691 vcs.hooks.protocol.v2 = celery
680 692
681 693 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
682 694 ; accessible via network.
683 695 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
684 696 vcs.hooks.host = *
685 697
686 698 ; Start VCSServer with this instance as a subprocess, useful for development
687 699 vcs.start_server = false
688 700
689 701 ; List of enabled VCS backends, available options are:
690 702 ; `hg` - mercurial
691 703 ; `git` - git
692 704 ; `svn` - subversion
693 705 vcs.backends = hg, git, svn
694 706
695 707 ; Wait this number of seconds before killing connection to the vcsserver
696 708 vcs.connection_timeout = 3600
697 709
698 710 ; Cache flag to cache vcsserver remote calls locally
699 711 ; It uses cache_region `cache_repo`
700 712 vcs.methods.cache = true
701 713
702 714 ; Filesystem location where Git lfs objects should be stored
703 715 vcs.git.lfs.storage_location = /var/opt/rhodecode_repo_store/.cache/git_lfs_store
704 716
705 717 ; Filesystem location where Mercurial largefile objects should be stored
706 718 vcs.hg.largefiles.storage_location = /var/opt/rhodecode_repo_store/.cache/hg_largefiles_store
707 719
708 720 ; ####################################################
709 721 ; Subversion proxy support (mod_dav_svn)
710 722 ; Maps RhodeCode repo groups into SVN paths for Apache
711 723 ; ####################################################
712 724
713 725 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
714 726 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
715 727 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
716 728 #vcs.svn.compatible_version = 1.8
717 729
718 730 ; Redis connection settings for svn integrations logic
719 731 ; This connection string needs to be the same on ce and vcsserver
720 732 vcs.svn.redis_conn = redis://redis:6379/0
721 733
722 734 ; Enable SVN proxy of requests over HTTP
723 735 vcs.svn.proxy.enabled = true
724 736
725 737 ; host to connect to running SVN subsystem
726 738 vcs.svn.proxy.host = http://svn:8090
727 739
728 740 ; Enable or disable the config file generation.
729 741 svn.proxy.generate_config = true
730 742
731 743 ; Generate config file with `SVNListParentPath` set to `On`.
732 744 svn.proxy.list_parent_path = true
733 745
734 746 ; Set location and file name of generated config file.
735 747 svn.proxy.config_file_path = /etc/rhodecode/conf/svn/mod_dav_svn.conf
736 748
737 749 ; alternative mod_dav config template. This needs to be a valid mako template
738 750 ; Example template can be found in the source code:
739 751 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
740 752 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
741 753
742 754 ; Used as a prefix to the `Location` block in the generated config file.
743 755 ; In most cases it should be set to `/`.
744 756 svn.proxy.location_root = /
745 757
746 758 ; Command to reload the mod dav svn configuration on change.
747 759 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
748 760 ; Make sure user who runs RhodeCode process is allowed to reload Apache
749 761 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
750 762
751 763 ; If the timeout expires before the reload command finishes, the command will
752 764 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
753 765 #svn.proxy.reload_timeout = 10
754 766
755 767 ; ####################
756 768 ; SSH Support Settings
757 769 ; ####################
758 770
759 771 ; Defines if a custom authorized_keys file should be created and written on
760 772 ; any change of user ssh keys. Setting this to false also disables possibility
761 773 ; of adding SSH keys by users from web interface. Super admins can still
762 774 ; manage SSH Keys.
763 775 ssh.generate_authorized_keyfile = true
764 776
765 777 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
766 778 # ssh.authorized_keys_ssh_opts =
767 779
768 780 ; Path to the authorized_keys file where the generate entries are placed.
769 781 ; It is possible to have multiple key files specified in `sshd_config` e.g.
770 782 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
771 783 ssh.authorized_keys_file_path = /etc/rhodecode/conf/ssh/authorized_keys_rhodecode
772 784
773 785 ; Command to execute the SSH wrapper. The binary is available in the
774 786 ; RhodeCode installation directory.
775 787 ; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
776 788 ; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2
777 789 #DEPRECATED: ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
778 790 ssh.wrapper_cmd.v2 = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2
779 791
780 792 ; Allow shell when executing the ssh-wrapper command
781 793 ssh.wrapper_cmd_allow_shell = false
782 794
783 795 ; Enables logging, and detailed output send back to the client during SSH
784 796 ; operations. Useful for debugging, shouldn't be used in production.
785 797 ssh.enable_debug_logging = true
786 798
787 799 ; Paths to binary executable, by default they are the names, but we can
788 800 ; override them if we want to use a custom one
789 801 ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg
790 802 ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git
791 803 ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve
792 804
793 805 ; Enables SSH key generator web interface. Disabling this still allows users
794 806 ; to add their own keys.
795 807 ssh.enable_ui_key_generator = true
796 808
797 809 ; Statsd client config, this is used to send metrics to statsd
798 810 ; We recommend setting statsd_exported and scrape them using Prometheus
799 811 #statsd.enabled = false
800 812 #statsd.statsd_host = 0.0.0.0
801 813 #statsd.statsd_port = 8125
802 814 #statsd.statsd_prefix =
803 815 #statsd.statsd_ipv6 = false
804 816
805 817 ; configure logging automatically at server startup set to false
806 818 ; to use the below custom logging config.
807 819 ; RC_LOGGING_FORMATTER
808 820 ; RC_LOGGING_LEVEL
809 821 ; env variables can control the settings for logging in case of autoconfigure
810 822
811 823 #logging.autoconfigure = true
812 824
813 825 ; specify your own custom logging config file to configure logging
814 826 #logging.logging_conf_file = /path/to/custom_logging.ini
815 827
816 828 ; Dummy marker to add new entries after.
817 829 ; Add any custom entries below. Please don't remove this marker.
818 830 custom.conf = 1
819 831
820 832
821 833 ; #####################
822 834 ; LOGGING CONFIGURATION
823 835 ; #####################
824 836
825 837 [loggers]
826 838 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper
827 839
828 840 [handlers]
829 841 keys = console, console_sql
830 842
831 843 [formatters]
832 844 keys = generic, json, color_formatter, color_formatter_sql
833 845
834 846 ; #######
835 847 ; LOGGERS
836 848 ; #######
837 849 [logger_root]
838 850 level = NOTSET
839 851 handlers = console
840 852
841 853 [logger_sqlalchemy]
842 854 level = INFO
843 855 handlers = console_sql
844 856 qualname = sqlalchemy.engine
845 857 propagate = 0
846 858
847 859 [logger_beaker]
848 860 level = DEBUG
849 861 handlers =
850 862 qualname = beaker.container
851 863 propagate = 1
852 864
853 865 [logger_rhodecode]
854 866 level = DEBUG
855 867 handlers =
856 868 qualname = rhodecode
857 869 propagate = 1
858 870
859 871 [logger_ssh_wrapper]
860 872 level = DEBUG
861 873 handlers =
862 874 qualname = ssh_wrapper
863 875 propagate = 1
864 876
865 877 [logger_celery]
866 878 level = DEBUG
867 879 handlers =
868 880 qualname = celery
869 881
870 882
871 883 ; ########
872 884 ; HANDLERS
873 885 ; ########
874 886
875 887 [handler_console]
876 888 class = StreamHandler
877 889 args = (sys.stderr, )
878 890 level = DEBUG
879 891 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
880 892 ; This allows sending properly formatted logs to grafana loki or elasticsearch
881 893 formatter = color_formatter
882 894
883 895 [handler_console_sql]
884 896 ; "level = DEBUG" logs SQL queries and results.
885 897 ; "level = INFO" logs SQL queries.
886 898 ; "level = WARN" logs neither. (Recommended for production systems.)
887 899 class = StreamHandler
888 900 args = (sys.stderr, )
889 901 level = WARN
890 902 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
891 903 ; This allows sending properly formatted logs to grafana loki or elasticsearch
892 904 formatter = color_formatter_sql
893 905
894 906 ; ##########
895 907 ; FORMATTERS
896 908 ; ##########
897 909
898 910 [formatter_generic]
899 911 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
900 912 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
901 913 datefmt = %Y-%m-%d %H:%M:%S
902 914
903 915 [formatter_color_formatter]
904 916 class = rhodecode.lib.logging_formatter.ColorFormatter
905 917 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
906 918 datefmt = %Y-%m-%d %H:%M:%S
907 919
908 920 [formatter_color_formatter_sql]
909 921 class = rhodecode.lib.logging_formatter.ColorFormatterSql
910 922 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
911 923 datefmt = %Y-%m-%d %H:%M:%S
912 924
913 925 [formatter_json]
914 926 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
915 927 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
@@ -1,883 +1,895 b''
1 1
2 2 ; #########################################
3 3 ; RHODECODE COMMUNITY EDITION CONFIGURATION
4 4 ; #########################################
5 5
6 6 [DEFAULT]
7 7 ; Debug flag sets all loggers to debug, and enables request tracking
8 8 debug = false
9 9
10 10 ; ########################################################################
11 11 ; EMAIL CONFIGURATION
12 12 ; These settings will be used by the RhodeCode mailing system
13 13 ; ########################################################################
14 14
15 15 ; prefix all emails subjects with given prefix, helps filtering out emails
16 16 #email_prefix = [RhodeCode]
17 17
18 18 ; email FROM address all mails will be sent
19 19 #app_email_from = rhodecode-noreply@localhost
20 20
21 21 #smtp_server = mail.server.com
22 22 #smtp_username =
23 23 #smtp_password =
24 24 #smtp_port =
25 25 #smtp_use_tls = false
26 26 #smtp_use_ssl = true
27 27
28 28 [server:main]
29 29 ; COMMON HOST/IP CONFIG, This applies mostly to develop setup,
30 30 ; Host port for gunicorn are controlled by gunicorn_conf.py
31 31 host = 127.0.0.1
32 32 port = 10020
33 33
34 34
35 35 ; ###########################
36 36 ; GUNICORN APPLICATION SERVER
37 37 ; ###########################
38 38
39 39 ; run with gunicorn --config gunicorn_conf.py --paste rhodecode.ini
40 40
41 41 ; Module to use, this setting shouldn't be changed
42 42 use = egg:gunicorn#main
43 43
44 44 ; Prefix middleware for RhodeCode.
45 45 ; recommended when using proxy setup.
46 46 ; allows to set RhodeCode under a prefix in server.
47 47 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
48 48 ; And set your prefix like: `prefix = /custom_prefix`
49 49 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
50 50 ; to make your cookies only work on prefix url
51 51 [filter:proxy-prefix]
52 52 use = egg:PasteDeploy#prefix
53 53 prefix = /
54 54
55 55 [app:main]
56 56 ; The %(here)s variable will be replaced with the absolute path of parent directory
57 57 ; of this file
58 58 ; Each option in the app:main can be override by an environmental variable
59 59 ;
60 60 ;To override an option:
61 61 ;
62 62 ;RC_<KeyName>
63 63 ;Everything should be uppercase, . and - should be replaced by _.
64 64 ;For example, if you have these configuration settings:
65 65 ;rc_cache.repo_object.backend = foo
66 66 ;can be overridden by
67 67 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
68 68
69 69 use = egg:rhodecode-enterprise-ce
70 70
71 71 ; enable proxy prefix middleware, defined above
72 72 #filter-with = proxy-prefix
73 73
74 74 ; control if environmental variables to be expanded into the .ini settings
75 75 #rhodecode.env_expand = true
76 76
77 77 ; encryption key used to encrypt social plugin tokens,
78 78 ; remote_urls with credentials etc, if not set it defaults to
79 79 ; `beaker.session.secret`
80 80 #rhodecode.encrypted_values.secret =
81 81
82 82 ; decryption strict mode (enabled by default). It controls if decryption raises
83 83 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
84 84 #rhodecode.encrypted_values.strict = false
85 85
86 86 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
87 87 ; fernet is safer, and we strongly recommend switching to it.
88 88 ; Due to backward compatibility aes is used as default.
89 89 #rhodecode.encrypted_values.algorithm = fernet
90 90
91 91 ; Return gzipped responses from RhodeCode (static files/application)
92 92 gzip_responses = false
93 93
94 94 ; Auto-generate javascript routes file on startup
95 95 generate_js_files = false
96 96
97 97 ; System global default language.
98 98 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
99 99 lang = en
100 100
101 101 ; Perform a full repository scan and import on each server start.
102 102 ; Setting this to true could lead to very long startup time.
103 103 startup.import_repos = false
104 104
105 105 ; URL at which the application is running. This is used for Bootstrapping
106 106 ; requests in context when no web request is available. Used in ishell, or
107 107 ; SSH calls. Set this for events to receive proper url for SSH calls.
108 108 app.base_url = http://rhodecode.local
109 109
110 110 ; Host at which the Service API is running.
111 111 app.service_api.host = http://rhodecode.local:10020
112 112
113 113 ; Secret for Service API authentication.
114 114 app.service_api.token =
115 115
116 116 ; Unique application ID. Should be a random unique string for security.
117 117 app_instance_uuid = rc-production
118 118
119 119 ; Cut off limit for large diffs (size in bytes). If overall diff size on
120 120 ; commit, or pull request exceeds this limit this diff will be displayed
121 121 ; partially. E.g 512000 == 512Kb
122 122 cut_off_limit_diff = 512000
123 123
124 124 ; Cut off limit for large files inside diffs (size in bytes). Each individual
125 125 ; file inside diff which exceeds this limit will be displayed partially.
126 126 ; E.g 128000 == 128Kb
127 127 cut_off_limit_file = 128000
128 128
129 129 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
130 130 vcs_full_cache = true
131 131
132 132 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
133 133 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
134 134 force_https = false
135 135
136 136 ; use Strict-Transport-Security headers
137 137 use_htsts = false
138 138
139 139 ; Set to true if your repos are exposed using the dumb protocol
140 140 git_update_server_info = false
141 141
142 142 ; RSS/ATOM feed options
143 143 rss_cut_off_limit = 256000
144 144 rss_items_per_page = 10
145 145 rss_include_diff = false
146 146
147 147 ; gist URL alias, used to create nicer urls for gist. This should be an
148 148 ; url that does rewrites to _admin/gists/{gistid}.
149 149 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
150 150 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
151 151 gist_alias_url =
152 152
153 153 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
154 154 ; used for access.
155 155 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
156 156 ; came from the logged in user who owns this authentication token.
157 157 ; Additionally @TOKEN syntax can be used to bound the view to specific
158 158 ; authentication token. Such view would be only accessible when used together
159 159 ; with this authentication token
160 160 ; list of all views can be found under `/_admin/permissions/auth_token_access`
161 161 ; The list should be "," separated and on a single line.
162 162 ; Most common views to enable:
163 163
164 164 # RepoCommitsView:repo_commit_download
165 165 # RepoCommitsView:repo_commit_patch
166 166 # RepoCommitsView:repo_commit_raw
167 167 # RepoCommitsView:repo_commit_raw@TOKEN
168 168 # RepoFilesView:repo_files_diff
169 169 # RepoFilesView:repo_archivefile
170 170 # RepoFilesView:repo_file_raw
171 171 # GistView:*
172 172 api_access_controllers_whitelist =
173 173
174 174 ; Default encoding used to convert from and to unicode
175 175 ; can be also a comma separated list of encoding in case of mixed encodings
176 176 default_encoding = UTF-8
177 177
178 178 ; instance-id prefix
179 179 ; a prefix key for this instance used for cache invalidation when running
180 180 ; multiple instances of RhodeCode, make sure it's globally unique for
181 181 ; all running RhodeCode instances. Leave empty if you don't use it
182 182 instance_id =
183 183
184 184 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
185 185 ; of an authentication plugin also if it is disabled by it's settings.
186 186 ; This could be useful if you are unable to log in to the system due to broken
187 187 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
188 188 ; module to log in again and fix the settings.
189 189 ; Available builtin plugin IDs (hash is part of the ID):
190 190 ; egg:rhodecode-enterprise-ce#rhodecode
191 191 ; egg:rhodecode-enterprise-ce#pam
192 192 ; egg:rhodecode-enterprise-ce#ldap
193 193 ; egg:rhodecode-enterprise-ce#jasig_cas
194 194 ; egg:rhodecode-enterprise-ce#headers
195 195 ; egg:rhodecode-enterprise-ce#crowd
196 196
197 197 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
198 198
199 199 ; Flag to control loading of legacy plugins in py:/path format
200 200 auth_plugin.import_legacy_plugins = true
201 201
202 202 ; alternative return HTTP header for failed authentication. Default HTTP
203 203 ; response is 401 HTTPUnauthorized. Currently HG clients have troubles with
204 204 ; handling that causing a series of failed authentication calls.
205 205 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
206 206 ; This will be served instead of default 401 on bad authentication
207 207 auth_ret_code =
208 208
209 209 ; use special detection method when serving auth_ret_code, instead of serving
210 210 ; ret_code directly, use 401 initially (Which triggers credentials prompt)
211 211 ; and then serve auth_ret_code to clients
212 212 auth_ret_code_detection = false
213 213
214 214 ; locking return code. When repository is locked return this HTTP code. 2XX
215 215 ; codes don't break the transactions while 4XX codes do
216 216 lock_ret_code = 423
217 217
218 218 ; Filesystem location where repositories should be stored
219 219 repo_store.path = /var/opt/rhodecode_repo_store
220 220
221 221 ; allows to setup custom hooks in settings page
222 222 allow_custom_hooks_settings = true
223 223
224 224 ; Generated license token required for EE edition license.
225 225 ; New generated token value can be found in Admin > settings > license page.
226 226 license_token =
227 227
228 228 ; This flag hides sensitive information on the license page such as token, and license data
229 229 license.hide_license_info = false
230 230
231 231 ; Import EE license from this license path
232 232 #license.import_path = %(here)s/rhodecode_enterprise.license
233 233
234 234 ; import license 'if-missing' or 'force' (always override)
235 235 ; if-missing means apply license if it doesn't exist. 'force' option always overrides it
236 236 license.import_path_mode = if-missing
237 237
238 238 ; supervisor connection uri, for managing supervisor and logs.
239 239 supervisor.uri =
240 240
241 241 ; supervisord group name/id we only want this RC instance to handle
242 242 supervisor.group_id = prod
243 243
244 244 ; Display extended labs settings
245 245 labs_settings_active = true
246 246
247 247 ; Custom exception store path, defaults to TMPDIR
248 248 ; This is used to store exception from RhodeCode in shared directory
249 249 #exception_tracker.store_path =
250 250
251 251 ; Send email with exception details when it happens
252 252 #exception_tracker.send_email = false
253 253
254 254 ; Comma separated list of recipients for exception emails,
255 255 ; e.g admin@rhodecode.com,devops@rhodecode.com
256 256 ; Can be left empty, then emails will be sent to ALL super-admins
257 257 #exception_tracker.send_email_recipients =
258 258
259 259 ; optional prefix to Add to email Subject
260 260 #exception_tracker.email_prefix = [RHODECODE ERROR]
261 261
262 262 ; NOTE: this setting IS DEPRECATED:
263 263 ; file_store backend is always enabled
264 264 #file_store.enabled = true
265 265
266 266 ; NOTE: this setting IS DEPRECATED:
267 267 ; file_store.backend = X -> use `file_store.backend.type = filesystem_v2` instead
268 268 ; Storage backend, available options are: local
269 269 #file_store.backend = local
270 270
271 271 ; NOTE: this setting IS DEPRECATED:
272 272 ; file_store.storage_path = X -> use `file_store.filesystem_v2.storage_path = X` instead
273 273 ; path to store the uploaded binaries and artifacts
274 274 #file_store.storage_path = /var/opt/rhodecode_data/file_store
275 275
276 276 ; Artifacts file-store, is used to store comment attachments and artifacts uploads.
277 277 ; file_store backend type: filesystem_v1, filesystem_v2 or objectstore (s3-based) are available as options
278 278 ; filesystem_v1 is backwards compat with pre 5.1 storage changes
279 279 ; new installations should choose filesystem_v2 or objectstore (s3-based), pick filesystem when migrating from
280 280 ; previous installations to keep the artifacts without a need of migration
281 281 #file_store.backend.type = filesystem_v2
282 282
283 283 ; filesystem options...
284 284 #file_store.filesystem_v1.storage_path = /var/opt/rhodecode_data/artifacts_file_store
285 285
286 286 ; filesystem_v2 options...
287 287 #file_store.filesystem_v2.storage_path = /var/opt/rhodecode_data/artifacts_file_store
288 288 #file_store.filesystem_v2.shards = 8
289 289
290 290 ; objectstore options...
291 291 ; url for s3 compatible storage that allows to upload artifacts
292 292 ; e.g http://minio:9000
293 293 #file_store.backend.type = objectstore
294 294 #file_store.objectstore.url = http://s3-minio:9000
295 295
296 296 ; a top-level bucket to put all other shards in
297 297 ; objects will be stored in rhodecode-file-store/shard-N based on the bucket_shards number
298 298 #file_store.objectstore.bucket = rhodecode-file-store
299 299
300 300 ; number of sharded buckets to create to distribute archives across
301 301 ; default is 8 shards
302 302 #file_store.objectstore.bucket_shards = 8
303 303
304 304 ; key for s3 auth
305 305 #file_store.objectstore.key = s3admin
306 306
307 307 ; secret for s3 auth
308 308 #file_store.objectstore.secret = s3secret4
309 309
310 310 ;region for s3 storage
311 311 #file_store.objectstore.region = eu-central-1
312 312
313 313 ; Redis url to acquire/check generation of archives locks
314 314 archive_cache.locking.url = redis://redis:6379/1
315 315
316 316 ; Storage backend, only 'filesystem' and 'objectstore' are available now
317 317 archive_cache.backend.type = filesystem
318 318
319 319 ; url for s3 compatible storage that allows to upload artifacts
320 320 ; e.g http://minio:9000
321 321 archive_cache.objectstore.url = http://s3-minio:9000
322 322
323 323 ; key for s3 auth
324 324 archive_cache.objectstore.key = key
325 325
326 326 ; secret for s3 auth
327 327 archive_cache.objectstore.secret = secret
328 328
329 329 ;region for s3 storage
330 330 archive_cache.objectstore.region = eu-central-1
331 331
332 332 ; number of sharded buckets to create to distribute archives across
333 333 ; default is 8 shards
334 334 archive_cache.objectstore.bucket_shards = 8
335 335
336 336 ; a top-level bucket to put all other shards in
337 337 ; objects will be stored in rhodecode-archive-cache/shard-N based on the bucket_shards number
338 338 archive_cache.objectstore.bucket = rhodecode-archive-cache
339 339
340 340 ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
341 341 archive_cache.objectstore.retry = false
342 342
343 343 ; number of seconds to wait for next try using retry
344 344 archive_cache.objectstore.retry_backoff = 1
345 345
346 346 ; how many tries to do a retry fetch from this backend
347 347 archive_cache.objectstore.retry_attempts = 10
348 348
349 349 ; Default is $cache_dir/archive_cache if not set
350 350 ; Generated repo archives will be cached at this location
351 351 ; and served from the cache during subsequent requests for the same archive of
352 352 ; the repository. This path is important to be shared across filesystems and with
353 353 ; RhodeCode and vcsserver
354 354 archive_cache.filesystem.store_dir = /var/opt/rhodecode_data/archive_cache
355 355
356 356 ; The limit in GB sets how much data we cache before recycling last used, defaults to 10 gb
357 357 archive_cache.filesystem.cache_size_gb = 40
358 358
359 359 ; Eviction policy used to clear out after cache_size_gb limit is reached
360 360 archive_cache.filesystem.eviction_policy = least-recently-stored
361 361
362 362 ; By default cache uses sharding technique, this specifies how many shards are there
363 363 ; default is 8 shards
364 364 archive_cache.filesystem.cache_shards = 8
365 365
366 366 ; if true, this cache will try to retry with retry_attempts=N times waiting retry_backoff time
367 367 archive_cache.filesystem.retry = false
368 368
369 369 ; number of seconds to wait for next try using retry
370 370 archive_cache.filesystem.retry_backoff = 1
371 371
372 372 ; how many tries to do a retry fetch from this backend
373 373 archive_cache.filesystem.retry_attempts = 10
374 374
375 375
376 376 ; #############
377 377 ; CELERY CONFIG
378 378 ; #############
379 379
380 380 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
381 381
382 382 use_celery = true
383 383
384 384 ; path to store schedule database
385 385 #celerybeat-schedule.path =
386 386
387 387 ; connection url to the message broker (default redis)
388 388 celery.broker_url = redis://redis:6379/8
389 389
390 390 ; results backend to get results for (default redis)
391 391 celery.result_backend = redis://redis:6379/8
392 392
393 393 ; rabbitmq example
394 394 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
395 395
396 396 ; maximum tasks to execute before worker restart
397 397 celery.max_tasks_per_child = 20
398 398
399 399 ; tasks will never be sent to the queue, but executed locally instead.
400 400 celery.task_always_eager = false
401 401
402 402 ; #############
403 403 ; DOGPILE CACHE
404 404 ; #############
405 405
406 406 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
407 407 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
408 408 cache_dir = /var/opt/rhodecode_data
409 409
410 410 ; *********************************************
411 411 ; `sql_cache_short` cache for heavy SQL queries
412 412 ; Only supported backend is `memory_lru`
413 413 ; *********************************************
414 414 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
415 415 rc_cache.sql_cache_short.expiration_time = 30
416 416
417 417
418 418 ; *****************************************************
419 419 ; `cache_repo_longterm` cache for repo object instances
420 420 ; Only supported backend is `memory_lru`
421 421 ; *****************************************************
422 422 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
423 423 ; by default we use 30 Days, cache is still invalidated on push
424 424 rc_cache.cache_repo_longterm.expiration_time = 2592000
425 425 ; max items in LRU cache, set to smaller number to save memory, and expire last used caches
426 426 rc_cache.cache_repo_longterm.max_size = 10000
427 427
428 428
429 429 ; *********************************************
430 430 ; `cache_general` cache for general purpose use
431 431 ; for simplicity use rc.file_namespace backend,
432 432 ; for performance and scale use rc.redis
433 433 ; *********************************************
434 434 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
435 435 rc_cache.cache_general.expiration_time = 43200
436 436 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
437 437 #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db
438 438
439 439 ; alternative `cache_general` redis backend with distributed lock
440 440 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
441 441 #rc_cache.cache_general.expiration_time = 300
442 442
443 443 ; redis_expiration_time needs to be greater than expiration_time
444 444 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
445 445
446 446 #rc_cache.cache_general.arguments.host = localhost
447 447 #rc_cache.cache_general.arguments.port = 6379
448 448 #rc_cache.cache_general.arguments.db = 0
449 449 #rc_cache.cache_general.arguments.socket_timeout = 30
450 450 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
451 451 #rc_cache.cache_general.arguments.distributed_lock = true
452 452
453 453 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
454 454 #rc_cache.cache_general.arguments.lock_auto_renewal = true
455 455
456 ; prefix for redis keys used for this cache backend, the final key is constructed using {custom-prefix}{key}
457 #rc_cache.cache_general.arguments.key_prefix = custom-prefix-
458
459
456 460 ; *************************************************
457 461 ; `cache_perms` cache for permission tree, auth TTL
458 462 ; for simplicity use rc.file_namespace backend,
459 463 ; for performance and scale use rc.redis
460 464 ; *************************************************
461 465 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
462 466 rc_cache.cache_perms.expiration_time = 3600
463 467 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
464 468 #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db
465 469
466 470 ; alternative `cache_perms` redis backend with distributed lock
467 471 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
468 472 #rc_cache.cache_perms.expiration_time = 300
469 473
470 474 ; redis_expiration_time needs to be greater than expiration_time
471 475 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
472 476
473 477 #rc_cache.cache_perms.arguments.host = localhost
474 478 #rc_cache.cache_perms.arguments.port = 6379
475 479 #rc_cache.cache_perms.arguments.db = 0
476 480 #rc_cache.cache_perms.arguments.socket_timeout = 30
477 481 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
478 482 #rc_cache.cache_perms.arguments.distributed_lock = true
479 483
480 484 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
481 485 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
482 486
487 ; prefix for redis keys used for this cache backend, the final key is constructed using {custom-prefix}{key}
488 #rc_cache.cache_perms.arguments.key_prefix = custom-prefix-
489
490
483 491 ; ***************************************************
484 492 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
485 493 ; for simplicity use rc.file_namespace backend,
486 494 ; for performance and scale use rc.redis
487 495 ; ***************************************************
488 496 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
489 497 rc_cache.cache_repo.expiration_time = 2592000
490 498 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
491 499 #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db
492 500
493 501 ; alternative `cache_repo` redis backend with distributed lock
494 502 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
495 503 #rc_cache.cache_repo.expiration_time = 2592000
496 504
497 505 ; redis_expiration_time needs to be greater than expiration_time
498 506 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
499 507
500 508 #rc_cache.cache_repo.arguments.host = localhost
501 509 #rc_cache.cache_repo.arguments.port = 6379
502 510 #rc_cache.cache_repo.arguments.db = 1
503 511 #rc_cache.cache_repo.arguments.socket_timeout = 30
504 512 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
505 513 #rc_cache.cache_repo.arguments.distributed_lock = true
506 514
507 515 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
508 516 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
509 517
518 ; prefix for redis keys used for this cache backend, the final key is constructed using {custom-prefix}{key}
519 #rc_cache.cache_repo.arguments.key_prefix = custom-prefix-
520
521
510 522 ; ##############
511 523 ; BEAKER SESSION
512 524 ; ##############
513 525
514 526 ; beaker.session.type is type of storage options for the logged users sessions. Current allowed
515 527 ; types are file, ext:redis, ext:database, ext:memcached
516 528 ; Fastest ones are ext:redis and ext:database, DO NOT use memory type for session
517 529 #beaker.session.type = file
518 530 #beaker.session.data_dir = /var/opt/rhodecode_data/sessions
519 531
520 532 ; Redis based sessions
521 533 beaker.session.type = ext:redis
522 534 beaker.session.url = redis://redis:6379/2
523 535
524 536 ; DB based session, fast, and allows easy management over logged in users
525 537 #beaker.session.type = ext:database
526 538 #beaker.session.table_name = db_session
527 539 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
528 540 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
529 541 #beaker.session.sa.pool_recycle = 3600
530 542 #beaker.session.sa.echo = false
531 543
532 544 beaker.session.key = rhodecode
533 545 beaker.session.secret = production-rc-uytcxaz
534 546 beaker.session.lock_dir = /data_ramdisk/lock
535 547
536 548 ; Secure encrypted cookie. Requires AES and AES python libraries
537 549 ; you must disable beaker.session.secret to use this
538 550 #beaker.session.encrypt_key = key_for_encryption
539 551 #beaker.session.validate_key = validation_key
540 552
541 553 ; Sets session as invalid (also logging out user) if it has not been
542 554 ; accessed for given amount of time in seconds
543 555 beaker.session.timeout = 2592000
544 556 beaker.session.httponly = true
545 557
546 558 ; Path to use for the cookie. Set to prefix if you use prefix middleware
547 559 #beaker.session.cookie_path = /custom_prefix
548 560
549 561 ; Set https secure cookie
550 562 beaker.session.secure = false
551 563
552 564 ; default cookie expiration time in seconds, set to `true` to set expire
553 565 ; at browser close
554 566 #beaker.session.cookie_expires = 3600
555 567
556 568 ; #############################
557 569 ; SEARCH INDEXING CONFIGURATION
558 570 ; #############################
559 571
560 572 ; Full text search indexer is available in rhodecode-tools under
561 573 ; `rhodecode-tools index` command
562 574
563 575 ; WHOOSH Backend, doesn't require additional services to run
564 576 ; it works good with few dozen repos
565 577 search.module = rhodecode.lib.index.whoosh
566 578 search.location = %(here)s/data/index
567 579
568 580 ; ####################
569 581 ; CHANNELSTREAM CONFIG
570 582 ; ####################
571 583
572 584 ; channelstream enables persistent connections and live notification
573 585 ; in the system. It's also used by the chat system
574 586
575 587 channelstream.enabled = true
576 588
577 589 ; server address for channelstream server on the backend
578 590 channelstream.server = channelstream:9800
579 591
580 592 ; location of the channelstream server from outside world
581 593 ; use ws:// for http or wss:// for https. This address needs to be handled
582 594 ; by external HTTP server such as Nginx or Apache
583 595 ; see Nginx/Apache configuration examples in our docs
584 596 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
585 597 channelstream.secret = ENV_GENERATED
586 598 channelstream.history.location = /var/opt/rhodecode_data/channelstream_history
587 599
588 600 ; Internal application path that Javascript uses to connect into.
589 601 ; If you use proxy-prefix the prefix should be added before /_channelstream
590 602 channelstream.proxy_path = /_channelstream
591 603
592 604
593 605 ; ##############################
594 606 ; MAIN RHODECODE DATABASE CONFIG
595 607 ; ##############################
596 608
597 609 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
598 610 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
599 611 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
600 612 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
601 613 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
602 614
603 615 sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
604 616
605 617 ; see sqlalchemy docs for other advanced settings
606 618 ; print the sql statements to output
607 619 sqlalchemy.db1.echo = false
608 620
609 621 ; recycle the connections after this amount of seconds
610 622 sqlalchemy.db1.pool_recycle = 3600
611 623
612 624 ; the number of connections to keep open inside the connection pool.
613 625 ; 0 indicates no limit
614 626 ; the general calculus with gevent is:
615 627 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
616 628 ; then increase pool size + max overflow so that they add up to 500.
617 629 #sqlalchemy.db1.pool_size = 5
618 630
619 631 ; The number of connections to allow in connection pool "overflow", that is
620 632 ; connections that can be opened above and beyond the pool_size setting,
621 633 ; which defaults to five.
622 634 #sqlalchemy.db1.max_overflow = 10
623 635
624 636 ; Connection check ping, used to detect broken database connections
625 637 ; could be enabled to better handle cases if MySQL has gone away errors
626 638 #sqlalchemy.db1.ping_connection = true
627 639
628 640 ; ##########
629 641 ; VCS CONFIG
630 642 ; ##########
631 643 vcs.server.enable = true
632 644 vcs.server = vcsserver:10010
633 645
634 646 ; Web server connectivity protocol, responsible for web based VCS operations
635 647 ; Available protocols are:
636 648 ; `http` - use http-rpc backend (default)
637 649 vcs.server.protocol = http
638 650
639 651 ; Push/Pull operations protocol, available options are:
640 652 ; `http` - use http-rpc backend (default)
641 653 vcs.scm_app_implementation = http
642 654
643 655 ; Push/Pull operations hooks protocol, available options are:
644 656 ; `http` - use http-rpc backend (default)
645 657 ; `celery` - use celery based hooks
646 658 #DEPRECATED:vcs.hooks.protocol = http
647 659 vcs.hooks.protocol.v2 = celery
648 660
649 661 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
650 662 ; accessible via network.
651 663 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
652 664 vcs.hooks.host = *
653 665
654 666 ; Start VCSServer with this instance as a subprocess, useful for development
655 667 vcs.start_server = false
656 668
657 669 ; List of enabled VCS backends, available options are:
658 670 ; `hg` - mercurial
659 671 ; `git` - git
660 672 ; `svn` - subversion
661 673 vcs.backends = hg, git, svn
662 674
663 675 ; Wait this number of seconds before killing connection to the vcsserver
664 676 vcs.connection_timeout = 3600
665 677
666 678 ; Cache flag to cache vcsserver remote calls locally
667 679 ; It uses cache_region `cache_repo`
668 680 vcs.methods.cache = true
669 681
670 682 ; Filesystem location where Git lfs objects should be stored
671 683 vcs.git.lfs.storage_location = /var/opt/rhodecode_repo_store/.cache/git_lfs_store
672 684
673 685 ; Filesystem location where Mercurial largefile objects should be stored
674 686 vcs.hg.largefiles.storage_location = /var/opt/rhodecode_repo_store/.cache/hg_largefiles_store
675 687
676 688 ; ####################################################
677 689 ; Subversion proxy support (mod_dav_svn)
678 690 ; Maps RhodeCode repo groups into SVN paths for Apache
679 691 ; ####################################################
680 692
681 693 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
682 694 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
683 695 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
684 696 #vcs.svn.compatible_version = 1.8
685 697
686 698 ; Redis connection settings for svn integrations logic
687 699 ; This connection string needs to be the same on ce and vcsserver
688 700 vcs.svn.redis_conn = redis://redis:6379/0
689 701
690 702 ; Enable SVN proxy of requests over HTTP
691 703 vcs.svn.proxy.enabled = true
692 704
693 705 ; host to connect to running SVN subsystem
694 706 vcs.svn.proxy.host = http://svn:8090
695 707
696 708 ; Enable or disable the config file generation.
697 709 svn.proxy.generate_config = true
698 710
699 711 ; Generate config file with `SVNListParentPath` set to `On`.
700 712 svn.proxy.list_parent_path = true
701 713
702 714 ; Set location and file name of generated config file.
703 715 svn.proxy.config_file_path = /etc/rhodecode/conf/svn/mod_dav_svn.conf
704 716
705 717 ; alternative mod_dav config template. This needs to be a valid mako template
706 718 ; Example template can be found in the source code:
707 719 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
708 720 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
709 721
710 722 ; Used as a prefix to the `Location` block in the generated config file.
711 723 ; In most cases it should be set to `/`.
712 724 svn.proxy.location_root = /
713 725
714 726 ; Command to reload the mod dav svn configuration on change.
715 727 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
716 728 ; Make sure user who runs RhodeCode process is allowed to reload Apache
717 729 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
718 730
719 731 ; If the timeout expires before the reload command finishes, the command will
720 732 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
721 733 #svn.proxy.reload_timeout = 10
722 734
723 735 ; ####################
724 736 ; SSH Support Settings
725 737 ; ####################
726 738
727 739 ; Defines if a custom authorized_keys file should be created and written on
728 740 ; any change of user ssh keys. Setting this to false also disables possibility
729 741 ; of adding SSH keys by users from web interface. Super admins can still
730 742 ; manage SSH Keys.
731 743 ssh.generate_authorized_keyfile = true
732 744
733 745 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
734 746 # ssh.authorized_keys_ssh_opts =
735 747
736 748 ; Path to the authorized_keys file where the generate entries are placed.
737 749 ; It is possible to have multiple key files specified in `sshd_config` e.g.
738 750 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
739 751 ssh.authorized_keys_file_path = /etc/rhodecode/conf/ssh/authorized_keys_rhodecode
740 752
741 753 ; Command to execute the SSH wrapper. The binary is available in the
742 754 ; RhodeCode installation directory.
743 755 ; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
744 756 ; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2
745 757 #DEPRECATED: ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
746 758 ssh.wrapper_cmd.v2 = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2
747 759
748 760 ; Allow shell when executing the ssh-wrapper command
749 761 ssh.wrapper_cmd_allow_shell = false
750 762
751 763 ; Enables logging, and detailed output send back to the client during SSH
752 764 ; operations. Useful for debugging, shouldn't be used in production.
753 765 ssh.enable_debug_logging = false
754 766
755 767 ; Paths to binary executable, by default they are the names, but we can
756 768 ; override them if we want to use a custom one
757 769 ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg
758 770 ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git
759 771 ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve
760 772
761 773 ; Enables SSH key generator web interface. Disabling this still allows users
762 774 ; to add their own keys.
763 775 ssh.enable_ui_key_generator = true
764 776
765 777 ; Statsd client config, this is used to send metrics to statsd
766 778 ; We recommend setting statsd_exported and scrape them using Prometheus
767 779 #statsd.enabled = false
768 780 #statsd.statsd_host = 0.0.0.0
769 781 #statsd.statsd_port = 8125
770 782 #statsd.statsd_prefix =
771 783 #statsd.statsd_ipv6 = false
772 784
773 785 ; configure logging automatically at server startup set to false
774 786 ; to use the below custom logging config.
775 787 ; RC_LOGGING_FORMATTER
776 788 ; RC_LOGGING_LEVEL
777 789 ; env variables can control the settings for logging in case of autoconfigure
778 790
779 791 #logging.autoconfigure = true
780 792
781 793 ; specify your own custom logging config file to configure logging
782 794 #logging.logging_conf_file = /path/to/custom_logging.ini
783 795
784 796 ; Dummy marker to add new entries after.
785 797 ; Add any custom entries below. Please don't remove this marker.
786 798 custom.conf = 1
787 799
788 800
789 801 ; #####################
790 802 ; LOGGING CONFIGURATION
791 803 ; #####################
792 804
793 805 [loggers]
794 806 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper
795 807
796 808 [handlers]
797 809 keys = console, console_sql
798 810
799 811 [formatters]
800 812 keys = generic, json, color_formatter, color_formatter_sql
801 813
802 814 ; #######
803 815 ; LOGGERS
804 816 ; #######
805 817 [logger_root]
806 818 level = NOTSET
807 819 handlers = console
808 820
809 821 [logger_sqlalchemy]
810 822 level = INFO
811 823 handlers = console_sql
812 824 qualname = sqlalchemy.engine
813 825 propagate = 0
814 826
815 827 [logger_beaker]
816 828 level = DEBUG
817 829 handlers =
818 830 qualname = beaker.container
819 831 propagate = 1
820 832
821 833 [logger_rhodecode]
822 834 level = DEBUG
823 835 handlers =
824 836 qualname = rhodecode
825 837 propagate = 1
826 838
827 839 [logger_ssh_wrapper]
828 840 level = DEBUG
829 841 handlers =
830 842 qualname = ssh_wrapper
831 843 propagate = 1
832 844
833 845 [logger_celery]
834 846 level = DEBUG
835 847 handlers =
836 848 qualname = celery
837 849
838 850
839 851 ; ########
840 852 ; HANDLERS
841 853 ; ########
842 854
843 855 [handler_console]
844 856 class = StreamHandler
845 857 args = (sys.stderr, )
846 858 level = INFO
847 859 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
848 860 ; This allows sending properly formatted logs to grafana loki or elasticsearch
849 861 formatter = generic
850 862
851 863 [handler_console_sql]
852 864 ; "level = DEBUG" logs SQL queries and results.
853 865 ; "level = INFO" logs SQL queries.
854 866 ; "level = WARN" logs neither. (Recommended for production systems.)
855 867 class = StreamHandler
856 868 args = (sys.stderr, )
857 869 level = WARN
858 870 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
859 871 ; This allows sending properly formatted logs to grafana loki or elasticsearch
860 872 formatter = generic
861 873
862 874 ; ##########
863 875 ; FORMATTERS
864 876 ; ##########
865 877
866 878 [formatter_generic]
867 879 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
868 880 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
869 881 datefmt = %Y-%m-%d %H:%M:%S
870 882
871 883 [formatter_color_formatter]
872 884 class = rhodecode.lib.logging_formatter.ColorFormatter
873 885 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
874 886 datefmt = %Y-%m-%d %H:%M:%S
875 887
876 888 [formatter_color_formatter_sql]
877 889 class = rhodecode.lib.logging_formatter.ColorFormatterSql
878 890 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
879 891 datefmt = %Y-%m-%d %H:%M:%S
880 892
881 893 [formatter_json]
882 894 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
883 895 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
@@ -1,342 +1,345 b''
1 1 # Copyright (C) 2015-2023 RhodeCode GmbH
2 2 #
3 3 # This program is free software: you can redistribute it and/or modify
4 4 # it under the terms of the GNU Affero General Public License, version 3
5 5 # (only), as published by the Free Software Foundation.
6 6 #
7 7 # This program is distributed in the hope that it will be useful,
8 8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 10 # GNU General Public License for more details.
11 11 #
12 12 # You should have received a copy of the GNU Affero General Public License
13 13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 14 #
15 15 # This program is dual-licensed. If you wish to learn more about the
16 16 # RhodeCode Enterprise Edition, including its added features, Support services,
17 17 # and proprietary license terms, please see https://rhodecode.com/licenses/
18 18
19 19 import errno
20 20 import fcntl
21 21 import functools
22 22 import logging
23 23 import os
24 24 import pickle
25 25 import time
26 26
27 27 import gevent
28 28 import msgpack
29 29 import redis
30 30
31 31 flock_org = fcntl.flock
32 32 from typing import Union
33 33
34 34 from dogpile.cache.api import Deserializer, Serializer
35 35 from dogpile.cache.backends import file as file_backend
36 36 from dogpile.cache.backends import memory as memory_backend
37 37 from dogpile.cache.backends import redis as redis_backend
38 38 from dogpile.cache.backends.file import FileLock
39 39 from dogpile.cache.util import memoized_property
40 40
41 41 from ...lib.memory_lru_dict import LRUDict, LRUDictDebug
42 42 from ...lib.str_utils import safe_bytes, safe_str
43 43 from ...lib.type_utils import str2bool
44 44
45 45 _default_max_size = 1024
46 46
47 47 log = logging.getLogger(__name__)
48 48
49 49
50 50 class LRUMemoryBackend(memory_backend.MemoryBackend):
51 51 key_prefix = 'lru_mem_backend'
52 52 pickle_values = False
53 53
54 54 def __init__(self, arguments):
55 55 self.max_size = arguments.pop('max_size', _default_max_size)
56 56
57 57 LRUDictClass = LRUDict
58 58 if arguments.pop('log_key_count', None):
59 59 LRUDictClass = LRUDictDebug
60 60
61 61 arguments['cache_dict'] = LRUDictClass(self.max_size)
62 62 super().__init__(arguments)
63 63
64 64 def __repr__(self):
65 65 return f'{self.__class__}(maxsize=`{self.max_size}`)'
66 66
67 67 def __str__(self):
68 68 return self.__repr__()
69 69
70 70 def delete(self, key):
71 71 try:
72 72 del self._cache[key]
73 73 except KeyError:
74 74 # we don't care if key isn't there at deletion
75 75 pass
76 76
77 77 def list_keys(self, prefix):
78 78 return list(self._cache.keys())
79 79
80 80 def delete_multi(self, keys):
81 81 for key in keys:
82 82 self.delete(key)
83 83
84 84 def delete_multi_by_prefix(self, prefix):
85 85 cache_keys = self.list_keys(prefix=prefix)
86 86 num_affected_keys = len(cache_keys)
87 87 if num_affected_keys:
88 88 self.delete_multi(cache_keys)
89 89 return num_affected_keys
90 90
91 91
92 92 class PickleSerializer:
93 93 serializer: None | Serializer = staticmethod( # type: ignore
94 94 functools.partial(pickle.dumps, protocol=pickle.HIGHEST_PROTOCOL)
95 95 )
96 96 deserializer: None | Deserializer = staticmethod( # type: ignore
97 97 functools.partial(pickle.loads)
98 98 )
99 99
100 100
101 101 class MsgPackSerializer:
102 102 serializer: None | Serializer = staticmethod( # type: ignore
103 103 msgpack.packb
104 104 )
105 105 deserializer: None | Deserializer = staticmethod( # type: ignore
106 106 functools.partial(msgpack.unpackb, use_list=False)
107 107 )
108 108
109 109
110 110 class CustomLockFactory(FileLock):
111 111
112 112 @memoized_property
113 113 def _module(self):
114 114
115 115 def gevent_flock(fd, operation):
116 116 """
117 117 Gevent compatible flock
118 118 """
119 119 # set non-blocking, this will cause an exception if we cannot acquire a lock
120 120 operation |= fcntl.LOCK_NB
121 121 start_lock_time = time.time()
122 122 timeout = 60 * 15 # 15min
123 123 while True:
124 124 try:
125 125 flock_org(fd, operation)
126 126 # lock has been acquired
127 127 break
128 128 except (OSError, IOError) as e:
129 129 # raise on other errors than Resource temporarily unavailable
130 130 if e.errno != errno.EAGAIN:
131 131 raise
132 132 elif (time.time() - start_lock_time) > timeout:
133 133 # waited too much time on a lock, better fail than loop forever
134 134 log.error('Failed to acquire lock on `%s` after waiting %ss',
135 135 self.filename, timeout)
136 136 raise
137 137 wait_timeout = 0.03
138 138 log.debug('Failed to acquire lock on `%s`, retry in %ss',
139 139 self.filename, wait_timeout)
140 140 gevent.sleep(wait_timeout)
141 141
142 142 fcntl.flock = gevent_flock
143 143 return fcntl
144 144
145 145
146 146 class FileNamespaceBackend(PickleSerializer, file_backend.DBMBackend):
147 147 key_prefix = 'file_backend'
148 148
149 149 def __init__(self, arguments):
150 150 arguments['lock_factory'] = CustomLockFactory
151 151 db_file = arguments.get('filename')
152 152
153 153 log.debug('initialing cache-backend=%s db in %s', self.__class__.__name__, db_file)
154 154 db_file_dir = os.path.dirname(db_file)
155 155 if not os.path.isdir(db_file_dir):
156 156 os.makedirs(db_file_dir)
157 157
158 158 try:
159 159 super().__init__(arguments)
160 160 except Exception:
161 161 log.exception('Failed to initialize db at: %s', db_file)
162 162 raise
163 163
164 164 def __repr__(self):
165 165 return f'{self.__class__}(file=`{self.filename}`)'
166 166
167 167 def __str__(self):
168 168 return self.__repr__()
169 169
170 170 def _get_keys_pattern(self, prefix: bytes = b''):
171 171 return b'%b:%b' % (safe_bytes(self.key_prefix), safe_bytes(prefix))
172 172
173 173 def list_keys(self, prefix: bytes = b''):
174 174 prefix = self._get_keys_pattern(prefix)
175 175
176 176 def cond(dbm_key: bytes):
177 177 if not prefix:
178 178 return True
179 179
180 180 if dbm_key.startswith(prefix):
181 181 return True
182 182 return False
183 183
184 184 with self._dbm_file(True) as dbm:
185 185 try:
186 186 return list(filter(cond, dbm.keys()))
187 187 except Exception:
188 188 log.error('Failed to fetch DBM keys from DB: %s', self.get_store())
189 189 raise
190 190
191 191 def delete_multi_by_prefix(self, prefix):
192 192 cache_keys = self.list_keys(prefix=prefix)
193 193 num_affected_keys = len(cache_keys)
194 194 if num_affected_keys:
195 195 self.delete_multi(cache_keys)
196 196 return num_affected_keys
197 197
198 198 def get_store(self):
199 199 return self.filename
200 200
201 201 def cleanup_store(self):
202 202 for ext in ("db", "dat", "pag", "dir"):
203 203 final_filename = self.filename + os.extsep + ext
204 204 if os.path.exists(final_filename):
205 205 os.remove(final_filename)
206 206 log.warning('Removed dbm file %s', final_filename)
207 207
208 208
209 209 class BaseRedisBackend(redis_backend.RedisBackend):
210 210 key_prefix = ''
211 211
212 212 def __init__(self, arguments):
213 213 self.db_conn = arguments.get('host', '') or arguments.get('url', '') or 'redis-host'
214 214 super().__init__(arguments)
215 215
216 216 self._lock_timeout = self.lock_timeout
217 217 self._lock_auto_renewal = str2bool(arguments.pop("lock_auto_renewal", True))
218 218
219 self._store_key_prefix = arguments.pop('key_prefix', '')
220 self.key_prefix = f'{self._store_key_prefix}{self.key_prefix}'
221
219 222 if self._lock_auto_renewal and not self._lock_timeout:
220 223 # set default timeout for auto_renewal
221 224 self._lock_timeout = 30
222 225
223 226 def __repr__(self):
224 227 return f'{self.__class__}(conn=`{self.db_conn}`)'
225 228
226 229 def __str__(self):
227 230 return self.__repr__()
228 231
229 232 def _create_client(self):
230 233 args = {}
231 234
232 235 if self.url is not None:
233 236 args.update(url=self.url)
234 237
235 238 else:
236 239 args.update(
237 240 host=self.host, password=self.password,
238 241 port=self.port, db=self.db
239 242 )
240 243
241 244 connection_pool = redis.ConnectionPool(**args)
242 245 self.writer_client = redis.StrictRedis(
243 246 connection_pool=connection_pool
244 247 )
245 248 self.reader_client = self.writer_client
246 249
247 250 def _get_keys_pattern(self, prefix: bytes = b''):
248 251 return b'%b:%b*' % (safe_bytes(self.key_prefix), safe_bytes(prefix))
249 252
250 253 def list_keys(self, prefix: bytes = b''):
251 254 prefix = self._get_keys_pattern(prefix)
252 255 return self.reader_client.keys(prefix)
253 256
254 257 def delete_multi_by_prefix(self, prefix, use_lua=False):
255 258 if use_lua:
256 259 # high efficient LUA script to delete ALL keys by prefix...
257 260 lua = """local keys = redis.call('keys', ARGV[1])
258 261 for i=1,#keys,5000 do
259 262 redis.call('del', unpack(keys, i, math.min(i+(5000-1), #keys)))
260 263 end
261 264 return #keys"""
262 265 num_affected_keys = self.writer_client.eval(
263 266 lua,
264 267 0,
265 268 f"{prefix}*")
266 269 else:
267 270 cache_keys = self.list_keys(prefix=prefix)
268 271 num_affected_keys = len(cache_keys)
269 272 if num_affected_keys:
270 273 self.delete_multi(cache_keys)
271 274 return num_affected_keys
272 275
273 276 def get_store(self):
274 277 return self.reader_client.connection_pool
275 278
276 279 def get_mutex(self, key):
277 280 if self.distributed_lock:
278 lock_key = f'_lock_{safe_str(key)}'
281 lock_key = f'{self._store_key_prefix}_lock_{safe_str(key)}'
279 282 return get_mutex_lock(
280 283 self.writer_client, lock_key,
281 284 self._lock_timeout,
282 285 auto_renewal=self._lock_auto_renewal
283 286 )
284 287 else:
285 288 return None
286 289
287 290
288 291 class RedisPickleBackend(PickleSerializer, BaseRedisBackend):
289 292 key_prefix = 'redis_pickle_backend'
290 293 pass
291 294
292 295
293 296 class RedisMsgPackBackend(MsgPackSerializer, BaseRedisBackend):
294 297 key_prefix = 'redis_msgpack_backend'
295 298 pass
296 299
297 300
298 301 def get_mutex_lock(client, lock_key, lock_timeout, auto_renewal=False):
299 302 from ...lib._vendor import redis_lock
300 303
301 304 class _RedisLockWrapper:
302 305 """LockWrapper for redis_lock"""
303 306
304 307 @classmethod
305 308 def get_lock(cls):
306 309 return redis_lock.Lock(
307 310 redis_client=client,
308 311 name=lock_key,
309 312 expire=lock_timeout,
310 313 auto_renewal=auto_renewal,
311 314 strict=True,
312 315 )
313 316
314 317 def __repr__(self):
315 318 return f"{self.__class__.__name__}:{lock_key}"
316 319
317 320 def __str__(self):
318 321 return f"{self.__class__.__name__}:{lock_key}"
319 322
320 323 def __init__(self):
321 324 self.lock = self.get_lock()
322 325 self.lock_key = lock_key
323 326
324 327 def acquire(self, wait=True):
325 328 log.debug('Trying to acquire Redis lock for key %s', self.lock_key)
326 329 try:
327 330 acquired = self.lock.acquire(wait)
328 331 log.debug('Got lock for key %s, %s', self.lock_key, acquired)
329 332 return acquired
330 333 except redis_lock.AlreadyAcquired:
331 334 return False
332 335 except redis_lock.AlreadyStarted:
333 336 # refresh thread exists, but it also means we acquired the lock
334 337 return True
335 338
336 339 def release(self):
337 340 try:
338 341 self.lock.release()
339 342 except redis_lock.NotAcquired:
340 343 pass
341 344
342 345 return _RedisLockWrapper()
General Comments 0
You need to be logged in to leave comments. Login now