feat(archive-cache): objectstore now makes bucket required, and uses shards as folders inside it
super-admin - r5447:ba9c215d default
@@ -1,850 +1,849 b''
1 1
2 2 ; #########################################
3 3 ; RHODECODE COMMUNITY EDITION CONFIGURATION
4 4 ; #########################################
5 5
6 6 [DEFAULT]
7 7 ; Debug flag sets all loggers to debug, and enables request tracking
8 8 debug = true
9 9
10 10 ; ########################################################################
11 11 ; EMAIL CONFIGURATION
12 12 ; These settings will be used by the RhodeCode mailing system
13 13 ; ########################################################################
14 14
15 15 ; prefix all emails subjects with given prefix, helps filtering out emails
16 16 #email_prefix = [RhodeCode]
17 17
18 18 ; email FROM address all mails will be sent
19 19 #app_email_from = rhodecode-noreply@localhost
20 20
21 21 #smtp_server = mail.server.com
22 22 #smtp_username =
23 23 #smtp_password =
24 24 #smtp_port =
25 25 #smtp_use_tls = false
26 26 #smtp_use_ssl = true
27 27
28 28 [server:main]
29 29 ; COMMON HOST/IP CONFIG. This applies mostly to development setups;
30 30 ; host and port for gunicorn are controlled by gunicorn_conf.py
31 31 host = 127.0.0.1
32 32 port = 10020
33 33
34 34
35 35 ; ###########################
36 36 ; GUNICORN APPLICATION SERVER
37 37 ; ###########################
38 38
39 39 ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py
40 40
41 41 ; Module to use, this setting shouldn't be changed
42 42 use = egg:gunicorn#main
43 43
44 44 ; Prefix middleware for RhodeCode.
45 45 ; recommended when using proxy setup.
46 46 ; allows setting RhodeCode under a prefix on the server.
47 47 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
48 48 ; And set your prefix like: `prefix = /custom_prefix`
49 49 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
50 50 ; to make your cookies only work on prefix url
51 51 [filter:proxy-prefix]
52 52 use = egg:PasteDeploy#prefix
53 53 prefix = /
54 54
55 55 [app:main]
56 56 ; The %(here)s variable will be replaced with the absolute path of parent directory
57 57 ; of this file
58 58 ; Each option in the app:main can be overridden by an environment variable
59 59 ;
60 60 ;To override an option:
61 61 ;
62 62 ;RC_<KeyName>
63 63 ;Everything should be uppercase, . and - should be replaced by _.
64 64 ;For example, if you have these configuration settings:
65 65 ;rc_cache.repo_object.backend = foo
66 66 ;can be overridden by
67 67 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
68 68
69 69 use = egg:rhodecode-enterprise-ce
70 70
71 71 ; enable proxy prefix middleware, defined above
72 72 #filter-with = proxy-prefix
73 73
74 74 ; #############
75 75 ; DEBUG OPTIONS
76 76 ; #############
77 77
78 78 pyramid.reload_templates = true
79 79
80 80 # During development we want to have the debug toolbar enabled
81 81 pyramid.includes =
82 82 pyramid_debugtoolbar
83 83
84 84 debugtoolbar.hosts = 0.0.0.0/0
85 85 debugtoolbar.exclude_prefixes =
86 86 /css
87 87 /fonts
88 88 /images
89 89 /js
90 90
91 91 ## RHODECODE PLUGINS ##
92 92 rhodecode.includes =
93 93 rhodecode.api
94 94
95 95
96 96 # api prefix url
97 97 rhodecode.api.url = /_admin/api
98 98
99 99 ; enable debug style page
100 100 debug_style = true
101 101
102 102 ; #################
103 103 ; END DEBUG OPTIONS
104 104 ; #################
105 105
106 106 ; encryption key used to encrypt social plugin tokens,
107 107 ; remote_urls with credentials etc, if not set it defaults to
108 108 ; `beaker.session.secret`
109 109 #rhodecode.encrypted_values.secret =
110 110
111 111 ; decryption strict mode (enabled by default). It controls if decryption raises
112 112 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
113 113 #rhodecode.encrypted_values.strict = false
114 114
115 115 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
116 116 ; fernet is safer, and we strongly recommend switching to it.
117 117 ; Due to backward compatibility aes is used as default.
118 118 #rhodecode.encrypted_values.algorithm = fernet
119 119
120 120 ; Return gzipped responses from RhodeCode (static files/application)
121 121 gzip_responses = false
122 122
123 123 ; Auto-generate javascript routes file on startup
124 124 generate_js_files = false
125 125
126 126 ; System global default language.
127 127 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
128 128 lang = en
129 129
130 130 ; Perform a full repository scan and import on each server start.
131 131 ; Setting this to true could lead to a very long startup time.
132 132 startup.import_repos = false
133 133
134 134 ; URL at which the application is running. This is used for Bootstrapping
135 135 ; requests in context when no web request is available. Used in ishell, or
136 136 ; SSH calls. Set this for events to receive proper url for SSH calls.
137 137 app.base_url = http://rhodecode.local
138 138
139 139 ; Host at which the Service API is running.
140 140 app.service_api.host = http://rhodecode.local:10020
141 141
142 142 ; Secret for Service API authentication.
143 143 app.service_api.token =
144 144
145 145 ; Unique application ID. Should be a random unique string for security.
146 146 app_instance_uuid = rc-production
147 147
148 148 ; Cut off limit for large diffs (size in bytes). If the overall diff size of a
149 149 ; commit or pull request exceeds this limit, the diff will be displayed
150 150 ; partially. E.g 512000 == 512Kb
151 151 cut_off_limit_diff = 512000
152 152
153 153 ; Cut off limit for large files inside diffs (size in bytes). Each individual
154 154 ; file inside a diff which exceeds this limit will be displayed partially.
155 155 ; E.g 128000 == 128Kb
156 156 cut_off_limit_file = 128000
157 157
158 158 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
159 159 vcs_full_cache = true
160 160
161 161 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
162 162 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
163 163 force_https = false
164 164
165 165 ; use Strict-Transport-Security headers
166 166 use_htsts = false
167 167
168 168 ; Set to true if your repos are exposed using the dumb protocol
169 169 git_update_server_info = false
170 170
171 171 ; RSS/ATOM feed options
172 172 rss_cut_off_limit = 256000
173 173 rss_items_per_page = 10
174 174 rss_include_diff = false
175 175
176 176 ; gist URL alias, used to create nicer urls for gists. This should be a
177 177 ; url that does rewrites to _admin/gists/{gistid}.
178 178 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
179 179 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
180 180 gist_alias_url =
181 181
182 182 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
183 183 ; used to access.
184 184 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
185 185 ; came from the logged-in user who owns this authentication token.
186 186 ; Additionally, the @TOKEN syntax can be used to bind the view to a specific
187 187 ; authentication token. Such a view would only be accessible when used together
188 188 ; with this authentication token
189 189 ; list of all views can be found under `/_admin/permissions/auth_token_access`
190 190 ; The list should be "," separated and on a single line.
191 191 ; Most common views to enable:
192 192
193 193 # RepoCommitsView:repo_commit_download
194 194 # RepoCommitsView:repo_commit_patch
195 195 # RepoCommitsView:repo_commit_raw
196 196 # RepoCommitsView:repo_commit_raw@TOKEN
197 197 # RepoFilesView:repo_files_diff
198 198 # RepoFilesView:repo_archivefile
199 199 # RepoFilesView:repo_file_raw
200 200 # GistView:*
201 201 api_access_controllers_whitelist =
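For illustration, a hedged example (the view names come from the list above; the token value is hypothetical): enabling raw file access plus a token-bound raw commit view could look like:

    api_access_controllers_whitelist = RepoFilesView:repo_file_raw, RepoCommitsView:repo_commit_raw@db83fa1d0cc6

Matching requests would then carry ?auth_token=db83fa1d0cc6 in the url.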
202 202
203 203 ; Default encoding used to convert from and to unicode
204 204 ; can also be a comma separated list of encodings in case of mixed encodings
205 205 default_encoding = UTF-8
206 206
207 207 ; instance-id prefix
208 208 ; a prefix key for this instance used for cache invalidation when running
209 209 ; multiple instances of RhodeCode, make sure it's globally unique for
210 210 ; all running RhodeCode instances. Leave empty if you don't use it
211 211 instance_id =
212 212
213 213 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
214 214 ; of an authentication plugin even if it is disabled by its settings.
215 215 ; This could be useful if you are unable to log in to the system due to broken
216 216 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
217 217 ; module to log in again and fix the settings.
218 218 ; Available builtin plugin IDs (hash is part of the ID):
219 219 ; egg:rhodecode-enterprise-ce#rhodecode
220 220 ; egg:rhodecode-enterprise-ce#pam
221 221 ; egg:rhodecode-enterprise-ce#ldap
222 222 ; egg:rhodecode-enterprise-ce#jasig_cas
223 223 ; egg:rhodecode-enterprise-ce#headers
224 224 ; egg:rhodecode-enterprise-ce#crowd
225 225
226 226 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
227 227
228 228 ; Flag to control loading of legacy plugins in py:/path format
229 229 auth_plugin.import_legacy_plugins = true
230 230
231 231 ; alternative return HTTP header for failed authentication. Default HTTP
232 232 ; response is 401 HTTPUnauthorized. Currently HG clients have trouble
233 233 ; handling that, causing a series of failed authentication calls.
234 234 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
235 235 ; This will be served instead of default 401 on bad authentication
236 236 auth_ret_code =
237 237
238 238 ; use special detection method when serving auth_ret_code, instead of serving
239 239 ; ret_code directly, use 401 initially (which triggers a credentials prompt)
240 240 ; and then serve auth_ret_code to clients
241 241 auth_ret_code_detection = false
242 242
243 243 ; locking return code. When repository is locked return this HTTP code. 2XX
244 244 ; codes don't break the transactions while 4XX codes do
245 245 lock_ret_code = 423
246 246
247 247 ; Filesystem location where repositories should be stored
248 248 repo_store.path = /var/opt/rhodecode_repo_store
249 249
250 250 ; allows setting up custom hooks in the settings page
251 251 allow_custom_hooks_settings = true
252 252
253 253 ; Generated license token required for EE edition license.
254 254 ; New generated token value can be found in Admin > settings > license page.
255 255 license_token =
256 256
257 257 ; This flag hides sensitive information on the license page such as token, and license data
258 258 license.hide_license_info = false
259 259
260 260 ; supervisor connection uri, for managing supervisor and logs.
261 261 supervisor.uri =
262 262
263 263 ; supervisord group name/id we only want this RC instance to handle
264 264 supervisor.group_id = dev
265 265
266 266 ; Display extended labs settings
267 267 labs_settings_active = true
268 268
269 269 ; Custom exception store path, defaults to TMPDIR
270 270 ; This is used to store exceptions from RhodeCode in a shared directory
271 271 #exception_tracker.store_path =
272 272
273 273 ; Send email with exception details when it happens
274 274 #exception_tracker.send_email = false
275 275
276 276 ; Comma separated list of recipients for exception emails,
277 277 ; e.g admin@rhodecode.com,devops@rhodecode.com
278 278 ; Can be left empty, then emails will be sent to ALL super-admins
279 279 #exception_tracker.send_email_recipients =
280 280
281 281 ; optional prefix to add to the email subject
282 282 #exception_tracker.email_prefix = [RHODECODE ERROR]
283 283
284 284 ; File store configuration. This is used to store and serve uploaded files
285 285 file_store.enabled = true
286 286
287 287 ; Storage backend, available options are: local
288 288 file_store.backend = local
289 289
290 290 ; path to store the uploaded binaries and artifacts
291 291 file_store.storage_path = /var/opt/rhodecode_data/file_store
292 292
293 293
294 294 ; Redis url used to acquire/check archive generation locks
295 295 archive_cache.locking.url = redis://redis:6379/1
296 296
297 297 ; Storage backend, only 'filesystem' and 'objectstore' are available now
298 298 archive_cache.backend.type = filesystem
299 299
300 300 ; url for s3 compatible storage that allows uploading artifacts
301 301 ; e.g http://minio:9000
302 302 archive_cache.objectstore.url = http://s3-minio:9000
303 303
304 304 ; key for s3 auth
305 305 archive_cache.objectstore.key = key
306 306
307 307 ; secret for s3 auth
308 308 archive_cache.objectstore.secret = secret
309 309
310 310 ; number of shard folders to create to distribute archives across
311 311 ; default is 8 shards
312 312 archive_cache.objectstore.bucket_shards = 8
313 313
314 ; a top-level bucket to put all other sharded buckets in
315 ; in case it's empty all buckets will be created in top-level (not recommended)
316 ; objects will be stored in rhodecode-archive-cache/shard-bucket-N based on the bucket_shards number
317 archive_cache.objectstore.bucket_root = rhodecode-archive-cache
314 ; a top-level bucket to put all other shards in
315 ; objects will be stored in rhodecode-archive-cache/shard-N based on the bucket_shards number
316 archive_cache.objectstore.bucket = rhodecode-archive-cache
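To make the new layout concrete, here is a minimal Python sketch of how an archive key could map to a shard folder inside the single required bucket. This is an assumption-laden illustration, not RhodeCode's actual implementation; the hash choice is hypothetical, and only the bucket/shard-N layout comes from the comments above.

    import hashlib

    def shard_path(archive_key: str, bucket: str = "rhodecode-archive-cache",
                   bucket_shards: int = 8) -> str:
        # a stable hash, so the same archive always lands in the same shard folder
        digest = hashlib.sha256(archive_key.encode("utf-8")).hexdigest()
        shard = int(digest, 16) % bucket_shards
        # e.g. "rhodecode-archive-cache/shard-3/<archive_key>"
        return f"{bucket}/shard-{shard}/{archive_key}"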
318 317
319 318 ; if true, this cache will retry up to retry_attempts=N times, waiting retry_backoff seconds between tries
320 319 archive_cache.objectstore.retry = false
321 320
322 321 ; number of seconds to wait before the next try when using retry
323 322 archive_cache.objectstore.retry_backoff = 1
324 323
325 324 ; how many times to retry a fetch from this backend
326 325 archive_cache.objectstore.retry_attempts = 10
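The three settings above describe a simple bounded retry loop; a hedged sketch of those semantics (function and names are illustrative, not RhodeCode's internal API):

    import time

    def fetch_with_retry(fetch, retry=False, retry_attempts=10, retry_backoff=1):
        # retry=false keeps the single attempt, mirroring the default above
        if not retry:
            return fetch()
        last_exc = None
        for _ in range(retry_attempts):
            try:
                return fetch()
            except OSError as exc:  # e.g. a transient network/storage error
                last_exc = exc
                time.sleep(retry_backoff)
        # all retry_attempts tries failed; surface the last error
        raise last_exc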
327 326
328 327 ; Default is $cache_dir/archive_cache if not set
329 328 ; Generated repo archives will be cached at this location
330 329 ; and served from the cache during subsequent requests for the same archive of
331 330 ; the repository. It is important that this path is shared across filesystems
332 331 ; and between RhodeCode and vcsserver
333 332 archive_cache.filesystem.store_dir = /var/opt/rhodecode_data/archive_cache
334 333
335 334 ; The limit in GB sets how much data we cache before recycling least recently used entries, defaults to 10 GB
336 335 archive_cache.filesystem.cache_size_gb = 1
337 336
338 337 ; Eviction policy used to clear out after cache_size_gb limit is reached
339 338 archive_cache.filesystem.eviction_policy = least-recently-stored
340 339
341 340 ; By default the cache uses a sharding technique; this specifies how many shards there are
342 341 ; default is 8 shards
343 342 archive_cache.filesystem.cache_shards = 8
344 343
345 344 ; if true, this cache will retry up to retry_attempts=N times, waiting retry_backoff seconds between tries
346 345 archive_cache.filesystem.retry = false
347 346
348 347 ; number of seconds to wait before the next try when using retry
349 348 archive_cache.filesystem.retry_backoff = 1
350 349
351 350 ; how many times to retry a fetch from this backend
352 351 archive_cache.filesystem.retry_attempts = 10
353 352
354 353
355 354 ; #############
356 355 ; CELERY CONFIG
357 356 ; #############
358 357
359 358 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
360 359
361 360 use_celery = true
362 361
363 362 ; path to store schedule database
364 363 #celerybeat-schedule.path =
365 364
366 365 ; connection url to the message broker (default redis)
367 366 celery.broker_url = redis://redis:6379/8
368 367
369 368 ; results backend to get results for (default redis)
370 369 celery.result_backend = redis://redis:6379/8
371 370
372 371 ; rabbitmq example
373 372 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
374 373
375 374 ; maximum tasks to execute before worker restart
376 375 celery.max_tasks_per_child = 20
377 376
378 377 ; tasks will never be sent to the queue, but executed locally instead.
379 378 celery.task_always_eager = false
380 379
381 380 ; #############
382 381 ; DOGPILE CACHE
383 382 ; #############
384 383
385 384 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
386 385 ; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space
387 386 cache_dir = /var/opt/rhodecode_data
388 387
389 388 ; *********************************************
390 389 ; `sql_cache_short` cache for heavy SQL queries
391 390 ; Only supported backend is `memory_lru`
392 391 ; *********************************************
393 392 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
394 393 rc_cache.sql_cache_short.expiration_time = 30
395 394
396 395
397 396 ; *****************************************************
398 397 ; `cache_repo_longterm` cache for repo object instances
399 398 ; Only supported backend is `memory_lru`
400 399 ; *****************************************************
401 400 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
402 401 ; by default we use 30 days; the cache is still invalidated on push
403 402 rc_cache.cache_repo_longterm.expiration_time = 2592000
404 403 ; max items in LRU cache, set to a smaller number to save memory, and expire last used caches
405 404 rc_cache.cache_repo_longterm.max_size = 10000
406 405
407 406
408 407 ; *********************************************
409 408 ; `cache_general` cache for general purpose use
410 409 ; for simplicity use rc.file_namespace backend,
411 410 ; for performance and scale use rc.redis
412 411 ; *********************************************
413 412 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
414 413 rc_cache.cache_general.expiration_time = 43200
415 414 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
416 415 #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db
417 416
418 417 ; alternative `cache_general` redis backend with distributed lock
419 418 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
420 419 #rc_cache.cache_general.expiration_time = 300
421 420
422 421 ; redis_expiration_time needs to be greater than expiration_time
423 422 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
424 423
425 424 #rc_cache.cache_general.arguments.host = localhost
426 425 #rc_cache.cache_general.arguments.port = 6379
427 426 #rc_cache.cache_general.arguments.db = 0
428 427 #rc_cache.cache_general.arguments.socket_timeout = 30
429 428 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
430 429 #rc_cache.cache_general.arguments.distributed_lock = true
431 430
432 431 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
433 432 #rc_cache.cache_general.arguments.lock_auto_renewal = true
434 433
435 434 ; *************************************************
436 435 ; `cache_perms` cache for permission tree, auth TTL
437 436 ; for simplicity use rc.file_namespace backend,
438 437 ; for performance and scale use rc.redis
439 438 ; *************************************************
440 439 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
441 440 rc_cache.cache_perms.expiration_time = 3600
442 441 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
443 442 #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db
444 443
445 444 ; alternative `cache_perms` redis backend with distributed lock
446 445 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
447 446 #rc_cache.cache_perms.expiration_time = 300
448 447
449 448 ; redis_expiration_time needs to be greater than expiration_time
450 449 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
451 450
452 451 #rc_cache.cache_perms.arguments.host = localhost
453 452 #rc_cache.cache_perms.arguments.port = 6379
454 453 #rc_cache.cache_perms.arguments.db = 0
455 454 #rc_cache.cache_perms.arguments.socket_timeout = 30
456 455 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
457 456 #rc_cache.cache_perms.arguments.distributed_lock = true
458 457
459 458 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
460 459 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
461 460
462 461 ; ***************************************************
463 462 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
464 463 ; for simplicity use rc.file_namespace backend,
465 464 ; for performance and scale use rc.redis
466 465 ; ***************************************************
467 466 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
468 467 rc_cache.cache_repo.expiration_time = 2592000
469 468 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
470 469 #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db
471 470
472 471 ; alternative `cache_repo` redis backend with distributed lock
473 472 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
474 473 #rc_cache.cache_repo.expiration_time = 2592000
475 474
476 475 ; redis_expiration_time needs to be greater than expiration_time
477 476 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
478 477
479 478 #rc_cache.cache_repo.arguments.host = localhost
480 479 #rc_cache.cache_repo.arguments.port = 6379
481 480 #rc_cache.cache_repo.arguments.db = 1
482 481 #rc_cache.cache_repo.arguments.socket_timeout = 30
483 482 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
484 483 #rc_cache.cache_repo.arguments.distributed_lock = true
485 484
486 485 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
487 486 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
488 487
489 488 ; ##############
490 489 ; BEAKER SESSION
491 490 ; ##############
492 491
493 492 ; beaker.session.type is the type of storage used for logged-in users' sessions. Current allowed
494 493 ; types are file, ext:redis, ext:database, ext:memcached
495 494 ; Fastest ones are ext:redis and ext:database, DO NOT use the memory type for sessions
496 495 #beaker.session.type = file
497 496 #beaker.session.data_dir = %(here)s/data/sessions
498 497
499 498 ; Redis based sessions
500 499 beaker.session.type = ext:redis
501 500 beaker.session.url = redis://redis:6379/2
502 501
503 502 ; DB based session, fast, and allows easy management over logged in users
504 503 #beaker.session.type = ext:database
505 504 #beaker.session.table_name = db_session
506 505 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
507 506 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
508 507 #beaker.session.sa.pool_recycle = 3600
509 508 #beaker.session.sa.echo = false
510 509
511 510 beaker.session.key = rhodecode
512 511 beaker.session.secret = develop-rc-uytcxaz
513 512 beaker.session.lock_dir = /data_ramdisk/lock
514 513
515 514 ; Secure encrypted cookie. Requires AES and AES python libraries
516 515 ; you must disable beaker.session.secret to use this
517 516 #beaker.session.encrypt_key = key_for_encryption
518 517 #beaker.session.validate_key = validation_key
519 518
520 519 ; Sets session as invalid (also logging out the user) if it has not been
521 520 ; accessed for the given amount of time in seconds
522 521 beaker.session.timeout = 2592000
523 522 beaker.session.httponly = true
524 523
525 524 ; Path to use for the cookie. Set to prefix if you use prefix middleware
526 525 #beaker.session.cookie_path = /custom_prefix
527 526
528 527 ; Set https secure cookie
529 528 beaker.session.secure = false
530 529
531 530 ; default cookie expiration time in seconds, set to `true` to make the cookie
532 531 ; expire at browser close
533 532 #beaker.session.cookie_expires = 3600
534 533
535 534 ; #############################
536 535 ; SEARCH INDEXING CONFIGURATION
537 536 ; #############################
538 537
539 538 ; Full text search indexer is available in rhodecode-tools under
540 539 ; `rhodecode-tools index` command
541 540
542 541 ; WHOOSH Backend, doesn't require additional services to run
543 542 ; it works well with a few dozen repos
544 543 search.module = rhodecode.lib.index.whoosh
545 544 search.location = %(here)s/data/index
546 545
547 546 ; ####################
548 547 ; CHANNELSTREAM CONFIG
549 548 ; ####################
550 549
551 550 ; channelstream enables persistent connections and live notification
552 551 ; in the system. It's also used by the chat system
553 552
554 553 channelstream.enabled = true
555 554
556 555 ; server address for channelstream server on the backend
557 556 channelstream.server = channelstream:9800
558 557
559 558 ; location of the channelstream server from the outside world
560 559 ; use ws:// for http or wss:// for https. This address needs to be handled
561 560 ; by external HTTP server such as Nginx or Apache
562 561 ; see Nginx/Apache configuration examples in our docs
563 562 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
564 563 channelstream.secret = ENV_GENERATED
565 564 channelstream.history.location = /var/opt/rhodecode_data/channelstream_history
566 565
567 566 ; Internal application path that Javascript uses to connect to.
568 567 ; If you use proxy-prefix the prefix should be added before /_channelstream
569 568 channelstream.proxy_path = /_channelstream
570 569
571 570
572 571 ; ##############################
573 572 ; MAIN RHODECODE DATABASE CONFIG
574 573 ; ##############################
575 574
576 575 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
577 576 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
578 577 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
579 578 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
580 579 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
581 580
582 581 sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
583 582
584 583 ; see sqlalchemy docs for other advanced settings
585 584 ; print the sql statements to output
586 585 sqlalchemy.db1.echo = false
587 586
588 587 ; recycle the connections after this amount of seconds
589 588 sqlalchemy.db1.pool_recycle = 3600
590 589
591 590 ; the number of connections to keep open inside the connection pool.
592 591 ; 0 indicates no limit
593 592 ; the general calculus with gevent is:
594 593 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
595 594 ; then increase pool size + max overflow so that they add up to 500.
596 595 #sqlalchemy.db1.pool_size = 5
597 596
598 597 ; The number of connections to allow in connection pool "overflow", that is
599 598 ; connections that can be opened above and beyond the pool_size setting,
600 599 ; which defaults to five.
601 600 #sqlalchemy.db1.max_overflow = 10
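As a worked example of the gevent calculus above (values are illustrative, not a recommendation): with 500 concurrent greenlets all doing database access, pool size plus max overflow should add up to 500, for instance:

    #sqlalchemy.db1.pool_size = 100
    #sqlalchemy.db1.max_overflow = 400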
602 601
603 602 ; Connection check ping, used to detect broken database connections
604 603 ; could be enabled to better handle "MySQL has gone away" errors
605 604 #sqlalchemy.db1.ping_connection = true
606 605
607 606 ; ##########
608 607 ; VCS CONFIG
609 608 ; ##########
610 609 vcs.server.enable = true
611 610 vcs.server = vcsserver:10010
612 611
613 612 ; Web server connectivity protocol, responsible for web based VCS operations
614 613 ; Available protocols are:
615 614 ; `http` - use http-rpc backend (default)
616 615 vcs.server.protocol = http
617 616
618 617 ; Push/Pull operations protocol, available options are:
619 618 ; `http` - use http-rpc backend (default)
620 619 vcs.scm_app_implementation = http
621 620
622 621 ; Push/Pull operations hooks protocol, available options are:
623 622 ; `http` - use http-rpc backend (default)
624 623 ; `celery` - use celery based hooks
625 624 vcs.hooks.protocol = http
626 625
627 626 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
628 627 ; accessible via network.
629 628 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
630 629 vcs.hooks.host = *
631 630
632 631 ; Start VCSServer with this instance as a subprocess, useful for development
633 632 vcs.start_server = false
634 633
635 634 ; List of enabled VCS backends, available options are:
636 635 ; `hg` - mercurial
637 636 ; `git` - git
638 637 ; `svn` - subversion
639 638 vcs.backends = hg, git, svn
640 639
641 640 ; Wait this number of seconds before killing connection to the vcsserver
642 641 vcs.connection_timeout = 3600
643 642
644 643 ; Cache flag to cache vcsserver remote calls locally
645 644 ; It uses cache_region `cache_repo`
646 645 vcs.methods.cache = true
647 646
648 647 ; ####################################################
649 648 ; Subversion proxy support (mod_dav_svn)
650 649 ; Maps RhodeCode repo groups into SVN paths for Apache
651 650 ; ####################################################
652 651
653 652 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
654 653 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
655 654 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
656 655 #vcs.svn.compatible_version = 1.8
657 656
658 657 ; Enable SVN proxy of requests over HTTP
659 658 vcs.svn.proxy.enabled = true
660 659
661 660 ; host to connect to running SVN subsystem
662 661 vcs.svn.proxy.host = http://svn:8090
663 662
664 663 ; Enable or disable the config file generation.
665 664 svn.proxy.generate_config = true
666 665
667 666 ; Generate config file with `SVNListParentPath` set to `On`.
668 667 svn.proxy.list_parent_path = true
669 668
670 669 ; Set location and file name of generated config file.
671 670 svn.proxy.config_file_path = /etc/rhodecode/conf/svn/mod_dav_svn.conf
672 671
673 672 ; alternative mod_dav config template. This needs to be a valid mako template
674 673 ; Example template can be found in the source code:
675 674 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
676 675 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
677 676
678 677 ; Used as a prefix to the `Location` block in the generated config file.
679 678 ; In most cases it should be set to `/`.
680 679 svn.proxy.location_root = /
681 680
682 681 ; Command to reload the mod dav svn configuration on change.
683 682 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
684 683 ; Make sure the user who runs the RhodeCode process is allowed to reload Apache
685 684 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
686 685
687 686 ; If the timeout expires before the reload command finishes, the command will
688 687 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
689 688 #svn.proxy.reload_timeout = 10
690 689
691 690 ; ####################
692 691 ; SSH Support Settings
693 692 ; ####################
694 693
695 694 ; Defines if a custom authorized_keys file should be created and written on
696 695 ; any change of user ssh keys. Setting this to false also disables the
697 696 ; possibility of users adding SSH keys from the web interface. Super admins can
698 697 ; still manage SSH Keys.
699 698 ssh.generate_authorized_keyfile = true
700 699
701 700 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
702 701 # ssh.authorized_keys_ssh_opts =
703 702
705 704 ; Path to the authorized_keys file where the generated entries are placed.
705 704 ; It is possible to have multiple key files specified in `sshd_config` e.g.
706 705 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
707 706 ssh.authorized_keys_file_path = /etc/rhodecode/conf/ssh/authorized_keys_rhodecode
708 707
709 708 ; Command to execute the SSH wrapper. The binary is available in the
710 709 ; RhodeCode installation directory.
711 710 ; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
712 711 ; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2
713 712 ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
714 713
715 714 ; Allow shell when executing the ssh-wrapper command
716 715 ssh.wrapper_cmd_allow_shell = false
717 716
718 717 ; Enables logging, and detailed output sent back to the client during SSH
719 718 ; operations. Useful for debugging, shouldn't be used in production.
720 719 ssh.enable_debug_logging = true
721 720
722 721 ; Paths to binary executables, by default they are the names, but we can
723 722 ; override them if we want to use a custom one
724 723 ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg
725 724 ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git
726 725 ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve
727 726
728 727 ; Enables SSH key generator web interface. Disabling this still allows users
729 728 ; to add their own keys.
730 729 ssh.enable_ui_key_generator = true
731 730
732 731 ; Statsd client config, this is used to send metrics to statsd
733 732 ; We recommend setting statsd_exported and scraping them using Prometheus
734 733 #statsd.enabled = false
735 734 #statsd.statsd_host = 0.0.0.0
736 735 #statsd.statsd_port = 8125
737 736 #statsd.statsd_prefix =
738 737 #statsd.statsd_ipv6 = false
739 738
740 739 ; configure logging automatically at server startup; set to false
741 740 ; to use the custom logging config below.
742 741 ; RC_LOGGING_FORMATTER
743 742 ; RC_LOGGING_LEVEL
744 743 ; env variables can control the settings for logging in case of autoconfigure
745 744
746 745 #logging.autoconfigure = true
747 746
748 747 ; specify your own custom logging config file to configure logging
749 748 #logging.logging_conf_file = /path/to/custom_logging.ini
750 749
751 750 ; Dummy marker to add new entries after.
752 751 ; Add any custom entries below. Please don't remove this marker.
753 752 custom.conf = 1
754 753
755 754
756 755 ; #####################
757 756 ; LOGGING CONFIGURATION
758 757 ; #####################
759 758
760 759 [loggers]
761 760 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper
762 761
763 762 [handlers]
764 763 keys = console, console_sql
765 764
766 765 [formatters]
767 766 keys = generic, json, color_formatter, color_formatter_sql
768 767
769 768 ; #######
770 769 ; LOGGERS
771 770 ; #######
772 771 [logger_root]
773 772 level = NOTSET
774 773 handlers = console
775 774
776 775 [logger_sqlalchemy]
777 776 level = INFO
778 777 handlers = console_sql
779 778 qualname = sqlalchemy.engine
780 779 propagate = 0
781 780
782 781 [logger_beaker]
783 782 level = DEBUG
784 783 handlers =
785 784 qualname = beaker.container
786 785 propagate = 1
787 786
788 787 [logger_rhodecode]
789 788 level = DEBUG
790 789 handlers =
791 790 qualname = rhodecode
792 791 propagate = 1
793 792
794 793 [logger_ssh_wrapper]
795 794 level = DEBUG
796 795 handlers =
797 796 qualname = ssh_wrapper
798 797 propagate = 1
799 798
800 799 [logger_celery]
801 800 level = DEBUG
802 801 handlers =
803 802 qualname = celery
804 803
805 804
806 805 ; ########
807 806 ; HANDLERS
808 807 ; ########
809 808
810 809 [handler_console]
811 810 class = StreamHandler
812 811 args = (sys.stderr, )
813 812 level = DEBUG
814 813 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
815 814 ; This allows sending properly formatted logs to grafana loki or elasticsearch
816 815 formatter = color_formatter
817 816
818 817 [handler_console_sql]
819 818 ; "level = DEBUG" logs SQL queries and results.
820 819 ; "level = INFO" logs SQL queries.
821 820 ; "level = WARN" logs neither. (Recommended for production systems.)
822 821 class = StreamHandler
823 822 args = (sys.stderr, )
824 823 level = WARN
825 824 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
826 825 ; This allows sending properly formatted logs to grafana loki or elasticsearch
827 826 formatter = color_formatter_sql
828 827
829 828 ; ##########
830 829 ; FORMATTERS
831 830 ; ##########
832 831
833 832 [formatter_generic]
834 833 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
835 834 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
836 835 datefmt = %Y-%m-%d %H:%M:%S
837 836
838 837 [formatter_color_formatter]
839 838 class = rhodecode.lib.logging_formatter.ColorFormatter
840 839 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
841 840 datefmt = %Y-%m-%d %H:%M:%S
842 841
843 842 [formatter_color_formatter_sql]
844 843 class = rhodecode.lib.logging_formatter.ColorFormatterSql
845 844 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
846 845 datefmt = %Y-%m-%d %H:%M:%S
847 846
848 847 [formatter_json]
849 848 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
850 849 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
@@ -1,818 +1,817 b''
1 1
2 2 ; #########################################
3 3 ; RHODECODE COMMUNITY EDITION CONFIGURATION
4 4 ; #########################################
5 5
6 6 [DEFAULT]
7 7 ; Debug flag sets all loggers to debug, and enables request tracking
8 8 debug = false
9 9
10 10 ; ########################################################################
11 11 ; EMAIL CONFIGURATION
12 12 ; These settings will be used by the RhodeCode mailing system
13 13 ; ########################################################################
14 14
15 15 ; prefix all emails subjects with given prefix, helps filtering out emails
16 16 #email_prefix = [RhodeCode]
17 17
18 18 ; email FROM address all mails will be sent
19 19 #app_email_from = rhodecode-noreply@localhost
20 20
21 21 #smtp_server = mail.server.com
22 22 #smtp_username =
23 23 #smtp_password =
24 24 #smtp_port =
25 25 #smtp_use_tls = false
26 26 #smtp_use_ssl = true
27 27
28 28 [server:main]
29 29 ; COMMON HOST/IP CONFIG. This applies mostly to development setups;
30 30 ; host and port for gunicorn are controlled by gunicorn_conf.py
31 31 host = 127.0.0.1
32 32 port = 10020
33 33
34 34
35 35 ; ###########################
36 36 ; GUNICORN APPLICATION SERVER
37 37 ; ###########################
38 38
39 39 ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py
40 40
41 41 ; Module to use, this setting shouldn't be changed
42 42 use = egg:gunicorn#main
43 43
44 44 ; Prefix middleware for RhodeCode.
45 45 ; recommended when using proxy setup.
46 46 ; allows setting RhodeCode under a prefix on the server.
47 47 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
48 48 ; And set your prefix like: `prefix = /custom_prefix`
49 49 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
50 50 ; to make your cookies only work on prefix url
51 51 [filter:proxy-prefix]
52 52 use = egg:PasteDeploy#prefix
53 53 prefix = /
54 54
55 55 [app:main]
56 56 ; The %(here)s variable will be replaced with the absolute path of parent directory
57 57 ; of this file
58 58 ; Each option in the app:main can be overridden by an environment variable
59 59 ;
60 60 ;To override an option:
61 61 ;
62 62 ;RC_<KeyName>
63 63 ;Everything should be uppercase, . and - should be replaced by _.
64 64 ;For example, if you have these configuration settings:
65 65 ;rc_cache.repo_object.backend = foo
66 66 ;can be overridden by
67 67 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
68 68
69 69 use = egg:rhodecode-enterprise-ce
70 70
71 71 ; enable proxy prefix middleware, defined above
72 72 #filter-with = proxy-prefix
73 73
74 74 ; encryption key used to encrypt social plugin tokens,
75 75 ; remote_urls with credentials etc, if not set it defaults to
76 76 ; `beaker.session.secret`
77 77 #rhodecode.encrypted_values.secret =
78 78
79 79 ; decryption strict mode (enabled by default). It controls if decryption raises
80 80 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
81 81 #rhodecode.encrypted_values.strict = false
82 82
83 83 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
84 84 ; fernet is safer, and we strongly recommend switching to it.
85 85 ; Due to backward compatibility aes is used as default.
86 86 #rhodecode.encrypted_values.algorithm = fernet
87 87
88 88 ; Return gzipped responses from RhodeCode (static files/application)
89 89 gzip_responses = false
90 90
91 91 ; Auto-generate javascript routes file on startup
92 92 generate_js_files = false
93 93
94 94 ; System global default language.
95 95 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
96 96 lang = en
97 97
98 98 ; Perform a full repository scan and import on each server start.
99 99 ; Setting this to true could lead to a very long startup time.
100 100 startup.import_repos = false
101 101
102 102 ; URL at which the application is running. This is used for Bootstrapping
103 103 ; requests in context when no web request is available. Used in ishell, or
104 104 ; SSH calls. Set this for events to receive proper url for SSH calls.
105 105 app.base_url = http://rhodecode.local
106 106
107 107 ; Host at which the Service API is running.
108 108 app.service_api.host = http://rhodecode.local:10020
109 109
110 110 ; Secret for Service API authentication.
111 111 app.service_api.token =
112 112
113 113 ; Unique application ID. Should be a random unique string for security.
114 114 app_instance_uuid = rc-production
115 115
116 116 ; Cut off limit for large diffs (size in bytes). If the overall diff size of a
117 117 ; commit or pull request exceeds this limit, the diff will be displayed
118 118 ; partially. E.g 512000 == 512Kb
119 119 cut_off_limit_diff = 512000
120 120
121 121 ; Cut off limit for large files inside diffs (size in bytes). Each individual
122 122 ; file inside a diff which exceeds this limit will be displayed partially.
123 123 ; E.g 128000 == 128Kb
124 124 cut_off_limit_file = 128000
125 125
126 126 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
127 127 vcs_full_cache = true
128 128
129 129 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
130 130 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
131 131 force_https = false
132 132
133 133 ; use Strict-Transport-Security headers
134 134 use_htsts = false
135 135
136 136 ; Set to true if your repos are exposed using the dumb protocol
137 137 git_update_server_info = false
138 138
139 139 ; RSS/ATOM feed options
140 140 rss_cut_off_limit = 256000
141 141 rss_items_per_page = 10
142 142 rss_include_diff = false
143 143
144 144 ; gist URL alias, used to create nicer urls for gists. This should be a
145 145 ; url that does rewrites to _admin/gists/{gistid}.
146 146 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
147 147 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
148 148 gist_alias_url =
149 149
150 150 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
151 151 ; used to access.
152 152 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
153 153 ; came from the logged-in user who owns this authentication token.
154 154 ; Additionally, the @TOKEN syntax can be used to bind the view to a specific
155 155 ; authentication token. Such a view would only be accessible when used together
156 156 ; with this authentication token
157 157 ; list of all views can be found under `/_admin/permissions/auth_token_access`
158 158 ; The list should be "," separated and on a single line.
159 159 ; Most common views to enable:
160 160
161 161 # RepoCommitsView:repo_commit_download
162 162 # RepoCommitsView:repo_commit_patch
163 163 # RepoCommitsView:repo_commit_raw
164 164 # RepoCommitsView:repo_commit_raw@TOKEN
165 165 # RepoFilesView:repo_files_diff
166 166 # RepoFilesView:repo_archivefile
167 167 # RepoFilesView:repo_file_raw
168 168 # GistView:*
169 169 api_access_controllers_whitelist =
170 170
171 171 ; Default encoding used to convert from and to unicode
172 172 ; can also be a comma separated list of encodings in case of mixed encodings
173 173 default_encoding = UTF-8
174 174
175 175 ; instance-id prefix
176 176 ; a prefix key for this instance used for cache invalidation when running
177 177 ; multiple instances of RhodeCode, make sure it's globally unique for
178 178 ; all running RhodeCode instances. Leave empty if you don't use it
179 179 instance_id =
180 180
181 181 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
182 182 ; of an authentication plugin even if it is disabled by its settings.
183 183 ; This could be useful if you are unable to log in to the system due to broken
184 184 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
185 185 ; module to log in again and fix the settings.
186 186 ; Available builtin plugin IDs (hash is part of the ID):
187 187 ; egg:rhodecode-enterprise-ce#rhodecode
188 188 ; egg:rhodecode-enterprise-ce#pam
189 189 ; egg:rhodecode-enterprise-ce#ldap
190 190 ; egg:rhodecode-enterprise-ce#jasig_cas
191 191 ; egg:rhodecode-enterprise-ce#headers
192 192 ; egg:rhodecode-enterprise-ce#crowd
193 193
194 194 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
195 195
196 196 ; Flag to control loading of legacy plugins in py:/path format
197 197 auth_plugin.import_legacy_plugins = true
198 198
199 199 ; alternative return HTTP header for failed authentication. Default HTTP
200 200 ; response is 401 HTTPUnauthorized. Currently HG clients have trouble
201 201 ; handling that, causing a series of failed authentication calls.
202 202 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
203 203 ; This will be served instead of default 401 on bad authentication
204 204 auth_ret_code =
205 205
206 206 ; use special detection method when serving auth_ret_code, instead of serving
207 207 ; ret_code directly, use 401 initially (which triggers a credentials prompt)
208 208 ; and then serve auth_ret_code to clients
209 209 auth_ret_code_detection = false
210 210
211 211 ; locking return code. When repository is locked return this HTTP code. 2XX
212 212 ; codes don't break the transactions while 4XX codes do
213 213 lock_ret_code = 423
214 214
215 215 ; Filesystem location where repositories should be stored
216 216 repo_store.path = /var/opt/rhodecode_repo_store
217 217
218 218 ; allows setting up custom hooks in the settings page
219 219 allow_custom_hooks_settings = true
220 220
221 221 ; Generated license token required for EE edition license.
222 222 ; New generated token value can be found in Admin > settings > license page.
223 223 license_token =
224 224
225 225 ; This flag hides sensitive information on the license page such as token, and license data
226 226 license.hide_license_info = false
227 227
228 228 ; supervisor connection uri, for managing supervisor and logs.
229 229 supervisor.uri =
230 230
231 231 ; supervisord group name/id we only want this RC instance to handle
232 232 supervisor.group_id = prod
233 233
234 234 ; Display extended labs settings
235 235 labs_settings_active = true
236 236
237 237 ; Custom exception store path, defaults to TMPDIR
238 238 ; This is used to store exceptions from RhodeCode in a shared directory
239 239 #exception_tracker.store_path =
240 240
241 241 ; Send email with exception details when it happens
242 242 #exception_tracker.send_email = false
243 243
244 244 ; Comma separated list of recipients for exception emails,
245 245 ; e.g admin@rhodecode.com,devops@rhodecode.com
246 246 ; Can be left empty, then emails will be sent to ALL super-admins
247 247 #exception_tracker.send_email_recipients =
248 248
249 249 ; optional prefix to add to the email subject
250 250 #exception_tracker.email_prefix = [RHODECODE ERROR]
251 251
252 252 ; File store configuration. This is used to store and serve uploaded files
253 253 file_store.enabled = true
254 254
255 255 ; Storage backend, available options are: local
256 256 file_store.backend = local
257 257
258 258 ; path to store the uploaded binaries and artifacts
259 259 file_store.storage_path = /var/opt/rhodecode_data/file_store
260 260
261 261
262 262 ; Redis url used to acquire/check archive generation locks
263 263 archive_cache.locking.url = redis://redis:6379/1
264 264
265 265 ; Storage backend, only 'filesystem' and 'objectstore' are available now
266 266 archive_cache.backend.type = filesystem
267 267
268 268 ; url for s3 compatible storage that allows uploading artifacts
269 269 ; e.g http://minio:9000
270 270 archive_cache.objectstore.url = http://s3-minio:9000
271 271
272 272 ; key for s3 auth
273 273 archive_cache.objectstore.key = key
274 274
275 275 ; secret for s3 auth
276 276 archive_cache.objectstore.secret = secret
277 277
278 278 ; number of shard folders to create to distribute archives across
279 279 ; default is 8 shards
280 280 archive_cache.objectstore.bucket_shards = 8
281 281
282 ; a top-level bucket to put all other sharded buckets in
283 ; in case it's empty all buckets will be created in top-level (not recommended)
284 ; objects will be stored in rhodecode-archive-cache/shard-bucket-N based on the bucket_shards number
285 archive_cache.objectstore.bucket_root = rhodecode-archive-cache
282 ; a top-level bucket to put all other shards in
283 ; objects will be stored in rhodecode-archive-cache/shard-N based on the bucket_shards number
284 archive_cache.objectstore.bucket = rhodecode-archive-cache
286 285
287 286 ; if true, this cache will retry up to retry_attempts=N times, waiting retry_backoff seconds between tries
288 287 archive_cache.objectstore.retry = false
289 288
290 289 ; number of seconds to wait before the next try when using retry
291 290 archive_cache.objectstore.retry_backoff = 1
292 291
293 292 ; how many times to retry a fetch from this backend
294 293 archive_cache.objectstore.retry_attempts = 10
295 294
296 295 ; Default is $cache_dir/archive_cache if not set
297 296 ; Generated repo archives will be cached at this location
298 297 ; and served from the cache during subsequent requests for the same archive of
299 298 ; the repository. It is important that this path is shared across filesystems
300 299 ; and between RhodeCode and vcsserver
301 300 archive_cache.filesystem.store_dir = /var/opt/rhodecode_data/archive_cache
302 301
303 302 ; The limit in GB sets how much data we cache before recycling least recently used entries, defaults to 10 GB
304 303 archive_cache.filesystem.cache_size_gb = 40
305 304
306 305 ; Eviction policy used to clear out after cache_size_gb limit is reached
307 306 archive_cache.filesystem.eviction_policy = least-recently-stored
308 307
309 308 ; By default the cache uses a sharding technique; this specifies how many shards there are
310 309 ; default is 8 shards
311 310 archive_cache.filesystem.cache_shards = 8
312 311
313 312 ; if true, this cache will retry up to retry_attempts=N times, waiting retry_backoff seconds between tries
314 313 archive_cache.filesystem.retry = false
315 314
316 315 ; number of seconds to wait before the next try when using retry
317 316 archive_cache.filesystem.retry_backoff = 1
318 317
319 318 ; how many times to retry a fetch from this backend
320 319 archive_cache.filesystem.retry_attempts = 10
321 320
322 321
323 322 ; #############
324 323 ; CELERY CONFIG
325 324 ; #############
326 325
327 326 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
328 327
329 328 use_celery = true
330 329
331 330 ; path to store schedule database
332 331 #celerybeat-schedule.path =
333 332
334 333 ; connection url to the message broker (default redis)
335 334 celery.broker_url = redis://redis:6379/8
336 335
337 336 ; results backend to get results for (default redis)
338 337 celery.result_backend = redis://redis:6379/8
339 338
340 339 ; rabbitmq example
341 340 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
342 341
343 342 ; maximum tasks to execute before worker restart
344 343 celery.max_tasks_per_child = 20
345 344
346 345 ; tasks will never be sent to the queue, but executed locally instead.
347 346 celery.task_always_eager = false
348 347
349 348 ; #############
350 349 ; DOGPILE CACHE
351 350 ; #############
352 351
353 352 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
354 353 ; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space
355 354 cache_dir = /var/opt/rhodecode_data
356 355
357 356 ; *********************************************
358 357 ; `sql_cache_short` cache for heavy SQL queries
359 358 ; Only supported backend is `memory_lru`
360 359 ; *********************************************
361 360 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
362 361 rc_cache.sql_cache_short.expiration_time = 30
363 362
364 363
365 364 ; *****************************************************
366 365 ; `cache_repo_longterm` cache for repo object instances
367 366 ; Only supported backend is `memory_lru`
368 367 ; *****************************************************
369 368 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
370 369 ; by default we use 30 days; the cache is still invalidated on push
371 370 rc_cache.cache_repo_longterm.expiration_time = 2592000
372 371 ; max items in LRU cache, set to a smaller number to save memory, and expire last used caches
373 372 rc_cache.cache_repo_longterm.max_size = 10000
374 373
375 374
376 375 ; *********************************************
377 376 ; `cache_general` cache for general purpose use
378 377 ; for simplicity use rc.file_namespace backend,
379 378 ; for performance and scale use rc.redis
380 379 ; *********************************************
381 380 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
382 381 rc_cache.cache_general.expiration_time = 43200
383 382 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
384 383 #rc_cache.cache_general.arguments.filename = /tmp/cache_general_db
385 384
386 385 ; alternative `cache_general` redis backend with distributed lock
387 386 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
388 387 #rc_cache.cache_general.expiration_time = 300
389 388
390 389 ; redis_expiration_time needs to be greater than expiration_time
391 390 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
392 391
393 392 #rc_cache.cache_general.arguments.host = localhost
394 393 #rc_cache.cache_general.arguments.port = 6379
395 394 #rc_cache.cache_general.arguments.db = 0
396 395 #rc_cache.cache_general.arguments.socket_timeout = 30
397 396 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
398 397 #rc_cache.cache_general.arguments.distributed_lock = true
399 398
400 399 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
401 400 #rc_cache.cache_general.arguments.lock_auto_renewal = true
402 401
403 402 ; *************************************************
404 403 ; `cache_perms` cache for permission tree, auth TTL
405 404 ; for simplicity use rc.file_namespace backend,
406 405 ; for performance and scale use rc.redis
407 406 ; *************************************************
408 407 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
409 408 rc_cache.cache_perms.expiration_time = 3600
410 409 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
411 410 #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms_db
412 411
413 412 ; alternative `cache_perms` redis backend with distributed lock
414 413 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
415 414 #rc_cache.cache_perms.expiration_time = 300
416 415
417 416 ; redis_expiration_time needs to be greater than expiration_time
418 417 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
419 418
420 419 #rc_cache.cache_perms.arguments.host = localhost
421 420 #rc_cache.cache_perms.arguments.port = 6379
422 421 #rc_cache.cache_perms.arguments.db = 0
423 422 #rc_cache.cache_perms.arguments.socket_timeout = 30
424 423 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
425 424 #rc_cache.cache_perms.arguments.distributed_lock = true
426 425
427 426 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
428 427 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
429 428
430 429 ; ***************************************************
431 430 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
432 431 ; for simplicity use rc.file_namespace backend,
433 432 ; for performance and scale use rc.redis
434 433 ; ***************************************************
435 434 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
436 435 rc_cache.cache_repo.expiration_time = 2592000
437 436 ; file cache store path. Defaults to the `cache_dir =` value, or tempdir if neither is set
438 437 #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo_db
439 438
440 439 ; alternative `cache_repo` redis backend with distributed lock
441 440 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
442 441 #rc_cache.cache_repo.expiration_time = 2592000
443 442
444 443 ; redis_expiration_time needs to be greater than expiration_time
445 444 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
446 445
447 446 #rc_cache.cache_repo.arguments.host = localhost
448 447 #rc_cache.cache_repo.arguments.port = 6379
449 448 #rc_cache.cache_repo.arguments.db = 1
450 449 #rc_cache.cache_repo.arguments.socket_timeout = 30
451 450 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
452 451 #rc_cache.cache_repo.arguments.distributed_lock = true
453 452
454 453 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
455 454 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
456 455
457 456 ; ##############
458 457 ; BEAKER SESSION
459 458 ; ##############
460 459
461 460 ; beaker.session.type is the type of storage used for the logged-in users' sessions. Currently allowed
462 461 ; types are file, ext:redis, ext:database, ext:memcached
463 462 ; The fastest ones are ext:redis and ext:database; DO NOT use the memory type for sessions
464 463 #beaker.session.type = file
465 464 #beaker.session.data_dir = %(here)s/data/sessions
466 465
467 466 ; Redis based sessions
468 467 beaker.session.type = ext:redis
469 468 beaker.session.url = redis://redis:6379/2
470 469
471 470 ; DB based session, fast, and allows easy management of logged-in users
472 471 #beaker.session.type = ext:database
473 472 #beaker.session.table_name = db_session
474 473 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
475 474 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
476 475 #beaker.session.sa.pool_recycle = 3600
477 476 #beaker.session.sa.echo = false
478 477
479 478 beaker.session.key = rhodecode
480 479 beaker.session.secret = production-rc-uytcxaz
481 480 beaker.session.lock_dir = /data_ramdisk/lock
482 481
483 482 ; Secure encrypted cookie. Requires AES and AES python libraries
484 483 ; you must disable beaker.session.secret to use this
485 484 #beaker.session.encrypt_key = key_for_encryption
486 485 #beaker.session.validate_key = validation_key
487 486
488 487 ; Sets the session as invalid (also logging out the user) if it has not been
489 488 ; accessed for the given amount of time in seconds
490 489 beaker.session.timeout = 2592000
491 490 beaker.session.httponly = true
492 491
493 492 ; Path to use for the cookie. Set to prefix if you use prefix middleware
494 493 #beaker.session.cookie_path = /custom_prefix
495 494
496 495 ; Set https secure cookie
497 496 beaker.session.secure = false
498 497
499 498 ; default cookie expiration time in seconds, set to `true` to expire
500 499 ; at browser close
501 500 #beaker.session.cookie_expires = 3600
502 501
503 502 ; #############################
504 503 ; SEARCH INDEXING CONFIGURATION
505 504 ; #############################
506 505
507 506 ; Full text search indexer is available in rhodecode-tools under
508 507 ; `rhodecode-tools index` command
509 508
510 509 ; WHOOSH Backend, doesn't require additional services to run
511 510 ; it works well with a few dozen repos
512 511 search.module = rhodecode.lib.index.whoosh
513 512 search.location = %(here)s/data/index
514 513
515 514 ; ####################
516 515 ; CHANNELSTREAM CONFIG
517 516 ; ####################
518 517
519 518 ; channelstream enables persistent connections and live notifications
520 519 ; in the system. It's also used by the chat system
521 520
522 521 channelstream.enabled = true
523 522
524 523 ; server address for channelstream server on the backend
525 524 channelstream.server = channelstream:9800
526 525
527 526 ; location of the channelstream server from outside world
528 527 ; use ws:// for http or wss:// for https. This address needs to be handled
529 528 ; by external HTTP server such as Nginx or Apache
530 529 ; see Nginx/Apache configuration examples in our docs
531 530 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
532 531 channelstream.secret = ENV_GENERATED
533 532 channelstream.history.location = /var/opt/rhodecode_data/channelstream_history
534 533
535 534 ; Internal application path that Javascript uses to connect to.
536 535 ; If you use proxy-prefix the prefix should be added before /_channelstream
537 536 channelstream.proxy_path = /_channelstream
538 537
539 538
540 539 ; ##############################
541 540 ; MAIN RHODECODE DATABASE CONFIG
542 541 ; ##############################
543 542
544 543 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
545 544 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
546 545 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
547 546 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
548 547 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
549 548
550 549 sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
551 550
552 551 ; see sqlalchemy docs for other advanced settings
553 552 ; print the sql statements to output
554 553 sqlalchemy.db1.echo = false
555 554
556 555 ; recycle the connections after this amount of seconds
557 556 sqlalchemy.db1.pool_recycle = 3600
558 557
559 558 ; the number of connections to keep open inside the connection pool.
560 559 ; 0 indicates no limit
561 560 ; the general rule of thumb with gevent is:
562 561 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
563 562 ; then increase pool size + max overflow so that they add up to 500.
564 563 #sqlalchemy.db1.pool_size = 5
565 564
566 565 ; The number of connections to allow in connection pool "overflow", that is
567 566 ; connections that can be opened above and beyond the pool_size setting,
568 567 ; which defaults to five.
569 568 #sqlalchemy.db1.max_overflow = 10
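; As a worked example (hypothetical numbers, not a recommendation): with a
; gevent worker that allows ~500 concurrent greenlets all doing database
; work, one split that adds up to 500 would be:
#sqlalchemy.db1.pool_size = 100
#sqlalchemy.db1.max_overflow = 400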
570 569
571 570 ; Connection check ping, used to detect broken database connections
572 571 ; could be enabled to better handle 'MySQL has gone away' errors
573 572 #sqlalchemy.db1.ping_connection = true
574 573
575 574 ; ##########
576 575 ; VCS CONFIG
577 576 ; ##########
578 577 vcs.server.enable = true
579 578 vcs.server = vcsserver:10010
580 579
581 580 ; Web server connectivity protocol, responsible for web based VCS operations
582 581 ; Available protocols are:
583 582 ; `http` - use http-rpc backend (default)
584 583 vcs.server.protocol = http
585 584
586 585 ; Push/Pull operations protocol, available options are:
587 586 ; `http` - use http-rpc backend (default)
588 587 vcs.scm_app_implementation = http
589 588
590 589 ; Push/Pull operations hooks protocol, available options are:
591 590 ; `http` - use http-rpc backend (default)
592 591 ; `celery` - use celery based hooks
593 592 vcs.hooks.protocol = http
594 593
595 594 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
596 595 ; accessible via the network.
597 596 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
598 597 vcs.hooks.host = *
599 598
600 599 ; Start VCSServer with this instance as a subprocess, useful for development
601 600 vcs.start_server = false
602 601
603 602 ; List of enabled VCS backends, available options are:
604 603 ; `hg` - mercurial
605 604 ; `git` - git
606 605 ; `svn` - subversion
607 606 vcs.backends = hg, git, svn
608 607
609 608 ; Wait this number of seconds before killing connection to the vcsserver
610 609 vcs.connection_timeout = 3600
611 610
612 611 ; Cache flag to cache vcsserver remote calls locally
613 612 ; It uses cache_region `cache_repo`
614 613 vcs.methods.cache = true
615 614
616 615 ; ####################################################
617 616 ; Subversion proxy support (mod_dav_svn)
618 617 ; Maps RhodeCode repo groups into SVN paths for Apache
619 618 ; ####################################################
620 619
621 620 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
622 621 ; Set a numeric version for your current SVN, e.g. 1.8 or 1.12
623 622 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
624 623 #vcs.svn.compatible_version = 1.8
625 624
626 625 ; Enable SVN proxy of requests over HTTP
627 626 vcs.svn.proxy.enabled = true
628 627
629 628 ; host to connect to running SVN subsystem
630 629 vcs.svn.proxy.host = http://svn:8090
631 630
632 631 ; Enable or disable the config file generation.
633 632 svn.proxy.generate_config = true
634 633
635 634 ; Generate config file with `SVNListParentPath` set to `On`.
636 635 svn.proxy.list_parent_path = true
637 636
638 637 ; Set location and file name of generated config file.
639 638 svn.proxy.config_file_path = /etc/rhodecode/conf/svn/mod_dav_svn.conf
640 639
641 640 ; alternative mod_dav config template. This needs to be a valid mako template
642 641 ; Example template can be found in the source code:
643 642 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
644 643 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
645 644
646 645 ; Used as a prefix to the `Location` block in the generated config file.
647 646 ; In most cases it should be set to `/`.
648 647 svn.proxy.location_root = /
649 648
650 649 ; Command to reload the mod dav svn configuration on change.
651 650 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
652 651 ; Make sure the user who runs the RhodeCode process is allowed to reload Apache
653 652 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
654 653
655 654 ; If the timeout expires before the reload command finishes, the command will
656 655 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
657 656 #svn.proxy.reload_timeout = 10
658 657
659 658 ; ####################
660 659 ; SSH Support Settings
661 660 ; ####################
662 661
663 662 ; Defines if a custom authorized_keys file should be created and written on
664 663 ; any change of user SSH keys. Setting this to false also disables the possibility
665 664 ; of users adding SSH keys from the web interface. Super admins can still
666 665 ; manage SSH Keys.
667 666 ssh.generate_authorized_keyfile = true
668 667
669 668 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
670 669 # ssh.authorized_keys_ssh_opts =
671 670
672 671 ; Path to the authorized_keys file where the generated entries are placed.
673 672 ; It is possible to have multiple key files specified in `sshd_config` e.g.
674 673 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
675 674 ssh.authorized_keys_file_path = /etc/rhodecode/conf/ssh/authorized_keys_rhodecode
676 675
677 676 ; Command to execute the SSH wrapper. The binary is available in the
678 677 ; RhodeCode installation directory.
679 678 ; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
680 679 ; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2
681 680 ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
682 681
683 682 ; Allow shell when executing the ssh-wrapper command
684 683 ssh.wrapper_cmd_allow_shell = false
685 684
686 685 ; Enables logging, and detailed output sent back to the client during SSH
687 686 ; operations. Useful for debugging, shouldn't be used in production.
688 687 ssh.enable_debug_logging = false
689 688
690 689 ; Paths to binary executables; by default they are just the binary names, but we can
691 690 ; override them if we want to use custom ones
692 691 ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg
693 692 ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git
694 693 ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve
695 694
696 695 ; Enables SSH key generator web interface. Disabling this still allows users
697 696 ; to add their own keys.
698 697 ssh.enable_ui_key_generator = true
699 698
700 699 ; Statsd client config, this is used to send metrics to statsd
701 700 ; We recommend setting up statsd_exporter and scraping the metrics with Prometheus
702 701 #statsd.enabled = false
703 702 #statsd.statsd_host = 0.0.0.0
704 703 #statsd.statsd_port = 8125
705 704 #statsd.statsd_prefix =
706 705 #statsd.statsd_ipv6 = false
707 706
708 707 ; configure logging automatically at server startup. Set to false
709 708 ; to use the custom logging config below.
710 709 ; RC_LOGGING_FORMATTER
711 710 ; RC_LOGGING_LEVEL
712 711 ; env variables can control the logging settings when autoconfigure is enabled
713 712
714 713 #logging.autoconfigure = true
715 714
716 715 ; specify your own custom logging config file to configure logging
717 716 #logging.logging_conf_file = /path/to/custom_logging.ini
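; For example, with autoconfigure enabled, logging could be tuned from the
; environment like this (values are illustrative only):
; export RC_LOGGING_LEVEL=DEBUG
; export RC_LOGGING_FORMATTER=json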
718 717
719 718 ; Dummy marker to add new entries after.
720 719 ; Add any custom entries below. Please don't remove this marker.
721 720 custom.conf = 1
722 721
723 722
724 723 ; #####################
725 724 ; LOGGING CONFIGURATION
726 725 ; #####################
727 726
728 727 [loggers]
729 728 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper
730 729
731 730 [handlers]
732 731 keys = console, console_sql
733 732
734 733 [formatters]
735 734 keys = generic, json, color_formatter, color_formatter_sql
736 735
737 736 ; #######
738 737 ; LOGGERS
739 738 ; #######
740 739 [logger_root]
741 740 level = NOTSET
742 741 handlers = console
743 742
744 743 [logger_sqlalchemy]
745 744 level = INFO
746 745 handlers = console_sql
747 746 qualname = sqlalchemy.engine
748 747 propagate = 0
749 748
750 749 [logger_beaker]
751 750 level = DEBUG
752 751 handlers =
753 752 qualname = beaker.container
754 753 propagate = 1
755 754
756 755 [logger_rhodecode]
757 756 level = DEBUG
758 757 handlers =
759 758 qualname = rhodecode
760 759 propagate = 1
761 760
762 761 [logger_ssh_wrapper]
763 762 level = DEBUG
764 763 handlers =
765 764 qualname = ssh_wrapper
766 765 propagate = 1
767 766
768 767 [logger_celery]
769 768 level = DEBUG
770 769 handlers =
771 770 qualname = celery
772 771
773 772
774 773 ; ########
775 774 ; HANDLERS
776 775 ; ########
777 776
778 777 [handler_console]
779 778 class = StreamHandler
780 779 args = (sys.stderr, )
781 780 level = INFO
782 781 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
783 782 ; This allows sending properly formatted logs to Grafana Loki or Elasticsearch
784 783 formatter = generic
785 784
786 785 [handler_console_sql]
787 786 ; "level = DEBUG" logs SQL queries and results.
788 787 ; "level = INFO" logs SQL queries.
789 788 ; "level = WARN" logs neither. (Recommended for production systems.)
790 789 class = StreamHandler
791 790 args = (sys.stderr, )
792 791 level = WARN
793 792 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
794 793 ; This allows sending properly formatted logs to Grafana Loki or Elasticsearch
795 794 formatter = generic
796 795
797 796 ; ##########
798 797 ; FORMATTERS
799 798 ; ##########
800 799
801 800 [formatter_generic]
802 801 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
803 802 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
804 803 datefmt = %Y-%m-%d %H:%M:%S
805 804
806 805 [formatter_color_formatter]
807 806 class = rhodecode.lib.logging_formatter.ColorFormatter
808 807 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
809 808 datefmt = %Y-%m-%d %H:%M:%S
810 809
811 810 [formatter_color_formatter_sql]
812 811 class = rhodecode.lib.logging_formatter.ColorFormatterSql
813 812 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
814 813 datefmt = %Y-%m-%d %H:%M:%S
815 814
816 815 [formatter_json]
817 816 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
818 817 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
@@ -1,222 +1,222 b''
1 1 # Copyright (C) 2010-2023 RhodeCode GmbH
2 2 #
3 3 # This program is free software: you can redistribute it and/or modify
4 4 # it under the terms of the GNU Affero General Public License, version 3
5 5 # (only), as published by the Free Software Foundation.
6 6 #
7 7 # This program is distributed in the hope that it will be useful,
8 8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 10 # GNU General Public License for more details.
11 11 #
12 12 # You should have received a copy of the GNU Affero General Public License
13 13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 14 #
15 15 # This program is dual-licensed. If you wish to learn more about the
16 16 # RhodeCode Enterprise Edition, including its added features, Support services,
17 17 # and proprietary license terms, please see https://rhodecode.com/licenses/
18 18
19 19 import os
20 20 import tempfile
21 21 import logging
22 22
23 23 from pyramid.settings import asbool
24 24
25 25 from rhodecode.config.settings_maker import SettingsMaker
26 26 from rhodecode.config import utils as config_utils
27 27
28 28 log = logging.getLogger(__name__)
29 29
30 30
31 31 def sanitize_settings_and_apply_defaults(global_config, settings):
32 32 """
33 33 Applies settings defaults and does all type conversion.
34 34
35 35 We would move all settings parsing and preparation into this place, so that
36 36 we have only one place left which deals with this part. The remaining parts
37 37 of the application would start to rely fully on well-prepared settings.
38 38
39 39 This piece would later be split up per topic to avoid a big fat monster
40 40 function.
41 41 """
42 42 jn = os.path.join
43 43
44 44 global_settings_maker = SettingsMaker(global_config)
45 45 global_settings_maker.make_setting('debug', default=False, parser='bool')
46 46 debug_enabled = asbool(global_config.get('debug'))
47 47
48 48 settings_maker = SettingsMaker(settings)
49 49
50 50 settings_maker.make_setting(
51 51 'logging.autoconfigure',
52 52 default=False,
53 53 parser='bool')
54 54
55 55 logging_conf = jn(os.path.dirname(global_config.get('__file__')), 'logging.ini')
56 56 settings_maker.enable_logging(logging_conf, level='DEBUG' if debug_enabled else 'INFO')
57 57
58 58 # Default includes, possible to change as a user
59 59 pyramid_includes = settings_maker.make_setting('pyramid.includes', [], parser='list:newline')
60 60 log.debug(
61 61 "Using the following pyramid.includes: %s",
62 62 pyramid_includes)
63 63
64 64 settings_maker.make_setting('rhodecode.edition', 'Community Edition')
65 65 settings_maker.make_setting('rhodecode.edition_id', 'CE')
66 66
67 67 if 'mako.default_filters' not in settings:
68 68 # set custom default filters if we don't have it defined
69 69 settings['mako.imports'] = 'from rhodecode.lib.base import h_filter'
70 70 settings['mako.default_filters'] = 'h_filter'
71 71
72 72 if 'mako.directories' not in settings:
73 73 mako_directories = settings.setdefault('mako.directories', [
74 74 # Base templates of the original application
75 75 'rhodecode:templates',
76 76 ])
77 77 log.debug(
78 78 "Using the following Mako template directories: %s",
79 79 mako_directories)
80 80
81 81 # NOTE(marcink): fix redis requirement for schema of connection since 3.X
82 82 if 'beaker.session.type' in settings and settings['beaker.session.type'] == 'ext:redis':
83 83 raw_url = settings['beaker.session.url']
84 84 if not raw_url.startswith(('redis://', 'rediss://', 'unix://')):
85 85 settings['beaker.session.url'] = 'redis://' + raw_url
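# e.g. a bare 'redis:6379/2' becomes 'redis://redis:6379/2'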
86 86
87 87 settings_maker.make_setting('__file__', global_config.get('__file__'))
88 88
89 89 # TODO: johbo: Re-think this, usually the call to config.include
90 90 # should allow to pass in a prefix.
91 91 settings_maker.make_setting('rhodecode.api.url', '/_admin/api')
92 92
93 93 # Sanitize generic settings.
94 94 settings_maker.make_setting('default_encoding', 'UTF-8', parser='list')
95 95 settings_maker.make_setting('gzip_responses', False, parser='bool')
96 96 settings_maker.make_setting('startup.import_repos', 'false', parser='bool')
97 97
98 98 # statsd
99 99 settings_maker.make_setting('statsd.enabled', False, parser='bool')
100 100 settings_maker.make_setting('statsd.statsd_host', 'statsd-exporter', parser='string')
101 101 settings_maker.make_setting('statsd.statsd_port', 9125, parser='int')
102 102 settings_maker.make_setting('statsd.statsd_prefix', '')
103 103 settings_maker.make_setting('statsd.statsd_ipv6', False, parser='bool')
104 104
105 105 settings_maker.make_setting('vcs.svn.compatible_version', '')
106 106 settings_maker.make_setting('vcs.svn.proxy.enabled', True, parser='bool')
107 107 settings_maker.make_setting('vcs.svn.proxy.host', 'http://svn:8090', parser='string')
108 108 settings_maker.make_setting('vcs.hooks.protocol', 'http')
109 109 settings_maker.make_setting('vcs.hooks.host', '*')
110 110 settings_maker.make_setting('vcs.scm_app_implementation', 'http')
111 111 settings_maker.make_setting('vcs.server', '')
112 112 settings_maker.make_setting('vcs.server.protocol', 'http')
113 113 settings_maker.make_setting('vcs.server.enable', 'true', parser='bool')
114 114 settings_maker.make_setting('vcs.hooks.direct_calls', 'false', parser='bool')
115 115 settings_maker.make_setting('vcs.start_server', 'false', parser='bool')
116 116 settings_maker.make_setting('vcs.backends', 'hg, git, svn', parser='list')
117 117 settings_maker.make_setting('vcs.connection_timeout', 3600, parser='int')
118 118
119 119 settings_maker.make_setting('vcs.methods.cache', True, parser='bool')
120 120
121 121 # repo_store path
122 122 settings_maker.make_setting('repo_store.path', '/var/opt/rhodecode_repo_store')
123 123 # Support legacy values of vcs.scm_app_implementation. Legacy
124 124 # configurations may use 'rhodecode.lib.middleware.utils.scm_app_http', or
125 125 # disabled since 4.13 'vcsserver.scm_app' which is now mapped to 'http'.
126 126 scm_app_impl = settings['vcs.scm_app_implementation']
127 127 if scm_app_impl in ['rhodecode.lib.middleware.utils.scm_app_http', 'vcsserver.scm_app']:
128 128 settings['vcs.scm_app_implementation'] = 'http'
129 129
130 130 settings_maker.make_setting('appenlight', False, parser='bool')
131 131
132 132 temp_store = tempfile.gettempdir()
133 133 tmp_cache_dir = jn(temp_store, 'rc_cache')
134 134
135 135 # save default, cache dir, and use it for all backends later.
136 136 default_cache_dir = settings_maker.make_setting(
137 137 'cache_dir',
138 138 default=tmp_cache_dir, default_when_empty=True,
139 139 parser='dir:ensured')
140 140
141 141 # exception store cache
142 142 settings_maker.make_setting(
143 143 'exception_tracker.store_path',
144 144 default=jn(default_cache_dir, 'exc_store'), default_when_empty=True,
145 145 parser='dir:ensured'
146 146 )
147 147
148 148 settings_maker.make_setting(
149 149 'celerybeat-schedule.path',
150 150 default=jn(default_cache_dir, 'celerybeat_schedule', 'celerybeat-schedule.db'), default_when_empty=True,
151 151 parser='file:ensured'
152 152 )
153 153
154 154 settings_maker.make_setting('exception_tracker.send_email', False, parser='bool')
155 155 settings_maker.make_setting('exception_tracker.email_prefix', '[RHODECODE ERROR]', default_when_empty=True)
156 156
157 157 # sessions, ensure file since no-value is memory
158 158 settings_maker.make_setting('beaker.session.type', 'file')
159 159 settings_maker.make_setting('beaker.session.data_dir', jn(default_cache_dir, 'session_data'))
160 160
161 161 # cache_general
162 162 settings_maker.make_setting('rc_cache.cache_general.backend', 'dogpile.cache.rc.file_namespace')
163 163 settings_maker.make_setting('rc_cache.cache_general.expiration_time', 60 * 60 * 12, parser='int')
164 164 settings_maker.make_setting('rc_cache.cache_general.arguments.filename', jn(default_cache_dir, 'rhodecode_cache_general.db'))
165 165
166 166 # cache_perms
167 167 settings_maker.make_setting('rc_cache.cache_perms.backend', 'dogpile.cache.rc.file_namespace')
168 168 settings_maker.make_setting('rc_cache.cache_perms.expiration_time', 60 * 60, parser='int')
169 169 settings_maker.make_setting('rc_cache.cache_perms.arguments.filename', jn(default_cache_dir, 'rhodecode_cache_perms_db'))
170 170
171 171 # cache_repo
172 172 settings_maker.make_setting('rc_cache.cache_repo.backend', 'dogpile.cache.rc.file_namespace')
173 173 settings_maker.make_setting('rc_cache.cache_repo.expiration_time', 60 * 60 * 24 * 30, parser='int')
174 174 settings_maker.make_setting('rc_cache.cache_repo.arguments.filename', jn(default_cache_dir, 'rhodecode_cache_repo_db'))
175 175
176 176 # cache_license
177 177 settings_maker.make_setting('rc_cache.cache_license.backend', 'dogpile.cache.rc.file_namespace')
178 178 settings_maker.make_setting('rc_cache.cache_license.expiration_time', 60 * 5, parser='int')
179 179 settings_maker.make_setting('rc_cache.cache_license.arguments.filename', jn(default_cache_dir, 'rhodecode_cache_license_db'))
180 180
181 181 # cache_repo_longterm memory, 96H
182 182 settings_maker.make_setting('rc_cache.cache_repo_longterm.backend', 'dogpile.cache.rc.memory_lru')
183 183 settings_maker.make_setting('rc_cache.cache_repo_longterm.expiration_time', 345600, parser='int')
184 184 settings_maker.make_setting('rc_cache.cache_repo_longterm.max_size', 10000, parser='int')
185 185
186 186 # sql_cache_short
187 187 settings_maker.make_setting('rc_cache.sql_cache_short.backend', 'dogpile.cache.rc.memory_lru')
188 188 settings_maker.make_setting('rc_cache.sql_cache_short.expiration_time', 30, parser='int')
189 189 settings_maker.make_setting('rc_cache.sql_cache_short.max_size', 10000, parser='int')
190 190
191 191 # archive_cache
192 192 settings_maker.make_setting('archive_cache.locking.url', 'redis://redis:6379/1')
193 193 settings_maker.make_setting('archive_cache.backend.type', 'filesystem')
194 194
195 195 settings_maker.make_setting('archive_cache.filesystem.store_dir', jn(default_cache_dir, 'archive_cache'), default_when_empty=True,)
196 196 settings_maker.make_setting('archive_cache.filesystem.cache_shards', 8, parser='int')
197 197 settings_maker.make_setting('archive_cache.filesystem.cache_size_gb', 10, parser='float')
198 198 settings_maker.make_setting('archive_cache.filesystem.eviction_policy', 'least-recently-stored')
199 199
200 200 settings_maker.make_setting('archive_cache.filesystem.retry', False, parser='bool')
201 201 settings_maker.make_setting('archive_cache.filesystem.retry_backoff', 1, parser='int')
202 202 settings_maker.make_setting('archive_cache.filesystem.retry_attempts', 10, parser='int')
203 203
204 204 settings_maker.make_setting('archive_cache.objectstore.url', jn(default_cache_dir, 'archive_cache'), default_when_empty=True,)
205 205 settings_maker.make_setting('archive_cache.objectstore.key', '')
206 206 settings_maker.make_setting('archive_cache.objectstore.secret', '')
207 settings_maker.make_setting('archive_cache.objectstore.bucket_root', 'rhodecode-archive-cache')
207 settings_maker.make_setting('archive_cache.objectstore.bucket', 'rhodecode-archive-cache', default_when_empty=True,)
208 208 settings_maker.make_setting('archive_cache.objectstore.bucket_shards', 8, parser='int')
209 209
210 210 settings_maker.make_setting('archive_cache.objectstore.cache_size_gb', 10, parser='float')
211 211 settings_maker.make_setting('archive_cache.objectstore.eviction_policy', 'least-recently-stored')
212 212
213 213 settings_maker.make_setting('archive_cache.objectstore.retry', False, parser='bool')
214 214 settings_maker.make_setting('archive_cache.objectstore.retry_backoff', 1, parser='int')
215 215 settings_maker.make_setting('archive_cache.objectstore.retry_attempts', 10, parser='int')
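# For reference, a minimal sketch of the ini keys these defaults map to
# (the endpoint value and enabling `objectstore` as backend.type are
# assumptions for illustration):
#
#   archive_cache.backend.type = objectstore
#   archive_cache.objectstore.url = http://s3-minio:9000
#   archive_cache.objectstore.bucket = rhodecode-archive-cache
#   archive_cache.objectstore.bucket_shards = 8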
216 216
217 217 settings_maker.env_expand()
218 218
219 219 # configure instance id
220 220 config_utils.set_instance_id(settings)
221 221
222 222 return settings
@@ -1,352 +1,355 b''
1 1 # Copyright (C) 2015-2024 RhodeCode GmbH
2 2 #
3 3 # This program is free software: you can redistribute it and/or modify
4 4 # it under the terms of the GNU Affero General Public License, version 3
5 5 # (only), as published by the Free Software Foundation.
6 6 #
7 7 # This program is distributed in the hope that it will be useful,
8 8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 10 # GNU General Public License for more details.
11 11 #
12 12 # You should have received a copy of the GNU Affero General Public License
13 13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 14 #
15 15 # This program is dual-licensed. If you wish to learn more about the
16 16 # RhodeCode Enterprise Edition, including its added features, Support services,
17 17 # and proprietary license terms, please see https://rhodecode.com/licenses/
18 18
19 19 import os
20 20 import functools
21 21 import logging
22 22 import typing
23 23 import time
24 24 import zlib
25 25
26 26 from ...ext_json import json
27 27 from ..utils import StatsDB, NOT_GIVEN, ShardFileReader, EVICTION_POLICY, format_size
28 28 from ..lock import GenerationLock
29 29
30 30 log = logging.getLogger(__name__)
31 31
32 32
33 33 class BaseShard:
34 34 storage_type: str = ''
35 35 fs = None
36 36
37 37 @classmethod
38 38 def hash(cls, key):
39 39 """Compute portable hash for `key`.
40 40
41 41 :param key: key to hash
42 42 :return: hash value
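Note: the adler32 checksum is masked to 32 bits; shard selection is then
`hash(key) % shard_count`, so a given key always maps to the same shard.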
43 43
44 44 """
45 45 mask = 0xFFFFFFFF
46 46 return zlib.adler32(key.encode('utf-8')) & mask # noqa
47 47
48 48 def _write_file(self, full_path, read_iterator, mode):
49 49 raise NotImplementedError
50 50
51 51 def _get_keyfile(self, key):
52 52 raise NotImplementedError
53 53
54 54 def random_filename(self):
55 55 raise NotImplementedError
56 56
57 57 def _store(self, key, value_reader, metadata, mode):
58 58 (filename, # hash-name
59 59 full_path # full-path/hash-name
60 60 ) = self.random_filename()
61 61
62 62 key_file, key_file_path = self._get_keyfile(key)
63 63
64 64 # STORE METADATA
65 65 _metadata = {
66 66 "version": "v1",
67 67
68 68 "key_file": key_file, # this is the .key.json file storing meta
69 69 "key_file_path": key_file_path, # full path to key_file
70 70 "archive_key": key, # original name we stored archive under, e.g my-archive.zip
71 71 "archive_filename": filename, # the actual filename we stored that file under
72 72 "archive_full_path": full_path,
73 73
74 74 "store_time": time.time(),
75 75 "access_count": 0,
76 76 "access_time": 0,
77 77
78 78 "size": 0
79 79 }
80 80 if metadata:
81 81 _metadata.update(metadata)
82 82
83 83 read_iterator = iter(functools.partial(value_reader.read, 2**22), b'')
84 84 size, sha256 = self._write_file(full_path, read_iterator, mode)
85 85 _metadata['size'] = size
86 86 _metadata['sha256'] = sha256
87 87
88 88 # after archive is finished, we create a key to save the presence of the binary file
89 89 with self.fs.open(key_file_path, 'wb') as f:
90 90 f.write(json.dumps(_metadata))
91 91
92 92 return key, filename, size, _metadata
93 93
94 94 def _fetch(self, key, retry, retry_attempts, retry_backoff):
95 95 if retry is NOT_GIVEN:
96 96 retry = False
97 97 if retry_attempts is NOT_GIVEN:
98 98 retry_attempts = 0
99 99
100 100 if retry and retry_attempts > 0:
101 101 for attempt in range(1, retry_attempts + 1):
102 102 if key in self:
103 103 break
104 104 # we didn't find the key, wait retry_backoff N seconds, and re-check
105 105 time.sleep(retry_backoff)
106 106
107 107 if key not in self:
108 108 log.error(f'requested key={key} not found in {self} retry={retry}, attempts={retry_attempts}')
109 109 raise KeyError(key)
110 110
111 111 key_file, key_file_path = self._get_keyfile(key)
112 112 with self.fs.open(key_file_path, 'rb') as f:
113 113 metadata = json.loads(f.read())
114 114
115 115 archive_path = metadata['archive_full_path']
116 116
117 117 try:
118 118 return ShardFileReader(self.fs.open(archive_path, 'rb')), metadata
119 119 finally:
120 120 # update usage stats, count and accessed
121 121 metadata["access_count"] = metadata.get("access_count", 0) + 1
122 122 metadata["access_time"] = time.time()
123 123 log.debug('Updated %s with access snapshot, access_count=%s access_time=%s',
124 124 key_file, metadata['access_count'], metadata['access_time'])
125 125 with self.fs.open(key_file_path, 'wb') as f:
126 126 f.write(json.dumps(metadata))
127 127
128 128 def _remove(self, key):
129 129 if key not in self:
130 130 log.error(f'requested key={key} not found in {self}')
131 131 raise KeyError(key)
132 132
133 133 key_file, key_file_path = self._get_keyfile(key)
134 134 with self.fs.open(key_file_path, 'rb') as f:
135 135 metadata = json.loads(f.read())
136 136
137 137 archive_path = metadata['archive_full_path']
138 138 self.fs.rm(archive_path)
139 139 self.fs.rm(key_file_path)
140 140 return 1
141 141
142 142 @property
143 143 def storage_medium(self):
144 144 return getattr(self, self.storage_type)
145 145
146 146 @property
147 147 def key_suffix(self):
148 148 return 'key.json'
149 149
150 150 def __contains__(self, key):
151 151 """Return `True` if `key` matching item is found in cache.
152 152
153 153 :param key: key matching item
154 154 :return: True if key matching item
155 155
156 156 """
157 157 key_file, key_file_path = self._get_keyfile(key)
158 158 return self.fs.exists(key_file_path)
159 159
160 160
161 161 class BaseCache:
162 162 _locking_url: str = ''
163 163 _storage_path: str = ''
164 164 _config = {}
165 165 retry = False
166 166 retry_attempts = 0
167 167 retry_backoff = 1
168 168 _shards = tuple()
169 shard_cls = BaseShard
169 170
170 171 def __contains__(self, key):
171 172 """Return `True` if `key` matching item is found in cache.
172 173
173 174 :param key: key matching item
174 175 :return: True if key matching item
175 176
176 177 """
177 178 return self.has_key(key)
178 179
179 180 def __repr__(self):
180 181 return f'<{self.__class__.__name__}(storage={self._storage_path})>'
181 182
182 183 @classmethod
183 184 def gb_to_bytes(cls, gb):
184 185 return gb * (1024 ** 3)
185 186
186 187 @property
187 188 def storage_path(self):
188 189 return self._storage_path
189 190
190 191 @classmethod
191 192 def get_stats_db(cls):
192 193 return StatsDB()
193 194
194 195 def get_conf(self, key, pop=False):
195 196 if key not in self._config:
196 197 raise ValueError(f"No configuration key '{key}', please make sure it exists in archive_cache config")
197 198 val = self._config[key]
198 199 if pop:
199 200 del self._config[key]
200 201 return val
201 202
202 def _get_shard(self, key):
203 raise NotImplementedError
203 def _get_shard(self, key) -> shard_cls:
204 index = self._hash(key) % self._shard_count
205 shard = self._shards[index]
206 return shard
204 207
205 208 def _get_size(self, shard, archive_path):
206 209 raise NotImplementedError
207 210
208 211 def store(self, key, value_reader, metadata=None):
209 212 shard = self._get_shard(key)
210 213 return shard.store(key, value_reader, metadata)
211 214
212 215 def fetch(self, key, retry=NOT_GIVEN, retry_attempts=NOT_GIVEN) -> tuple[typing.BinaryIO, dict]:
213 216 """
214 217 Return file handle corresponding to `key` from specific shard cache.
215 218 """
216 219 if retry is NOT_GIVEN:
217 220 retry = self.retry
218 221 if retry_attempts is NOT_GIVEN:
219 222 retry_attempts = self.retry_attempts
220 223 retry_backoff = self.retry_backoff
221 224
222 225 shard = self._get_shard(key)
223 226 return shard.fetch(key, retry=retry, retry_attempts=retry_attempts, retry_backoff=retry_backoff)
224 227
225 228 def remove(self, key):
226 229 shard = self._get_shard(key)
227 230 return shard.remove(key)
228 231
229 232 def has_key(self, archive_key):
230 233 """Return `True` if `key` matching item is found in cache.
231 234
232 235 :param archive_key: key for item, this is a unique archive name we want to store data under. e.g my-archive-svn.zip
233 236 :return: True if key is found
234 237
235 238 """
236 239 shard = self._get_shard(archive_key)
237 240 return archive_key in shard
238 241
239 242 def iter_keys(self):
240 243 for shard in self._shards:
241 244 if shard.fs.exists(shard.storage_medium):
242 245 for path, _dirs, _files in shard.fs.walk(shard.storage_medium):
243 246 for key_file_path in _files:
244 247 if key_file_path.endswith(shard.key_suffix):
245 248 yield shard, key_file_path
246 249
247 250 def get_lock(self, lock_key):
248 251 return GenerationLock(lock_key, self._locking_url)
249 252
250 253 def evict(self, policy=None, size_limit=None) -> dict:
251 254 """
252 255 Remove old items based on the conditions
253 256
254 257
255 258 explanation of this algorithm:
256 259 iterate over each shard, then for each shard iterate over the .key files and
257 260 read the stored metadata. This gives us a full list of keys, cached archives, their size and
258 261 access data: creation time and access counts.
259 262
260 263 Store that into a memory DB so we can run different sorting strategies easily.
261 264 Summing the size is then a single SQL sum query.
262 265
263 266 Then we run a sorting strategy based on the eviction policy.
264 267 We iterate over the sorted keys, removing each one and checking if we hit the overall limit.
265 268 """
266 269 removal_info = {
267 270 "removed_items": 0,
268 271 "removed_size": 0
269 272 }
270 273 policy = policy or self._eviction_policy
271 274 size_limit = size_limit or self._cache_size_limit
272 275
273 276 select_policy = EVICTION_POLICY[policy]['evict']
274 277
275 278 log.debug('Running eviction policy \'%s\', and checking for size limit: %s',
276 279 policy, format_size(size_limit))
277 280
278 281 if select_policy is None:
279 282 return removal_info
280 283
281 284 db = self.get_stats_db()
282 285
283 286 data = []
284 287 cnt = 1
285 288
286 289 for shard, key_file in self.iter_keys():
287 290 with shard.fs.open(os.path.join(shard.storage_medium, key_file), 'rb') as f:
288 291 metadata = json.loads(f.read())
289 292
290 293 key_file_path = os.path.join(shard.storage_medium, key_file)
291 294
292 295 archive_key = metadata['archive_key']
293 296 archive_path = metadata['archive_full_path']
294 297
295 298 size = metadata.get('size')
296 299 if not size:
297 300 # in case we don't have size re-calc it...
298 301 size = self._get_size(shard, archive_path)
299 302
300 303 data.append([
301 304 cnt,
302 305 key_file,
303 306 key_file_path,
304 307 archive_key,
305 308 archive_path,
306 309 metadata.get('store_time', 0),
307 310 metadata.get('access_time', 0),
308 311 metadata.get('access_count', 0),
309 312 size,
310 313 ])
311 314 cnt += 1
312 315
313 316 # Insert bulk data using executemany
314 317 db.bulk_insert(data)
315 318
316 319 total_size = db.get_total_size()
317 320 log.debug('Analyzed %s keys, occupying: %s, running eviction to match %s',
318 321 len(data), format_size(total_size), format_size(size_limit))
319 322
320 323 removed_items = 0
321 324 removed_size = 0
322 325 for key_file, archive_key, size in db.get_sorted_keys(select_policy):
323 326 # simulate removal impact BEFORE removal
324 327 total_size -= size
325 328
326 329 if total_size <= size_limit:
327 330 # we obtained what we wanted...
328 331 break
329 332
330 333 self.remove(archive_key)
331 334 removed_items += 1
332 335 removed_size += size
333 336 removal_info['removed_items'] = removed_items
334 337 removal_info['removed_size'] = removed_size
335 338 log.debug('Removed %s cache archives, and reduced size by: %s',
336 339 removed_items, format_size(removed_size))
337 340 return removal_info
338 341
339 342 def get_statistics(self):
340 343 total_files = 0
341 344 total_size = 0
342 345 meta = {}
343 346
344 347 for shard, key_file in self.iter_keys():
345 348 json_key = f"{shard.storage_medium}/{key_file}"
346 349 with shard.fs.open(json_key, 'rb') as f:
347 350 total_files += 1
348 351 metadata = json.loads(f.read())
349 352 total_size += metadata['size']
350 353
351 354 return total_files, total_size, meta
352 355
@@ -1,167 +1,174 b''
1 1 # Copyright (C) 2015-2024 RhodeCode GmbH
2 2 #
3 3 # This program is free software: you can redistribute it and/or modify
4 4 # it under the terms of the GNU Affero General Public License, version 3
5 5 # (only), as published by the Free Software Foundation.
6 6 #
7 7 # This program is distributed in the hope that it will be useful,
8 8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 10 # GNU General Public License for more details.
11 11 #
12 12 # You should have received a copy of the GNU Affero General Public License
13 13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 14 #
15 15 # This program is dual-licensed. If you wish to learn more about the
16 16 # RhodeCode Enterprise Edition, including its added features, Support services,
17 17 # and proprietary license terms, please see https://rhodecode.com/licenses/
18 18
19 19 import codecs
20 20 import hashlib
21 21 import logging
22 22 import os
23 23
24 24 import fsspec
25 25
26 26 from .base import BaseCache, BaseShard
27 27 from ..utils import ShardFileReader, NOT_GIVEN
28 28 from ...type_utils import str2bool
29 29
30 30 log = logging.getLogger(__name__)
31 31
32 32
33 33 class FileSystemShard(BaseShard):
34 34
35 def __init__(self, index, directory, **settings):
35 def __init__(self, index, directory, directory_folder, fs, **settings):
36 36 self._index = index
37 37 self._directory = directory
38 self._directory_folder = directory_folder
38 39 self.storage_type = 'directory'
39 self.fs = fsspec.filesystem('file')
40
41 self.fs = fs
40 42
41 43 @property
42 44 def directory(self):
43 """Cache directory."""
44 return self._directory
45 """Cache directory final path."""
46 return os.path.join(self._directory, self._directory_folder)
45 47
46 48 def _get_keyfile(self, archive_key) -> tuple[str, str]:
47 49 key_file = f'{archive_key}.{self.key_suffix}'
48 50 return key_file, os.path.join(self.directory, key_file)
49 51
50 52 def _get_writer(self, path, mode):
51 53 for count in range(1, 11):
52 54 try:
53 55 # Another cache may have deleted the directory before
54 56 # the file could be opened.
55 57 return self.fs.open(path, mode)
56 58 except OSError:
57 59 if count == 10:
58 60 # Give up after 10 tries to open the file.
59 61 raise
60 62 continue
61 63
62 64 def _write_file(self, full_path, iterator, mode):
63 65 # ensure dir exists
64 66 destination, _ = os.path.split(full_path)
65 67 if not self.fs.exists(destination):
66 68 self.fs.makedirs(destination)
67 69
68 70 writer = self._get_writer(full_path, mode)
69 71
70 72 digest = hashlib.sha256()
71 73 with writer:
72 74 size = 0
73 75 for chunk in iterator:
74 76 size += len(chunk)
75 77 digest.update(chunk)
76 78 writer.write(chunk)
77 79 writer.flush()
78 80 # Get the file descriptor
79 81 fd = writer.fileno()
80 82
81 83 # Sync the file descriptor to disk, helps with NFS cases...
82 84 os.fsync(fd)
83 85 sha256 = digest.hexdigest()
84 86 log.debug('written new archive cache under %s, sha256: %s', full_path, sha256)
85 87 return size, sha256
86 88
87 89 def store(self, key, value_reader, metadata: dict | None = None):
88 90 return self._store(key, value_reader, metadata, mode='xb')
89 91
90 92 def fetch(self, key, retry=NOT_GIVEN, retry_attempts=NOT_GIVEN, retry_backoff=1) -> tuple[ShardFileReader, dict]:
91 93 return self._fetch(key, retry, retry_attempts, retry_backoff)
92 94
93 95 def remove(self, key):
94 96 return self._remove(key)
95 97
96 98 def random_filename(self):
97 99 """Return filename and full-path tuple for file storage.
98 100
99 101 Filename will be a randomly generated 28 character hexadecimal string
100 102 with ".archive_cache" suffixed. Two levels of sub-directories will be used to
101 103 reduce the size of directories. On older filesystems, lookups in
102 104 directories with many files may be slow.
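e.g. 'ab/cd/ef0123456789abcdef0123456789.archive_cache' (illustrative value)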
103 105 """
104 106
105 107 hex_name = codecs.encode(os.urandom(16), 'hex').decode('utf-8')
106 108
107 109 archive_name = hex_name[4:] + '.archive_cache'
108 110 filename = f"{hex_name[:2]}/{hex_name[2:4]}/{archive_name}"
109 111
110 112 full_path = os.path.join(self.directory, filename)
111 113 return archive_name, full_path
112 114
113 115 def __repr__(self):
114 116 return f'{self.__class__.__name__}(index={self._index}, dir={self.directory})'
115 117
116 118
117 119 class FileSystemFanoutCache(BaseCache):
118 120 shard_name = 'shard_%03d'
121 shard_cls = FileSystemShard
119 122
120 123 def __init__(self, locking_url, **settings):
121 124 """
122 125 Initialize file system cache instance.
123 126
124 127 :param str locking_url: redis url for a lock
125 128 :param settings: settings dict
126 129
127 130 """
128 131 self._locking_url = locking_url
129 132 self._config = settings
130 133 cache_dir = self.get_conf('archive_cache.filesystem.store_dir')
131 134 directory = str(cache_dir)
132 135 directory = os.path.expanduser(directory)
133 136 directory = os.path.expandvars(directory)
134 137 self._directory = directory
135 self._storage_path = directory
138 self._storage_path = directory # common path for all from BaseCache
136 139
137 # check if it's ok to write, and re-create the archive cache
138 if not os.path.isdir(self._directory):
139 os.makedirs(self._directory, exist_ok=True)
140
141 self._count = int(self.get_conf('archive_cache.filesystem.cache_shards', pop=True))
140 self._shard_count = int(self.get_conf('archive_cache.filesystem.cache_shards', pop=True))
141 if self._shard_count < 1:
142 raise ValueError('cache_shards must be 1 or more')
142 143
143 144 self._eviction_policy = self.get_conf('archive_cache.filesystem.eviction_policy', pop=True)
144 145 self._cache_size_limit = self.gb_to_bytes(int(self.get_conf('archive_cache.filesystem.cache_size_gb')))
145 146
146 147 self.retry = str2bool(self.get_conf('archive_cache.filesystem.retry', pop=True))
147 148 self.retry_attempts = int(self.get_conf('archive_cache.filesystem.retry_attempts', pop=True))
148 149 self.retry_backoff = int(self.get_conf('archive_cache.filesystem.retry_backoff', pop=True))
149 150
150 log.debug('Initializing archival cache instance under %s', self._directory)
151 log.debug('Initializing %s archival cache instance under %s', self, self._directory)
152 fs = fsspec.filesystem('file')
153 # check if it's ok to write, and re-create the archive cache main dir
154 # A directory is the virtual equivalent of a physical file cabinet:
155 # a container for organizing digital data. Unlike a plain folder, it can
156 # hold files as well as nested subdirectories. Here it is the common root
157 # that holds the per-shard folders.
158 if not fs.exists(self._directory):
159 fs.makedirs(self._directory, exist_ok=True)
160
151 161 self._shards = tuple(
152 FileSystemShard(
162 self.shard_cls(
153 163 index=num,
154 directory=os.path.join(directory, self.shard_name % num),
164 directory=directory,
165 directory_folder=self.shard_name % num,
166 fs=fs,
155 167 **settings,
156 168 )
157 for num in range(self._count)
169 for num in range(self._shard_count)
158 170 )
159 171 self._hash = self._shards[0].hash
160 172
161 def _get_shard(self, key) -> FileSystemShard:
162 index = self._hash(key) % self._count
163 shard = self._shards[index]
164 return shard
165
166 173 def _get_size(self, shard, archive_path):
167 174 return os.stat(archive_path).st_size
@@ -1,158 +1,164 b''
1 1 # Copyright (C) 2015-2024 RhodeCode GmbH
2 2 #
3 3 # This program is free software: you can redistribute it and/or modify
4 4 # it under the terms of the GNU Affero General Public License, version 3
5 5 # (only), as published by the Free Software Foundation.
6 6 #
7 7 # This program is distributed in the hope that it will be useful,
8 8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 10 # GNU General Public License for more details.
11 11 #
12 12 # You should have received a copy of the GNU Affero General Public License
13 13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 14 #
15 15 # This program is dual-licensed. If you wish to learn more about the
16 16 # RhodeCode Enterprise Edition, including its added features, Support services,
17 17 # and proprietary license terms, please see https://rhodecode.com/licenses/
18 18
19 19 import codecs
20 20 import hashlib
21 21 import logging
22 22 import os
23 23
24 24 import fsspec
25 25
26 26 from .base import BaseCache, BaseShard
27 27 from ..utils import ShardFileReader, NOT_GIVEN
28 28 from ...type_utils import str2bool
29 29
30 30 log = logging.getLogger(__name__)
31 31
32 32
33 33 class S3Shard(BaseShard):
34 34
35 def __init__(self, index, bucket, **settings):
35 def __init__(self, index, bucket, bucket_folder, fs, **settings):
36 36 self._index = index
37 self._bucket = bucket
37 self._bucket_folder = bucket_folder
38 38 self.storage_type = 'bucket'
39 self._bucket_main = bucket
39 40
40 endpoint_url = settings.pop('archive_cache.objectstore.url')
41 key = settings.pop('archive_cache.objectstore.key')
42 secret = settings.pop('archive_cache.objectstore.secret')
43
44 # TODO: Add it all over the place...
45 self._bucket_root = settings.pop('archive_cache.objectstore.bucket_root')
46
47 self.fs = fsspec.filesystem('s3', anon=False, endpoint_url=endpoint_url, key=key, secret=secret)
41 self.fs = fs
48 42
49 43 @property
50 44 def bucket(self):
51 """Cache bucket."""
52 return os.path.join(self._bucket_root, self._bucket)
45 """Cache bucket final path."""
46 return os.path.join(self._bucket_main, self._bucket_folder)
53 47
54 48 def _get_keyfile(self, archive_key) -> tuple[str, str]:
55 49 key_file = f'{archive_key}-{self.key_suffix}'
56 50 return key_file, os.path.join(self.bucket, key_file)
57 51
58 52 def _get_writer(self, path, mode):
59 53 return self.fs.open(path, 'wb')
60 54
61 55 def _write_file(self, full_path, iterator, mode):
62 if self._bucket_root:
63 if not self.fs.exists(self._bucket_root):
64 self.fs.mkdir(self._bucket_root)
65 56
66 # ensure bucket exists
57 # ensure folder in bucket exists
67 58 destination = self.bucket
68 59 if not self.fs.exists(destination):
69 60 self.fs.mkdir(destination, s3_additional_kwargs={})
70 61
71 62 writer = self._get_writer(full_path, mode)
72 63
73 64 digest = hashlib.sha256()
74 65 with writer:
75 66 size = 0
76 67 for chunk in iterator:
77 68 size += len(chunk)
78 69 digest.update(chunk)
79 70 writer.write(chunk)
80 71
81 72 sha256 = digest.hexdigest()
82 73 log.debug('written new archive cache under %s, sha256: %s', full_path, sha256)
83 74 return size, sha256
84 75
85 76 def store(self, key, value_reader, metadata: dict | None = None):
86 77 return self._store(key, value_reader, metadata, mode='wb')
87 78
88 79 def fetch(self, key, retry=NOT_GIVEN, retry_attempts=NOT_GIVEN, retry_backoff=1) -> tuple[ShardFileReader, dict]:
89 80 return self._fetch(key, retry, retry_attempts, retry_backoff)
90 81
91 82 def remove(self, key):
92 83 return self._remove(key)
93 84
94 85 def random_filename(self):
95 86 """Return filename and full-path tuple for file storage.
96 87
97 88 Filename will be a randomly generated 28 character hexadecimal string
98 89 with ".archive_cache" suffixed. Two levels of sub-directories will be used to
99 90 reduce the size of directories. On older filesystems, lookups in
100 91 directories with many files may be slow.
101 92 """
102 93
103 94 hex_name = codecs.encode(os.urandom(16), 'hex').decode('utf-8')
104 95
105 96 archive_name = hex_name[4:] + '.archive_cache'
106 97 filename = f"{hex_name[:2]}-{hex_name[2:4]}-{archive_name}"
107 98
108 99 full_path = os.path.join(self.bucket, filename)
109 100 return archive_name, full_path
110 101
111 102 def __repr__(self):
112 103 return f'{self.__class__.__name__}(index={self._index}, bucket={self.bucket})'
113 104
114 105
115 106 class ObjectStoreCache(BaseCache):
116 shard_name = 'shard-bucket-%03d'
107 shard_name = 'shard-%03d'
108 shard_cls = S3Shard
117 109
118 110 def __init__(self, locking_url, **settings):
119 111 """
120 112 Initialize objectstore cache instance.
121 113
122 114 :param str locking_url: redis url for a lock
123 115 :param settings: settings dict
124 116
125 117 """
126 118 self._locking_url = locking_url
127 119 self._config = settings
128 120
129 121 objectstore_url = self.get_conf('archive_cache.objectstore.url')
130 self._storage_path = objectstore_url
122 self._storage_path = objectstore_url # common path for all from BaseCache
131 123
132 self._count = int(self.get_conf('archive_cache.objectstore.bucket_shards', pop=True))
124 self._shard_count = int(self.get_conf('archive_cache.objectstore.bucket_shards', pop=True))
125 if self._shard_count < 1:
126 raise ValueError('bucket_shards must be 1 or more')
127
128 self._bucket = settings.pop('archive_cache.objectstore.bucket')
129 if not self._bucket:
130 raise ValueError('archive_cache.objectstore.bucket needs to have a value')
133 131
134 132 self._eviction_policy = self.get_conf('archive_cache.objectstore.eviction_policy', pop=True)
135 133 self._cache_size_limit = self.gb_to_bytes(int(self.get_conf('archive_cache.objectstore.cache_size_gb')))
136 134
137 135 self.retry = str2bool(self.get_conf('archive_cache.objectstore.retry', pop=True))
138 136 self.retry_attempts = int(self.get_conf('archive_cache.objectstore.retry_attempts', pop=True))
139 137 self.retry_backoff = int(self.get_conf('archive_cache.objectstore.retry_backoff', pop=True))
140 138
141 log.debug('Initializing archival cache instance under %s', objectstore_url)
139 endpoint_url = settings.pop('archive_cache.objectstore.url')
140 key = settings.pop('archive_cache.objectstore.key')
141 secret = settings.pop('archive_cache.objectstore.secret')
142
143 log.debug('Initializing %s archival cache instance under %s', self, endpoint_url)
144
145 fs = fsspec.filesystem('s3', anon=False, endpoint_url=endpoint_url, key=key, secret=secret)
146
147 # init main bucket
148 if not fs.exists(self._bucket):
149 fs.mkdir(self._bucket)
150
142 151 self._shards = tuple(
143 S3Shard(
152 self.shard_cls(
144 153 index=num,
145 bucket=self.shard_name % num,
154 bucket=self._bucket,
155 bucket_folder=self.shard_name % num,
156 fs=fs,
146 157 **settings,
147 158 )
148 for num in range(self._count)
159 for num in range(self._shard_count)
149 160 )
150 161 self._hash = self._shards[0].hash
151 162
152 def _get_shard(self, key) -> S3Shard:
153 index = self._hash(key) % self._count
154 shard = self._shards[index]
155 return shard
156
157 163 def _get_size(self, shard, archive_path):
158 164 return shard.fs.info(archive_path)['size']
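A minimal usage sketch of the reworked ObjectStoreCache (the import path, endpoint, credentials, and bucket name below are assumed placeholder values; the config keys mirror the defaults registered in sanitize_settings_and_apply_defaults, and a live S3-compatible endpoint is required):

from rhodecode.lib.archive_cache.backends.objectstore import ObjectStoreCache  # assumed module path

# placeholder settings; in production these come from the .ini file
settings = {
    'archive_cache.objectstore.url': 'http://s3-minio:9000',        # assumed endpoint
    'archive_cache.objectstore.key': 's3admin',                     # assumed credentials
    'archive_cache.objectstore.secret': 's3secret',
    'archive_cache.objectstore.bucket': 'rhodecode-archive-cache',  # now required: the single main bucket
    'archive_cache.objectstore.bucket_shards': '8',                 # shards become folders inside the bucket
    'archive_cache.objectstore.cache_size_gb': '10',
    'archive_cache.objectstore.eviction_policy': 'least-recently-stored',
    'archive_cache.objectstore.retry': 'false',
    'archive_cache.objectstore.retry_attempts': '10',
    'archive_cache.objectstore.retry_backoff': '1',
}

d_cache = ObjectStoreCache('redis://redis:6379/1', **settings)

# store an archive under a unique key, then fetch it back
with open('my-archive.zip', 'rb') as f:
    d_cache.store('my-archive.zip', f, metadata={'repo': 'example'})

reader, metadata = d_cache.fetch('my-archive.zip')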
@@ -1,832 +1,831 b''
1 1
2 2 ; #########################################
3 3 ; RHODECODE COMMUNITY EDITION CONFIGURATION
4 4 ; #########################################
5 5
6 6 [DEFAULT]
7 7 ; Debug flag sets all loggers to debug, and enables request tracking
8 8 debug = true
9 9
10 10 ; ########################################################################
11 11 ; EMAIL CONFIGURATION
12 12 ; These settings will be used by the RhodeCode mailing system
13 13 ; ########################################################################
14 14
15 15 ; prefix all emails subjects with given prefix, helps filtering out emails
16 16 #email_prefix = [RhodeCode]
17 17
18 18 ; email FROM address all mails will be sent
19 19 #app_email_from = rhodecode-noreply@localhost
20 20
21 21 #smtp_server = mail.server.com
22 22 #smtp_username =
23 23 #smtp_password =
24 24 #smtp_port =
25 25 #smtp_use_tls = false
26 26 #smtp_use_ssl = true
27 27
28 28 [server:main]
29 29 ; COMMON HOST/IP CONFIG, This applies mostly to develop setup,
30 30 ; Host port for gunicorn are controlled by gunicorn_conf.py
31 31 host = 127.0.0.1
32 32 port = 10020
33 33
34 34
35 35 ; ###########################
36 36 ; GUNICORN APPLICATION SERVER
37 37 ; ###########################
38 38
39 39 ; run with gunicorn --paste rhodecode.ini --config gunicorn_conf.py
40 40
41 41 ; Module to use, this setting shouldn't be changed
42 42 use = egg:gunicorn#main
43 43
44 44 ; Prefix middleware for RhodeCode.
45 45 ; recommended when using proxy setup.
46 46 ; allows setting RhodeCode under a prefix on the server.
47 47 ; eg https://server.com/custom_prefix. Enable `filter-with =` option below as well.
48 48 ; And set your prefix like: `prefix = /custom_prefix`
49 49 ; be sure to also set beaker.session.cookie_path = /custom_prefix if you need
50 50 ; to make your cookies work only on the prefix url
51 51 [filter:proxy-prefix]
52 52 use = egg:PasteDeploy#prefix
53 53 prefix = /
54 54
55 55 [app:main]
56 56 ; The %(here)s variable will be replaced with the absolute path of the parent directory
57 57 ; of this file
58 58 ; Each option in the app:main section can be overridden by an environment variable
59 59 ;
60 60 ;To override an option:
61 61 ;
62 62 ;RC_<KeyName>
63 63 ;Everything should be uppercase, . and - should be replaced by _.
64 64 ;For example, if you have these configuration settings:
65 65 ;rc_cache.repo_object.backend = foo
66 66 ;can be overridden by
67 67 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
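A small helper sketch of the override rule above; it is hypothetical, not RhodeCode code, and the handling of the RC_ prefix for keys that do not already start with rc_ is an assumption (only the documented example is guaranteed).

    # Hypothetical helper: derive the env var name for an ini option per the
    # rules above (uppercase, '.' and '-' become '_', RC_ prefix).
    def env_var_for(option: str) -> str:
        name = option.upper().replace('.', '_').replace('-', '_')
        if not name.startswith('RC_'):   # assumption: avoid a double RC_ prefix
            name = 'RC_' + name
        return name

    assert env_var_for('rc_cache.repo_object.backend') == 'RC_CACHE_REPO_OBJECT_BACKEND'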
68 68
69 69 use = egg:rhodecode-enterprise-ce
70 70
71 71 ; enable proxy prefix middleware, defined above
72 72 #filter-with = proxy-prefix
73 73
74 74 ; encryption key used to encrypt social plugin tokens,
75 75 ; remote_urls with credentials etc, if not set it defaults to
76 76 ; `beaker.session.secret`
77 77 #rhodecode.encrypted_values.secret =
78 78
79 79 ; decryption strict mode (enabled by default). It controls if decryption raises
80 80 ; `SignatureVerificationError` in case of wrong key, or damaged encryption data.
81 81 #rhodecode.encrypted_values.strict = false
82 82
83 83 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
84 84 ; fernet is safer, and we strongly recommend switching to it.
85 85 ; Due to backward compatibility aes is used as default.
86 86 #rhodecode.encrypted_values.algorithm = fernet
87 87
88 88 ; Return gzipped responses from RhodeCode (static files/application)
89 89 gzip_responses = false
90 90
91 91 ; Auto-generate javascript routes file on startup
92 92 generate_js_files = false
93 93
94 94 ; System global default language.
95 95 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
96 96 lang = en
97 97
98 98 ; Perform a full repository scan and import on each server start.
99 99 ; Setting this to true could lead to a very long startup time.
100 100 startup.import_repos = true
101 101
102 102 ; URL at which the application is running. This is used for Bootstrapping
103 103 ; requests in context when no web request is available. Used in ishell, or
104 104 ; SSH calls. Set this for events to receive proper url for SSH calls.
105 105 app.base_url = http://rhodecode.local
106 106
107 107 ; Host at which the Service API is running.
108 108 app.service_api.host = http://rhodecode.local:10020
109 109
110 110 ; Secret for Service API authentication.
111 111 app.service_api.token =
112 112
113 113 ; Unique application ID. Should be a random unique string for security.
114 114 app_instance_uuid = rc-production
115 115
116 116 ; Cut off limit for large diffs (size in bytes). If the overall diff size of a
117 117 ; commit or pull request exceeds this limit, the diff will be displayed
118 118 ; partially. E.g 512000 == 512Kb
119 119 cut_off_limit_diff = 1024000
120 120
121 121 ; Cut off limit for large files inside diffs (size in bytes). Each individual
122 122 ; file inside a diff which exceeds this limit will be displayed partially.
123 123 ; E.g 128000 == 128Kb
124 124 cut_off_limit_file = 256000
125 125
126 126 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
127 127 vcs_full_cache = false
128 128
129 129 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
130 130 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
131 131 force_https = false
132 132
133 133 ; use Strict-Transport-Security headers
134 134 use_htsts = false
135 135
136 136 ; Set to true if your repos are exposed using the dumb protocol
137 137 git_update_server_info = false
138 138
139 139 ; RSS/ATOM feed options
140 140 rss_cut_off_limit = 256000
141 141 rss_items_per_page = 10
142 142 rss_include_diff = false
143 143
144 144 ; gist URL alias, used to create nicer urls for gist. This should be an
145 145 ; url that does rewrites to _admin/gists/{gistid}.
146 146 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
147 147 ; RhodeCode url, ie. http[s]://rhodecode.server/_admin/gists/{gistid}
148 148 gist_alias_url =
149 149
150 150 ; List of views (using glob pattern syntax) that AUTH TOKENS could be
151 151 ; used for access.
152 152 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
153 153 ; came from the logged-in user who owns this authentication token.
154 154 ; Additionally, the @TOKEN syntax can be used to bind the view to a specific
155 155 ; authentication token. Such a view would only be accessible when used together
156 156 ; with this authentication token
157 157 ; list of all views can be found under `/_admin/permissions/auth_token_access`
158 158 ; The list should be "," separated and on a single line.
159 159 ; Most common views to enable:
160 160
161 161 # RepoCommitsView:repo_commit_download
162 162 # RepoCommitsView:repo_commit_patch
163 163 # RepoCommitsView:repo_commit_raw
164 164 # RepoCommitsView:repo_commit_raw@TOKEN
165 165 # RepoFilesView:repo_files_diff
166 166 # RepoFilesView:repo_archivefile
167 167 # RepoFilesView:repo_file_raw
168 168 # GistView:*
169 169 api_access_controllers_whitelist =
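For illustration, a hedged client-side sketch of the token mechanism described above; host, path and token are placeholders, not values from this changeset.

    # Hypothetical example: fetch a raw file via an auth token, as described
    # above. Requires RepoFilesView:repo_file_raw in the whitelist.
    import requests

    url = 'https://rhodecode.local/myrepo/raw/tip/README.rst'
    resp = requests.get(url, params={'auth_token': 'TOKEN_HASH'})
    resp.raise_for_status()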
170 170
171 171 ; Default encoding used to convert from and to unicode
172 172 ; can be also a comma separated list of encoding in case of mixed encodings
173 173 default_encoding = UTF-8
174 174
175 175 ; instance-id prefix
176 176 ; a prefix key for this instance used for cache invalidation when running
177 177 ; multiple instances of RhodeCode, make sure it's globally unique for
178 178 ; all running RhodeCode instances. Leave empty if you don't use it
179 179 instance_id =
180 180
181 181 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
182 182 ; of an authentication plugin even if it is disabled by its settings.
183 183 ; This could be useful if you are unable to log in to the system due to broken
184 184 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
185 185 ; module to log in again and fix the settings.
186 186 ; Available builtin plugin IDs (hash is part of the ID):
187 187 ; egg:rhodecode-enterprise-ce#rhodecode
188 188 ; egg:rhodecode-enterprise-ce#pam
189 189 ; egg:rhodecode-enterprise-ce#ldap
190 190 ; egg:rhodecode-enterprise-ce#jasig_cas
191 191 ; egg:rhodecode-enterprise-ce#headers
192 192 ; egg:rhodecode-enterprise-ce#crowd
193 193
194 194 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
195 195
196 196 ; Flag to control loading of legacy plugins in py:/path format
197 197 auth_plugin.import_legacy_plugins = true
198 198
199 199 ; alternative return HTTP header for failed authentication. Default HTTP
200 200 ; response is 401 HTTPUnauthorized. Currently HG clients have trouble
201 201 ; handling that, causing a series of failed authentication calls.
202 202 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
203 203 ; This will be served instead of default 401 on bad authentication
204 204 auth_ret_code =
205 205
206 206 ; use special detection method when serving auth_ret_code, instead of serving
207 207 ; ret_code directly, use 401 initially (which triggers a credentials prompt)
208 208 ; and then serve auth_ret_code to clients
209 209 auth_ret_code_detection = false
210 210
211 211 ; locking return code. When repository is locked return this HTTP code. 2XX
212 212 ; codes don't break the transactions while 4XX codes do
213 213 lock_ret_code = 423
214 214
215 215 ; Filesystem location where repositories should be stored
216 216 repo_store.path = /var/opt/rhodecode_repo_store
217 217
218 218 ; allows to setup custom hooks in settings page
219 219 allow_custom_hooks_settings = true
220 220
221 221 ; Generated license token required for EE edition license.
222 222 ; New generated token value can be found in Admin > settings > license page.
223 223 license_token = abra-cada-bra1-rce3
224 224
225 225 ; This flag hides sensitive information on the license page such as token, and license data
226 226 license.hide_license_info = false
227 227
228 228 ; supervisor connection uri, for managing supervisor and logs.
229 229 supervisor.uri =
230 230
231 231 ; supervisord group name/id we only want this RC instance to handle
232 232 supervisor.group_id = dev
233 233
234 234 ; Display extended labs settings
235 235 labs_settings_active = true
236 236
237 237 ; Custom exception store path, defaults to TMPDIR
238 238 ; This is used to store exceptions from RhodeCode in a shared directory
239 239 #exception_tracker.store_path =
240 240
241 241 ; Send email with exception details when it happens
242 242 #exception_tracker.send_email = false
243 243
244 244 ; Comma separated list of recipients for exception emails,
245 245 ; e.g admin@rhodecode.com,devops@rhodecode.com
246 246 ; Can be left empty, then emails will be sent to ALL super-admins
247 247 #exception_tracker.send_email_recipients =
248 248
249 249 ; optional prefix to add to the email subject
250 250 #exception_tracker.email_prefix = [RHODECODE ERROR]
251 251
252 252 ; File store configuration. This is used to store and serve uploaded files
253 253 file_store.enabled = true
254 254
255 255 ; Storage backend, available options are: local
256 256 file_store.backend = local
257 257
258 258 ; path to store the uploaded binaries and artifacts
259 259 file_store.storage_path = /var/opt/rhodecode_data/file_store
260 260
261 261
262 262 ; Redis url used to acquire/check locks for archive generation
263 263 archive_cache.locking.url = redis://redis:6379/1
264 264
265 265 ; Storage backend, only 'filesystem' and 'objectstore' are available now
266 266 archive_cache.backend.type = filesystem
267 267
268 268 ; url for s3 compatible storage that allows uploading artifacts
269 269 ; e.g http://minio:9000
270 270 archive_cache.objectstore.url = http://s3-minio:9000
271 271
272 272 ; key for s3 auth
273 273 archive_cache.objectstore.key = key
274 274
275 275 ; secret for s3 auth
276 276 archive_cache.objectstore.secret = secret
277 277
278 278 ; number of shards to create to distribute archives across
279 279 ; default is 8 shards
280 280 archive_cache.objectstore.bucket_shards = 8
281 281
282 ; a top-level bucket to put all other sharded buckets in
283 ; in case it's empty all buckets will be created in top-level (not recommended)
284 ; objects will be stored in rhodecode-archive-cache/shard-bucket-N based on the bucket_shards number
285 archive_cache.objectstore.bucket_root = rhodecode-archive-cache
282 ; a top-level bucket to put all other shards in
283 ; objects will be stored in rhodecode-archive-cache/shard-N based on the bucket_shards number
284 archive_cache.objectstore.bucket = rhodecode-archive-cache
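A minimal sketch of the layout the bucket/bucket_shards settings produce; the hashing shown is illustrative (the real shard index comes from the shard class's own hash function), and only the bucket/shard-N path shape is taken from the comment above.

    # Illustrative only: map a cache key to a shard folder inside the single
    # required bucket.
    import hashlib

    BUCKET = 'rhodecode-archive-cache'   # archive_cache.objectstore.bucket
    SHARD_COUNT = 8                      # archive_cache.objectstore.bucket_shards

    def shard_path(key: str) -> str:
        digest = hashlib.sha256(key.encode('utf8')).hexdigest()
        index = int(digest, 16) % SHARD_COUNT
        return f'{BUCKET}/shard-{index}'   # e.g. rhodecode-archive-cache/shard-3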
286 285
287 286 ; if true, this cache will retry fetches up to retry_attempts=N times, waiting retry_backoff seconds between tries
288 287 archive_cache.objectstore.retry = false
289 288
290 289 ; number of seconds to wait before the next retry
291 290 archive_cache.objectstore.retry_backoff = 1
292 291
293 292 ; how many times to retry a fetch from this backend
294 293 archive_cache.objectstore.retry_attempts = 10
295 294
296 295 ; Default is $cache_dir/archive_cache if not set
297 296 ; Generated repo archives will be cached at this location
298 297 ; and served from the cache during subsequent requests for the same archive of
299 298 ; the repository. It is important that this path is shared across filesystems and between
300 299 ; RhodeCode and vcsserver
301 300 archive_cache.filesystem.store_dir = %(here)s/rc-tests/archive_cache
302 301
303 302 ; The limit in GB sets how much data we cache before recycling least recently used entries; defaults to 10 GB
304 303 archive_cache.filesystem.cache_size_gb = 2
305 304
306 305 ; Eviction policy used to clear out after cache_size_gb limit is reached
307 306 archive_cache.filesystem.eviction_policy = least-recently-stored
308 307
309 308 ; By default cache uses sharding technique, this specifies how many shards are there
310 309 ; default is 8 shards
311 310 archive_cache.filesystem.cache_shards = 8
312 311
313 312 ; if true, this cache will retry fetches up to retry_attempts=N times, waiting retry_backoff seconds between tries
314 313 archive_cache.filesystem.retry = false
315 314
316 315 ; number of seconds to wait before the next retry
317 316 archive_cache.filesystem.retry_backoff = 1
318 317
319 318 ; how many times to retry a fetch from this backend
320 319 archive_cache.filesystem.retry_attempts = 10
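Both backends share the same retry settings; here is a sketch of those semantics, under the assumption that a failed fetch simply waits retry_backoff seconds before the next attempt (the exception type caught is also an assumption).

    # Sketch of the retry/retry_attempts/retry_backoff behavior described above.
    import time

    def fetch_with_retry(fetch, retry=False, retry_attempts=10, retry_backoff=1):
        attempts = retry_attempts if retry else 1
        last_exc = None
        for _ in range(attempts):
            try:
                return fetch()
            except OSError as exc:
                last_exc = exc
                time.sleep(retry_backoff)   # wait before the next try
        raise last_exc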
321 320
322 321
323 322 ; #############
324 323 ; CELERY CONFIG
325 324 ; #############
326 325
327 326 ; manually run celery: /path/to/celery worker --task-events --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
328 327
329 328 use_celery = false
330 329
331 330 ; path to store schedule database
332 331 #celerybeat-schedule.path =
333 332
334 333 ; connection url to the message broker (default redis)
335 334 celery.broker_url = redis://redis:6379/8
336 335
337 336 ; results backend to get results for (default redis)
338 337 celery.result_backend = redis://redis:6379/8
339 338
340 339 ; rabbitmq example
341 340 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
342 341
343 342 ; maximum tasks to execute before worker restart
344 343 celery.max_tasks_per_child = 20
345 344
346 345 ; tasks will never be sent to the queue, but executed locally instead.
347 346 celery.task_always_eager = true
348 347 celery.task_store_eager_result = true
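A hedged illustration of the eager mode configured above: with task_always_eager on, .delay() runs the task in-process instead of sending it to the broker. The app and task names are made up; RhodeCode wires celery through its own loader.

    # Illustrative only, not RhodeCode's celery setup.
    from celery import Celery

    app = Celery(broker='redis://redis:6379/8', backend='redis://redis:6379/8')
    app.conf.task_always_eager = True          # run tasks locally
    app.conf.task_store_eager_result = True    # keep results of eager runs

    @app.task
    def add(x, y):
        return x + y

    result = add.delay(2, 3)   # executed synchronously, no broker round-trip
    assert result.get() == 5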
349 348
350 349 ; #############
351 350 ; DOGPILE CACHE
352 351 ; #############
353 352
354 353 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
355 354 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
356 355 cache_dir = %(here)s/rc-test-data
357 356
358 357 ; *********************************************
359 358 ; `sql_cache_short` cache for heavy SQL queries
360 359 ; Only supported backend is `memory_lru`
361 360 ; *********************************************
362 361 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
363 362 rc_cache.sql_cache_short.expiration_time = 0
364 363
365 364
366 365 ; *****************************************************
367 366 ; `cache_repo_longterm` cache for repo object instances
368 367 ; Only supported backend is `memory_lru`
369 368 ; *****************************************************
370 369 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
371 370 ; by default we use 30 days; the cache is still invalidated on push
372 371 rc_cache.cache_repo_longterm.expiration_time = 2592000
373 372 ; max items in the LRU cache; set to a smaller number to save memory and expire least recently used entries
374 373 rc_cache.cache_repo_longterm.max_size = 10000
375 374
376 375
377 376 ; *********************************************
378 377 ; `cache_general` cache for general purpose use
379 378 ; for simplicity use rc.file_namespace backend,
380 379 ; for performance and scale use rc.redis
381 380 ; *********************************************
382 381 rc_cache.cache_general.backend = dogpile.cache.rc.file_namespace
383 382 rc_cache.cache_general.expiration_time = 43200
384 383 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
385 384 rc_cache.cache_general.arguments.filename = %(here)s/rc-tests/cache-backend/cache_general_db
386 385
387 386 ; alternative `cache_general` redis backend with distributed lock
388 387 #rc_cache.cache_general.backend = dogpile.cache.rc.redis
389 388 #rc_cache.cache_general.expiration_time = 300
390 389
391 390 ; redis_expiration_time needs to be greater than expiration_time
392 391 #rc_cache.cache_general.arguments.redis_expiration_time = 7200
393 392
394 393 #rc_cache.cache_general.arguments.host = localhost
395 394 #rc_cache.cache_general.arguments.port = 6379
396 395 #rc_cache.cache_general.arguments.db = 0
397 396 #rc_cache.cache_general.arguments.socket_timeout = 30
398 397 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
399 398 #rc_cache.cache_general.arguments.distributed_lock = true
400 399
401 400 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
402 401 #rc_cache.cache_general.arguments.lock_auto_renewal = true
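A hedged sketch of what such a region looks like in dogpile.cache terms; the stock redis backend is used here because the rc.* backends are RhodeCode-specific wrappers.

    # Illustrative dogpile.cache region mirroring the redis variant above.
    from dogpile.cache import make_region

    region = make_region().configure(
        'dogpile.cache.redis',
        expiration_time=300,
        arguments={
            'host': 'localhost', 'port': 6379, 'db': 0,
            'redis_expiration_time': 7200,   # must exceed expiration_time
            'distributed_lock': True,
        },
    )

    @region.cache_on_arguments()
    def general_lookup(key):
        return {'key': key}   # stands in for an expensive computation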
403 402
404 403 ; *************************************************
405 404 ; `cache_perms` cache for permission tree, auth TTL
406 405 ; for simplicity use rc.file_namespace backend,
407 406 ; for performance and scale use rc.redis
408 407 ; *************************************************
409 408 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
410 409 rc_cache.cache_perms.expiration_time = 0
411 410 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
412 411 rc_cache.cache_perms.arguments.filename = %(here)s/rc-tests/cache-backend/cache_perms_db
413 412
414 413 ; alternative `cache_perms` redis backend with distributed lock
415 414 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
416 415 #rc_cache.cache_perms.expiration_time = 300
417 416
418 417 ; redis_expiration_time needs to be greater than expiration_time
419 418 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
420 419
421 420 #rc_cache.cache_perms.arguments.host = localhost
422 421 #rc_cache.cache_perms.arguments.port = 6379
423 422 #rc_cache.cache_perms.arguments.db = 0
424 423 #rc_cache.cache_perms.arguments.socket_timeout = 30
425 424 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
426 425 #rc_cache.cache_perms.arguments.distributed_lock = true
427 426
428 427 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
429 428 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
430 429
431 430 ; ***************************************************
432 431 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
433 432 ; for simplicity use rc.file_namespace backend,
434 433 ; for performance and scale use rc.redis
435 434 ; ***************************************************
436 435 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
437 436 rc_cache.cache_repo.expiration_time = 2592000
438 437 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
439 438 rc_cache.cache_repo.arguments.filename = %(here)s/rc-tests/cache-backend/cache_repo_db
440 439
441 440 ; alternative `cache_repo` redis backend with distributed lock
442 441 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
443 442 #rc_cache.cache_repo.expiration_time = 2592000
444 443
445 444 ; redis_expiration_time needs to be greater than expiration_time
446 445 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
447 446
448 447 #rc_cache.cache_repo.arguments.host = localhost
449 448 #rc_cache.cache_repo.arguments.port = 6379
450 449 #rc_cache.cache_repo.arguments.db = 1
451 450 #rc_cache.cache_repo.arguments.socket_timeout = 30
452 451 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
453 452 #rc_cache.cache_repo.arguments.distributed_lock = true
454 453
455 454 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
456 455 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
457 456
458 457 ; ##############
459 458 ; BEAKER SESSION
460 459 ; ##############
461 460
462 461 ; beaker.session.type is the type of storage used for the logged-in users' sessions. Currently allowed
463 462 ; types are file, ext:redis, ext:database, ext:memcached
464 463 ; The fastest ones are ext:redis and ext:database; DO NOT use the memory type for sessions
465 464 beaker.session.type = file
466 465 beaker.session.data_dir = %(here)s/rc-tests/data/sessions
467 466
468 467 ; Redis based sessions
469 468 #beaker.session.type = ext:redis
470 469 #beaker.session.url = redis://redis:6379/2
471 470
472 471 ; DB based session, fast, and allows easy management of logged-in users
473 472 #beaker.session.type = ext:database
474 473 #beaker.session.table_name = db_session
475 474 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
476 475 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
477 476 #beaker.session.sa.pool_recycle = 3600
478 477 #beaker.session.sa.echo = false
479 478
480 479 beaker.session.key = rhodecode
481 480 beaker.session.secret = test-rc-uytcxaz
482 481 beaker.session.lock_dir = %(here)s/rc-tests/data/sessions/lock
483 482
484 483 ; Secure encrypted cookie. Requires AES and AES python libraries
485 484 ; you must disable beaker.session.secret to use this
486 485 #beaker.session.encrypt_key = key_for_encryption
487 486 #beaker.session.validate_key = validation_key
488 487
489 488 ; Sets the session as invalid (also logging out the user) if it has not been
490 489 ; accessed for a given amount of time, in seconds
491 490 beaker.session.timeout = 2592000
492 491 beaker.session.httponly = true
493 492
494 493 ; Path to use for the cookie. Set to prefix if you use prefix middleware
495 494 #beaker.session.cookie_path = /custom_prefix
496 495
497 496 ; Set https secure cookie
498 497 beaker.session.secure = false
499 498
500 499 ; default cookie expiration time in seconds, set to `true` to set expire
501 500 ; at browser close
502 501 #beaker.session.cookie_expires = 3600
503 502
504 503 ; #############################
505 504 ; SEARCH INDEXING CONFIGURATION
506 505 ; #############################
507 506
508 507 ; Full text search indexer is available in rhodecode-tools under
509 508 ; `rhodecode-tools index` command
510 509
511 510 ; WHOOSH Backend, doesn't require additional services to run
512 511 ; it works well with a few dozen repos
513 512 search.module = rhodecode.lib.index.whoosh
514 513 search.location = %(here)s/rc-tests/data/index
515 514
516 515 ; ####################
517 516 ; CHANNELSTREAM CONFIG
518 517 ; ####################
519 518
520 519 ; channelstream enables persistent connections and live notifications
521 520 ; in the system. It's also used by the chat system
522 521
523 522 channelstream.enabled = false
524 523
525 524 ; server address for channelstream server on the backend
526 525 channelstream.server = channelstream:9800
527 526
528 527 ; location of the channelstream server from outside world
529 528 ; use ws:// for http or wss:// for https. This address needs to be handled
530 529 ; by external HTTP server such as Nginx or Apache
531 530 ; see Nginx/Apache configuration examples in our docs
532 531 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
533 532 channelstream.secret = ENV_GENERATED
534 533 channelstream.history.location = %(here)s/rc-tests/channelstream_history
535 534
536 535 ; Internal application path that Javascript uses to connect into.
537 536 ; If you use proxy-prefix the prefix should be added before /_channelstream
538 537 channelstream.proxy_path = /_channelstream
539 538
540 539
541 540 ; ##############################
542 541 ; MAIN RHODECODE DATABASE CONFIG
543 542 ; ##############################
544 543
545 544 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
546 545 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
547 546 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
548 547 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
549 548 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
550 549
551 550 sqlalchemy.db1.url = sqlite:///%(here)s/rc-tests/rhodecode_test.db?timeout=30
552 551
553 552 ; see sqlalchemy docs for other advanced settings
554 553 ; print the sql statements to output
555 554 sqlalchemy.db1.echo = false
556 555
557 556 ; recycle the connections after this amount of seconds
558 557 sqlalchemy.db1.pool_recycle = 3600
559 558
560 559 ; the number of connections to keep open inside the connection pool.
561 560 ; 0 indicates no limit
562 561 ; the general calculation with gevent is:
563 562 ; if your system allows 500 concurrent greenlets (max_connections) that all do database access,
564 563 ; then increase pool size + max overflow so that they add up to 500.
565 564 #sqlalchemy.db1.pool_size = 5
566 565
567 566 ; The number of connections to allow in connection pool "overflow", that is
568 567 ; connections that can be opened above and beyond the pool_size setting,
569 568 ; which defaults to five.
570 569 #sqlalchemy.db1.max_overflow = 10
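A worked instance of the sizing rule above; the numbers are illustrative.

    # pool_size + max_overflow should add up to the number of concurrent
    # greenlets doing database work, per the comment above.
    max_greenlets = 500                        # allowed concurrent greenlets
    pool_size = 450
    max_overflow = max_greenlets - pool_size   # 50
    assert pool_size + max_overflow == max_greenlets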
571 570
572 571 ; Connection check ping, used to detect broken database connections
573 572 ; could be enabled to better handle cases if MySQL has gone away errors
574 573 #sqlalchemy.db1.ping_connection = true
575 574
576 575 ; ##########
577 576 ; VCS CONFIG
578 577 ; ##########
579 578 vcs.server.enable = true
580 579 vcs.server = vcsserver:10010
581 580
582 581 ; Web server connectivity protocol, responsible for web based VCS operations
583 582 ; Available protocols are:
584 583 ; `http` - use http-rpc backend (default)
585 584 vcs.server.protocol = http
586 585
587 586 ; Push/Pull operations protocol, available options are:
588 587 ; `http` - use http-rpc backend (default)
589 588 vcs.scm_app_implementation = http
590 589
591 590 ; Push/Pull operations hooks protocol, available options are:
592 591 ; `http` - use http-rpc backend (default)
593 592 ; `celery` - use celery based hooks
594 593 vcs.hooks.protocol = http
595 594
596 595 ; Host on which this instance is listening for hooks. vcsserver will call this host to pull/push hooks so it should be
597 596 ; accessible via network.
598 597 ; Use vcs.hooks.host = "*" to bind to current hostname (for Docker)
599 598 vcs.hooks.host = *
600 599
601 600 ; Start VCSServer with this instance as a subprocess, useful for development
602 601 vcs.start_server = false
603 602
604 603 ; List of enabled VCS backends, available options are:
605 604 ; `hg` - mercurial
606 605 ; `git` - git
607 606 ; `svn` - subversion
608 607 vcs.backends = hg, git, svn
609 608
610 609 ; Wait this number of seconds before killing connection to the vcsserver
611 610 vcs.connection_timeout = 3600
612 611
613 612 ; Cache flag to cache vcsserver remote calls locally
614 613 ; It uses cache_region `cache_repo`
615 614 vcs.methods.cache = false
616 615
617 616 ; ####################################################
618 617 ; Subversion proxy support (mod_dav_svn)
619 618 ; Maps RhodeCode repo groups into SVN paths for Apache
620 619 ; ####################################################
621 620
622 621 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
623 622 ; Set a numeric version for your current SVN e.g 1.8, or 1.12
624 623 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
625 624 #vcs.svn.compatible_version = 1.8
626 625
627 626 ; Enable SVN proxy of requests over HTTP
628 627 vcs.svn.proxy.enabled = true
629 628
630 629 ; host to connect to running SVN subsystem
631 630 vcs.svn.proxy.host = http://svn:8090
632 631
633 632 ; Enable or disable the config file generation.
634 633 svn.proxy.generate_config = false
635 634
636 635 ; Generate config file with `SVNListParentPath` set to `On`.
637 636 svn.proxy.list_parent_path = true
638 637
639 638 ; Set location and file name of generated config file.
640 639 svn.proxy.config_file_path = %(here)s/rc-tests/mod_dav_svn.conf
641 640
642 641 ; alternative mod_dav config template. This needs to be a valid mako template
643 642 ; Example template can be found in the source code:
644 643 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
645 644 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
646 645
647 646 ; Used as a prefix to the `Location` block in the generated config file.
648 647 ; In most cases it should be set to `/`.
649 648 svn.proxy.location_root = /
650 649
651 650 ; Command to reload the mod dav svn configuration on change.
652 651 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
653 652 ; Make sure user who runs RhodeCode process is allowed to reload Apache
654 653 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
655 654
656 655 ; If the timeout expires before the reload command finishes, the command will
657 656 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
658 657 #svn.proxy.reload_timeout = 10
659 658
660 659 ; ####################
661 660 ; SSH Support Settings
662 661 ; ####################
663 662
664 663 ; Defines if a custom authorized_keys file should be created and written on
665 664 ; any change of user ssh keys. Setting this to false also disables the possibility
666 665 ; of users adding SSH keys from the web interface. Super admins can still
667 666 ; manage SSH keys.
668 667 ssh.generate_authorized_keyfile = true
669 668
670 669 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
671 670 # ssh.authorized_keys_ssh_opts =
672 671
673 672 ; Path to the authorized_keys file where the generate entries are placed.
674 673 ; It is possible to have multiple key files specified in `sshd_config` e.g.
675 674 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
676 675 ssh.authorized_keys_file_path = %(here)s/rc-tests/authorized_keys_rhodecode
677 676
678 677 ; Command to execute the SSH wrapper. The binary is available in the
679 678 ; RhodeCode installation directory.
680 679 ; legacy: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
681 680 ; new rewrite: /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper-v2
682 681 ssh.wrapper_cmd = /usr/local/bin/rhodecode_bin/bin/rc-ssh-wrapper
683 682
684 683 ; Allow shell when executing the ssh-wrapper command
685 684 ssh.wrapper_cmd_allow_shell = false
686 685
687 686 ; Enables logging, and detailed output sent back to the client during SSH
688 687 ; operations. Useful for debugging, shouldn't be used in production.
689 688 ssh.enable_debug_logging = true
690 689
691 690 ; Paths to binary executables; by default they are just the names, but we can
692 691 ; override them if we want to use custom ones
693 692 ssh.executable.hg = /usr/local/bin/rhodecode_bin/vcs_bin/hg
694 693 ssh.executable.git = /usr/local/bin/rhodecode_bin/vcs_bin/git
695 694 ssh.executable.svn = /usr/local/bin/rhodecode_bin/vcs_bin/svnserve
696 695
697 696 ; Enables SSH key generator web interface. Disabling this still allows users
698 697 ; to add their own keys.
699 698 ssh.enable_ui_key_generator = true
700 699
701 700 ; Statsd client config, this is used to send metrics to statsd
702 701 ; We recommend setting up statsd_exporter and scraping the metrics using Prometheus
703 702 #statsd.enabled = false
704 703 #statsd.statsd_host = 0.0.0.0
705 704 #statsd.statsd_port = 8125
706 705 #statsd.statsd_prefix =
707 706 #statsd.statsd_ipv6 = false
708 707
709 708 ; Configure logging automatically at server startup. Set to false
710 709 ; to use the custom logging config below.
711 710 ; RC_LOGGING_FORMATTER
712 711 ; RC_LOGGING_LEVEL
713 712 ; env variables can control the settings for logging in case of autoconfigure
714 713
715 714 logging.autoconfigure = false
716 715
717 716 ; specify your own custom logging config file to configure logging
718 717 #logging.logging_conf_file = /path/to/custom_logging.ini
719 718
720 719 ; Dummy marker to add new entries after.
721 720 ; Add any custom entries below. Please don't remove this marker.
722 721 custom.conf = 1
723 722
724 723
725 724 ; #####################
726 725 ; LOGGING CONFIGURATION
727 726 ; #####################
728 727
729 728 [loggers]
730 729 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper, dogpile
731 730
732 731 [handlers]
733 732 keys = console, console_sql
734 733
735 734 [formatters]
736 735 keys = generic, json, color_formatter, color_formatter_sql
737 736
738 737 ; #######
739 738 ; LOGGERS
740 739 ; #######
741 740 [logger_root]
742 741 level = NOTSET
743 742 handlers = console
744 743
745 744 [logger_routes]
746 745 level = DEBUG
747 746 handlers =
748 747 qualname = routes.middleware
749 748 ## "level = DEBUG" logs the route matched and routing variables.
750 749 propagate = 1
751 750
752 751 [logger_sqlalchemy]
753 752 level = INFO
754 753 handlers = console_sql
755 754 qualname = sqlalchemy.engine
756 755 propagate = 0
757 756
758 757 [logger_beaker]
759 758 level = DEBUG
760 759 handlers =
761 760 qualname = beaker.container
762 761 propagate = 1
763 762
764 763 [logger_dogpile]
765 764 level = INFO
766 765 handlers = console
767 766 qualname = dogpile
768 767 propagate = 1
769 768
770 769 [logger_rhodecode]
771 770 level = DEBUG
772 771 handlers =
773 772 qualname = rhodecode
774 773 propagate = 1
775 774
776 775 [logger_ssh_wrapper]
777 776 level = DEBUG
778 777 handlers =
779 778 qualname = ssh_wrapper
780 779 propagate = 1
781 780
782 781 [logger_celery]
783 782 level = DEBUG
784 783 handlers =
785 784 qualname = celery
786 785
787 786
788 787 ; ########
789 788 ; HANDLERS
790 789 ; ########
791 790
792 791 [handler_console]
793 792 class = StreamHandler
794 793 args = (sys.stderr, )
795 794 level = DEBUG
796 795 ; To enable JSON formatted logs replace 'generic/color_formatter' with 'json'
797 796 ; This allows sending properly formatted logs to grafana loki or elasticsearch
798 797 formatter = generic
799 798
800 799 [handler_console_sql]
801 800 ; "level = DEBUG" logs SQL queries and results.
802 801 ; "level = INFO" logs SQL queries.
803 802 ; "level = WARN" logs neither. (Recommended for production systems.)
804 803 class = StreamHandler
805 804 args = (sys.stderr, )
806 805 level = WARN
807 806 ; To enable JSON formatted logs replace 'generic/color_formatter_sql' with 'json'
808 807 ; This allows sending properly formatted logs to grafana loki or elasticsearch
809 808 formatter = generic
810 809
811 810 ; ##########
812 811 ; FORMATTERS
813 812 ; ##########
814 813
815 814 [formatter_generic]
816 815 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
817 816 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
818 817 datefmt = %Y-%m-%d %H:%M:%S
819 818
820 819 [formatter_color_formatter]
821 820 class = rhodecode.lib.logging_formatter.ColorFormatter
822 821 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
823 822 datefmt = %Y-%m-%d %H:%M:%S
824 823
825 824 [formatter_color_formatter_sql]
826 825 class = rhodecode.lib.logging_formatter.ColorFormatterSql
827 826 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
828 827 datefmt = %Y-%m-%d %H:%M:%S
829 828
830 829 [formatter_json]
831 830 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
832 831 class = rhodecode.lib._vendor.jsonlogger.JsonFormatter
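When logging.autoconfigure is false, the [loggers]/[handlers]/[formatters] sections above have to be loaded explicitly; a sketch using pyramid's helper, with an illustrative path.

    # Hedged sketch: load the logging sections of this ini by hand.
    from pyramid.paster import setup_logging

    setup_logging('rhodecode.ini')   # path to this file; resolves %(here)s itself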