cache: allow controlling lock_auto_renewal via .ini config
super-admin
r4719:b8138785 default
@@ -1,796 +1,800 b''
1 1 ## -*- coding: utf-8 -*-
2 2
3 3 ; #########################################
4 4 ; RHODECODE COMMUNITY EDITION CONFIGURATION
5 5 ; #########################################
6 6
7 7 [DEFAULT]
8 8 ; Debug flag sets all loggers to debug, and enables request tracking
9 9 debug = false
10 10
11 11 ; ########################################################################
12 12 ; EMAIL CONFIGURATION
13 13 ; These settings will be used by the RhodeCode mailing system
14 14 ; ########################################################################
15 15
16 16 ; prefix all email subjects with the given prefix, helps filtering out emails
17 17 #email_prefix = [RhodeCode]
18 18
19 19 ; email FROM address from which all mails will be sent
20 20 #app_email_from = rhodecode-noreply@localhost
21 21
22 22 #smtp_server = mail.server.com
23 23 #smtp_username =
24 24 #smtp_password =
25 25 #smtp_port =
26 26 #smtp_use_tls = false
27 27 #smtp_use_ssl = true
28 28
29 29 [server:main]
30 30 ; COMMON HOST/IP CONFIG
31 31 host = 127.0.0.1
32 32 port = 5000
33 33
34 34
35 35 ; ###########################
36 36 ; GUNICORN APPLICATION SERVER
37 37 ; ###########################
38 38
39 39 ; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
40 40
41 41 ; Module to use, this setting shouldn't be changed
42 42 use = egg:gunicorn#main
43 43
44 44 ; Sets the number of process workers. More workers means more concurrent connections
45 45 ; RhodeCode can handle at the same time. Each additional worker also increases
46 46 ; memory usage, as each has its own set of caches.
47 47 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
48 48 ; than 8-10, except for really big deployments, e.g. 700-1000 users.
49 49 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
50 50 ; when using more than 1 worker.
51 51 workers = 2
52 52
53 53 ; Gunicorn access log level
54 54 loglevel = info
55 55
56 56 ; Process name visible in process list
57 57 proc_name = rhodecode
58 58
59 59 ; Type of worker class, one of `sync`, `gevent`
60 60 ; Recommended type is `gevent`
61 61 worker_class = gevent
62 62
63 63 ; The maximum number of simultaneous clients per worker. Valid only for gevent
64 64 worker_connections = 10
65 65
66 66 ; Max number of requests that a worker will handle before being gracefully restarted.
67 67 ; Prevents memory leaks; jitter adds variability so that not all workers are restarted at once.
68 68 max_requests = 1000
69 69 max_requests_jitter = 30
70 70
71 71 ; Amount of time a worker can spend handling a request before it
72 72 ; gets killed and restarted. By default set to 21600 (6hrs)
73 73 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
74 74 timeout = 21600
75 75
76 76 ; The maximum size of HTTP request line in bytes.
77 77 ; 0 for unlimited
78 78 limit_request_line = 0
79 79
80 80 ; Limit the number of HTTP header fields in a request.
81 81 ; By default this value is 100 and can't be larger than 32768.
82 82 limit_request_fields = 32768
83 83
84 84 ; Limit the allowed size of an HTTP request header field.
85 85 ; Value is a positive number or 0.
86 86 ; Setting it to 0 will allow unlimited header field sizes.
87 87 limit_request_field_size = 0
88 88
89 89 ; Timeout for graceful worker restart.
90 90 ; After receiving a restart signal, workers have this much time to finish
91 91 ; serving requests. Workers still alive after the timeout (starting from the
92 92 ; receipt of the restart signal) are force killed.
93 93 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
94 94 graceful_timeout = 3600
95 95
96 96 ; The number of seconds to wait for requests on a Keep-Alive connection.
97 97 ; Generally set in the 1-5 seconds range.
98 98 keepalive = 2
99 99
100 100 ; Maximum memory usage that each worker can use before it will receive a
101 101 ; graceful restart signal. 0 = memory monitoring is disabled
102 102 ; Examples: 268435456 (256MB), 536870912 (512MB)
103 103 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
104 104 memory_max_usage = 0
105 105
106 106 ; How often in seconds to check for memory usage for each gunicorn worker
107 107 memory_usage_check_interval = 60
108 108
109 109 ; Threshold value below which we don't recycle a worker if GarbageCollection
110 110 ; frees up enough resources. Before each restart we try to run GC on the worker;
111 111 ; if we get enough free memory after that, the restart will not happen.
112 112 memory_usage_recovery_threshold = 0.8
113 113
114 114
115 115 ; Prefix middleware for RhodeCode.
116 116 ; recommended when using a proxy setup.
117 117 ; allows serving RhodeCode under a prefix on the server,
118 118 ; e.g. https://server.com/custom_prefix. Enable the `filter-with =` option below as well,
119 119 ; and set your prefix like: `prefix = /custom_prefix`.
120 120 ; Be sure to also set beaker.session.cookie_path = /custom_prefix if you need
121 121 ; your cookies to work only on the prefix url.
122 122 [filter:proxy-prefix]
123 123 use = egg:PasteDeploy#prefix
124 124 prefix = /
125 125
126 126 [app:main]
127 127 ; The %(here)s variable will be replaced with the absolute path of the parent directory
128 128 ; of this file
129 129 ; In addition, ENVIRONMENT variables can be used, e.g.
130 130 ; sqlalchemy.db1.url = {ENV_RC_DB_URL}
131 131
132 132 use = egg:rhodecode-enterprise-ce
133 133
134 134 ; enable proxy prefix middleware, defined above
135 135 #filter-with = proxy-prefix
136 136
137 137 ; encryption key used to encrypt social plugin tokens,
138 138 ; remote_urls with credentials etc, if not set it defaults to
139 139 ; `beaker.session.secret`
140 140 #rhodecode.encrypted_values.secret =
141 141
142 142 ; decryption strict mode (enabled by default). It controls if decryption raises
143 143 ; `SignatureVerificationError` in case of a wrong key or damaged encryption data.
144 144 #rhodecode.encrypted_values.strict = false
145 145
146 146 ; Pick algorithm for encryption. Either fernet (more secure) or aes (default)
147 147 ; fernet is safer, and we strongly recommend switching to it.
148 148 ; Due to backward compatibility, aes is used as the default.
149 149 #rhodecode.encrypted_values.algorithm = fernet
150 150
151 151 ; Return gzipped responses from RhodeCode (static files/application)
152 152 gzip_responses = false
153 153
154 154 ; Auto-generate javascript routes file on startup
155 155 generate_js_files = false
156 156
157 157 ; System global default language.
158 158 ; All available languages: en (default), be, de, es, fr, it, ja, pl, pt, ru, zh
159 159 lang = en
160 160
161 161 ; Perform a full repository scan and import on each server start.
162 162 ; Setting this to true could lead to very long startup time.
163 163 startup.import_repos = false
164 164
165 165 ; Uncomment and set this path to use archive download cache.
166 166 ; Once enabled, generated archives will be cached at this location
167 167 ; and served from the cache during subsequent requests for the same archive of
168 168 ; the repository.
169 169 #archive_cache_dir = /tmp/tarballcache
170 170
171 171 ; URL at which the application is running. This is used for bootstrapping
172 172 ; requests in context when no web request is available. Used in ishell, or
173 173 ; SSH calls. Set this for events to receive a proper url for SSH calls.
174 174 app.base_url = http://rhodecode.local
175 175
176 176 ; Unique application ID. Should be a random unique string for security.
177 177 app_instance_uuid = rc-production
178 178
179 179 ; Cut off limit for large diffs (size in bytes). If the overall diff size of a
180 180 ; commit or pull request exceeds this limit, the diff will be displayed
181 181 ; partially. E.g. 512000 == 512Kb
182 182 cut_off_limit_diff = 512000
183 183
184 184 ; Cut off limit for large files inside diffs (size in bytes). Each individual
185 185 ; file inside a diff that exceeds this limit will be displayed partially.
186 186 ; E.g. 128000 == 128Kb
187 187 cut_off_limit_file = 128000
188 188
189 189 ; Use cached version of vcs repositories everywhere. Recommended to be `true`
190 190 vcs_full_cache = true
191 191
192 192 ; Force https in RhodeCode, fixes https redirects, assumes it's always https.
193 193 ; Normally this is controlled by proper flags sent from http server such as Nginx or Apache
194 194 force_https = false
195 195
196 196 ; use Strict-Transport-Security headers
197 197 use_htsts = false
198 198
199 199 ; Set to true if your repos are exposed using the dumb protocol
200 200 git_update_server_info = false
201 201
202 202 ; RSS/ATOM feed options
203 203 rss_cut_off_limit = 256000
204 204 rss_items_per_page = 10
205 205 rss_include_diff = false
206 206
207 207 ; gist URL alias, used to create nicer urls for gists. This should be a
208 208 ; url that rewrites to _admin/gists/{gistid}.
209 209 ; example: http://gist.rhodecode.org/{gistid}. Empty means use the internal
210 210 ; RhodeCode url, i.e. http[s]://rhodecode.server/_admin/gists/{gistid}
211 211 gist_alias_url =
212 212
213 213 ; List of views (using glob pattern syntax) that AUTH TOKENS can be
214 214 ; used to access.
215 215 ; Adding ?auth_token=TOKEN_HASH to the url authenticates this request as if it
216 216 ; came from the logged-in user who owns this authentication token.
217 217 ; Additionally, the @TOKEN syntax can be used to bind the view to a specific
218 218 ; authentication token. Such a view would only be accessible when used together
219 219 ; with this authentication token.
220 220 ; The list of all views can be found under `/_admin/permissions/auth_token_access`.
221 221 ; The list should be "," separated and on a single line.
222 222 ; Most common views to enable:
223 223
224 224 # RepoCommitsView:repo_commit_download
225 225 # RepoCommitsView:repo_commit_patch
226 226 # RepoCommitsView:repo_commit_raw
227 227 # RepoCommitsView:repo_commit_raw@TOKEN
228 228 # RepoFilesView:repo_files_diff
229 229 # RepoFilesView:repo_archivefile
230 230 # RepoFilesView:repo_file_raw
231 231 # GistView:*
232 232 api_access_controllers_whitelist =
233 233
234 234 ; Default encoding used to convert from and to unicode
235 235 ; can also be a comma separated list of encodings in case of mixed encodings
236 236 default_encoding = UTF-8
237 237
238 238 ; instance-id prefix
239 239 ; a prefix key for this instance used for cache invalidation when running
240 240 ; multiple instances of RhodeCode, make sure it's globally unique for
241 241 ; all running RhodeCode instances. Leave empty if you don't use it
242 242 instance_id =
243 243
244 244 ; Fallback authentication plugin. Set this to a plugin ID to force the usage
245 245 ; of an authentication plugin even if it is disabled by its settings.
246 246 ; This could be useful if you are unable to log in to the system due to broken
247 247 ; authentication settings. Then you can enable e.g. the internal RhodeCode auth
248 248 ; module to log in again and fix the settings.
249 249 ; Available builtin plugin IDs (hash is part of the ID):
250 250 ; egg:rhodecode-enterprise-ce#rhodecode
251 251 ; egg:rhodecode-enterprise-ce#pam
252 252 ; egg:rhodecode-enterprise-ce#ldap
253 253 ; egg:rhodecode-enterprise-ce#jasig_cas
254 254 ; egg:rhodecode-enterprise-ce#headers
255 255 ; egg:rhodecode-enterprise-ce#crowd
256 256
257 257 #rhodecode.auth_plugin_fallback = egg:rhodecode-enterprise-ce#rhodecode
258 258
259 259 ; Flag to control loading of legacy plugins in py:/path format
260 260 auth_plugin.import_legacy_plugins = true
261 261
262 262 ; alternative HTTP response code for failed authentication. The default HTTP
263 263 ; response is 401 HTTPUnauthorized. Currently HG clients have trouble
264 264 ; handling that, causing a series of failed authentication calls.
265 265 ; Set this variable to 403 to return HTTPForbidden, or any other HTTP code
266 266 ; This will be served instead of default 401 on bad authentication
267 267 auth_ret_code =
268 268
269 269 ; use special detection method when serving auth_ret_code, instead of serving
270 270 ; ret_code directly, use 401 initially (which triggers a credentials prompt)
271 271 ; and then serve auth_ret_code to clients
272 272 auth_ret_code_detection = false
273 273
274 274 ; locking return code. When a repository is locked, return this HTTP code. 2XX
275 275 ; codes don't break the transactions while 4XX codes do
276 276 lock_ret_code = 423
277 277
278 278 ; allows changing the repository location in the settings page
279 279 allow_repo_location_change = true
280 280
281 281 ; allows setting up custom hooks in the settings page
282 282 allow_custom_hooks_settings = true
283 283
284 284 ; Generated license token required for EE edition license.
285 285 ; New generated token value can be found in Admin > settings > license page.
286 286 license_token =
287 287
288 288 ; This flag hides sensitive information on the license page, such as the token and license data
289 289 license.hide_license_info = false
290 290
291 291 ; supervisor connection uri, for managing supervisor and logs.
292 292 supervisor.uri =
293 293
294 294 ; supervisord group name/id that this RC instance should handle
295 295 supervisor.group_id = prod
296 296
297 297 ; Display extended labs settings
298 298 labs_settings_active = true
299 299
300 300 ; Custom exception store path, defaults to TMPDIR
301 301 ; This is used to store exceptions from RhodeCode in a shared directory
302 302 #exception_tracker.store_path =
303 303
304 304 ; Send email with exception details when it happens
305 305 #exception_tracker.send_email = false
306 306
307 307 ; Comma separated list of recipients for exception emails,
308 308 ; e.g. admin@rhodecode.com,devops@rhodecode.com
309 309 ; Can be left empty, then emails will be sent to ALL super-admins
310 310 #exception_tracker.send_email_recipients =
311 311
312 312 ; optional prefix to add to the email subject
313 313 #exception_tracker.email_prefix = [RHODECODE ERROR]
314 314
315 315 ; File store configuration. This is used to store and serve uploaded files
316 316 file_store.enabled = true
317 317
318 318 ; Storage backend, available options are: local
319 319 file_store.backend = local
320 320
321 321 ; path to store the uploaded binaries
322 322 file_store.storage_path = %(here)s/data/file_store
323 323
324 324
325 325 ; #############
326 326 ; CELERY CONFIG
327 327 ; #############
328 328
329 329 ; manually run celery: /path/to/celery worker -E --beat --app rhodecode.lib.celerylib.loader --scheduler rhodecode.lib.celerylib.scheduler.RcScheduler --loglevel DEBUG --ini /path/to/rhodecode.ini
330 330
331 331 use_celery = false
332 332
333 333 ; connection url to the message broker (default redis)
334 334 celery.broker_url = redis://localhost:6379/8
335 335
336 336 ; rabbitmq example
337 337 #celery.broker_url = amqp://rabbitmq:qweqwe@localhost:5672/rabbitmqhost
338 338
339 339 ; maximum tasks to execute before worker restart
340 340 celery.max_tasks_per_child = 100
341 341
342 342 ; tasks will never be sent to the queue, but executed locally instead.
343 343 celery.task_always_eager = false
344 344
345 345 ; #############
346 346 ; DOGPILE CACHE
347 347 ; #############
348 348
349 349 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
350 350 ; e.g. /tmpfs/data_ramdisk; however, this directory might require a large amount of space
351 351 cache_dir = %(here)s/data
352 352
353 353 ; *********************************************
354 354 ; `sql_cache_short` cache for heavy SQL queries
355 355 ; Only supported backend is `memory_lru`
356 356 ; *********************************************
357 357 rc_cache.sql_cache_short.backend = dogpile.cache.rc.memory_lru
358 358 rc_cache.sql_cache_short.expiration_time = 30
359 359
360 360
361 361 ; *****************************************************
362 362 ; `cache_repo_longterm` cache for repo object instances
363 363 ; Only supported backend is `memory_lru`
364 364 ; *****************************************************
365 365 rc_cache.cache_repo_longterm.backend = dogpile.cache.rc.memory_lru
366 366 ; by default we use 30 days; the cache is still invalidated on push
367 367 rc_cache.cache_repo_longterm.expiration_time = 2592000
368 368 ; max items in the LRU cache; set to a smaller number to save memory and expire least recently used entries sooner
369 369 rc_cache.cache_repo_longterm.max_size = 10000
370 370
371 371
372 372 ; *************************************************
373 373 ; `cache_perms` cache for permission tree, auth TTL
374 374 ; *************************************************
375 375 rc_cache.cache_perms.backend = dogpile.cache.rc.file_namespace
376 376 rc_cache.cache_perms.expiration_time = 300
377 377 ; file cache store path. Defaults to the `cache_dir =` value, or tempdir if neither is set
378 378 #rc_cache.cache_perms.arguments.filename = /tmp/cache_perms.db
379 379
380 380 ; alternative `cache_perms` redis backend with distributed lock
381 381 #rc_cache.cache_perms.backend = dogpile.cache.rc.redis
382 382 #rc_cache.cache_perms.expiration_time = 300
383 383
384 384 ; redis_expiration_time needs to be greater than expiration_time
385 385 #rc_cache.cache_perms.arguments.redis_expiration_time = 7200
386 386
387 387 #rc_cache.cache_perms.arguments.host = localhost
388 388 #rc_cache.cache_perms.arguments.port = 6379
389 389 #rc_cache.cache_perms.arguments.db = 0
390 390 #rc_cache.cache_perms.arguments.socket_timeout = 30
391 391 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
392 392 #rc_cache.cache_perms.arguments.distributed_lock = true
393 393
394 ; auto-renew the lock to prevent stale locks; slower but safer. Enable only if stale-lock problems occur
395 #rc_cache.cache_perms.arguments.lock_auto_renewal = true
394 396
395 397 ; ***************************************************
396 398 ; `cache_repo` cache for file tree, Readme, RSS FEEDS
397 399 ; ***************************************************
398 400 rc_cache.cache_repo.backend = dogpile.cache.rc.file_namespace
399 401 rc_cache.cache_repo.expiration_time = 2592000
400 402 ; file cache store path. Defaults to the `cache_dir =` value, or tempdir if neither is set
401 403 #rc_cache.cache_repo.arguments.filename = /tmp/cache_repo.db
402 404
403 405 ; alternative `cache_repo` redis backend with distributed lock
404 406 #rc_cache.cache_repo.backend = dogpile.cache.rc.redis
405 407 #rc_cache.cache_repo.expiration_time = 2592000
406 408
407 409 ; redis_expiration_time needs to be greater than expiration_time
408 410 #rc_cache.cache_repo.arguments.redis_expiration_time = 2678400
409 411
410 412 #rc_cache.cache_repo.arguments.host = localhost
411 413 #rc_cache.cache_repo.arguments.port = 6379
412 414 #rc_cache.cache_repo.arguments.db = 1
413 415 #rc_cache.cache_repo.arguments.socket_timeout = 30
414 416 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
415 417 #rc_cache.cache_repo.arguments.distributed_lock = true
416 418
419 ; auto-renew the lock to prevent stale locks; slower but safer. Enable only if stale-lock problems occur
420 #rc_cache.cache_repo.arguments.lock_auto_renewal = true
417 421
418 422 ; ##############
419 423 ; BEAKER SESSION
420 424 ; ##############
421 425
422 426 ; beaker.session.type is the type of storage used for the logged-in users' sessions. Currently allowed
423 427 ; types are file, ext:redis, ext:database, ext:memcached, and memory (default if not specified).
424 428 ; The fastest ones are Redis and ext:database
425 429 beaker.session.type = file
426 430 beaker.session.data_dir = %(here)s/data/sessions
427 431
428 432 ; Redis based sessions
429 433 #beaker.session.type = ext:redis
430 434 #beaker.session.url = redis://127.0.0.1:6379/2
431 435
432 436 ; DB based session, fast, and allows easy management of logged-in users
433 437 #beaker.session.type = ext:database
434 438 #beaker.session.table_name = db_session
435 439 #beaker.session.sa.url = postgresql://postgres:secret@localhost/rhodecode
436 440 #beaker.session.sa.url = mysql://root:secret@127.0.0.1/rhodecode
437 441 #beaker.session.sa.pool_recycle = 3600
438 442 #beaker.session.sa.echo = false
439 443
440 444 beaker.session.key = rhodecode
441 445 beaker.session.secret = production-rc-uytcxaz
442 446 beaker.session.lock_dir = %(here)s/data/sessions/lock
443 447
444 448 ; Secure encrypted cookie. Requires AES and AES python libraries
445 449 ; you must disable beaker.session.secret to use this
446 450 #beaker.session.encrypt_key = key_for_encryption
447 451 #beaker.session.validate_key = validation_key
448 452
449 453 ; Sets the session as invalid (also logging out the user) if it has not been
450 454 ; accessed for the given amount of time in seconds
451 455 beaker.session.timeout = 2592000
452 456 beaker.session.httponly = true
453 457
454 458 ; Path to use for the cookie. Set to prefix if you use prefix middleware
455 459 #beaker.session.cookie_path = /custom_prefix
456 460
457 461 ; Set https secure cookie
458 462 beaker.session.secure = false
459 463
460 464 ; default cookie expiration time in seconds, set to `true` to expire
461 465 ; at browser close
462 466 #beaker.session.cookie_expires = 3600
463 467
464 468 ; #############################
465 469 ; SEARCH INDEXING CONFIGURATION
466 470 ; #############################
467 471
468 472 ; Full text search indexer is available in rhodecode-tools under
469 473 ; `rhodecode-tools index` command
470 474
471 475 ; WHOOSH Backend, doesn't require additional services to run
472 476 ; it works well with a few dozen repos
473 477 search.module = rhodecode.lib.index.whoosh
474 478 search.location = %(here)s/data/index
475 479
476 480 ; ####################
477 481 ; CHANNELSTREAM CONFIG
478 482 ; ####################
479 483
480 484 ; channelstream enables persistent connections and live notifications
481 485 ; in the system. It's also used by the chat system
482 486
483 487 channelstream.enabled = false
484 488
485 489 ; server address for channelstream server on the backend
486 490 channelstream.server = 127.0.0.1:9800
487 491
488 492 ; location of the channelstream server from outside world
489 493 ; use ws:// for http or wss:// for https. This address needs to be handled
490 494 ; by external HTTP server such as Nginx or Apache
491 495 ; see Nginx/Apache configuration examples in our docs
492 496 channelstream.ws_url = ws://rhodecode.yourserver.com/_channelstream
493 497 channelstream.secret = secret
494 498 channelstream.history.location = %(here)s/channelstream_history
495 499
496 500 ; Internal application path that Javascript uses to connect to.
497 501 ; If you use proxy-prefix the prefix should be added before /_channelstream
498 502 channelstream.proxy_path = /_channelstream
499 503
500 504
501 505 ; ##############################
502 506 ; MAIN RHODECODE DATABASE CONFIG
503 507 ; ##############################
504 508
505 509 #sqlalchemy.db1.url = sqlite:///%(here)s/rhodecode.db?timeout=30
506 510 #sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
507 511 #sqlalchemy.db1.url = mysql://root:qweqwe@localhost/rhodecode?charset=utf8
508 512 ; pymysql is an alternative driver for MySQL, use in case of problems with default one
509 513 #sqlalchemy.db1.url = mysql+pymysql://root:qweqwe@localhost/rhodecode
510 514
511 515 sqlalchemy.db1.url = postgresql://postgres:qweqwe@localhost/rhodecode
512 516
513 517 ; see sqlalchemy docs for other advanced settings
514 518 ; print the sql statements to output
515 519 sqlalchemy.db1.echo = false
516 520
517 521 ; recycle the connections after this amount of seconds
518 522 sqlalchemy.db1.pool_recycle = 3600
519 523 sqlalchemy.db1.convert_unicode = true
520 524
521 525 ; the number of connections to keep open inside the connection pool.
522 526 ; 0 indicates no limit
523 527 #sqlalchemy.db1.pool_size = 5
524 528
525 529 ; The number of connections to allow in connection pool "overflow", that is
526 530 ; connections that can be opened above and beyond the pool_size setting,
527 531 ; which defaults to five.
528 532 #sqlalchemy.db1.max_overflow = 10
529 533
530 534 ; Connection check ping, used to detect broken database connections
531 535 ; can be enabled to better handle "MySQL server has gone away" errors
532 536 #sqlalchemy.db1.ping_connection = true
533 537
534 538 ; ##########
535 539 ; VCS CONFIG
536 540 ; ##########
537 541 vcs.server.enable = true
538 542 vcs.server = localhost:9900
539 543
540 544 ; Web server connectivity protocol, responsible for web based VCS operations
541 545 ; Available protocols are:
542 546 ; `http` - use http-rpc backend (default)
543 547 vcs.server.protocol = http
544 548
545 549 ; Push/Pull operations protocol, available options are:
546 550 ; `http` - use http-rpc backend (default)
547 551 vcs.scm_app_implementation = http
548 552
549 553 ; Push/Pull operations hooks protocol, available options are:
550 554 ; `http` - use http-rpc backend (default)
551 555 vcs.hooks.protocol = http
552 556
553 557 ; Host on which this instance is listening for hooks. If the vcsserver is in another
554 558 ; location, this should be adjusted.
555 559 vcs.hooks.host = 127.0.0.1
556 560
557 561 ; Start VCSServer with this instance as a subprocess, useful for development
558 562 vcs.start_server = false
559 563
560 564 ; List of enabled VCS backends, available options are:
561 565 ; `hg` - mercurial
562 566 ; `git` - git
563 567 ; `svn` - subversion
564 568 vcs.backends = hg, git, svn
565 569
566 570 ; Wait this number of seconds before killing connection to the vcsserver
567 571 vcs.connection_timeout = 3600
568 572
569 573 ; Compatibility version when creating SVN repositories. Defaults to newest version when commented out.
570 574 ; Set a numeric version for your current SVN, e.g. 1.8 or 1.12
571 575 ; Legacy available options are: pre-1.4-compatible, pre-1.5-compatible, pre-1.6-compatible, pre-1.8-compatible, pre-1.9-compatible
572 576 #vcs.svn.compatible_version = 1.8
573 577
574 578
575 579 ; ####################################################
576 580 ; Subversion proxy support (mod_dav_svn)
577 581 ; Maps RhodeCode repo groups into SVN paths for Apache
578 582 ; ####################################################
579 583
580 584 ; Enable or disable the config file generation.
581 585 svn.proxy.generate_config = false
582 586
583 587 ; Generate config file with `SVNListParentPath` set to `On`.
584 588 svn.proxy.list_parent_path = true
585 589
586 590 ; Set location and file name of generated config file.
587 591 svn.proxy.config_file_path = %(here)s/mod_dav_svn.conf
588 592
589 593 ; alternative mod_dav config template. This needs to be a valid mako template
590 594 ; Example template can be found in the source code:
591 595 ; rhodecode/apps/svn_support/templates/mod-dav-svn.conf.mako
592 596 #svn.proxy.config_template = ~/.rccontrol/enterprise-1/custom_svn_conf.mako
593 597
594 598 ; Used as a prefix to the `Location` block in the generated config file.
595 599 ; In most cases it should be set to `/`.
596 600 svn.proxy.location_root = /
597 601
598 602 ; Command to reload the mod dav svn configuration on change.
599 603 ; Example: `/etc/init.d/apache2 reload` or /home/USER/apache_reload.sh
600 604 ; Make sure the user who runs the RhodeCode process is allowed to reload Apache
601 605 #svn.proxy.reload_cmd = /etc/init.d/apache2 reload
602 606
603 607 ; If the timeout expires before the reload command finishes, the command will
604 608 ; be killed. Setting it to zero means no timeout. Defaults to 10 seconds.
605 609 #svn.proxy.reload_timeout = 10
606 610
607 611 ; ####################
608 612 ; SSH Support Settings
609 613 ; ####################
610 614
611 615 ; Defines if a custom authorized_keys file should be created and written on
612 616 ; any change of user ssh keys. Setting this to false also disables the possibility
613 617 ; of users adding SSH keys from the web interface. Super admins can still
614 618 ; manage SSH Keys.
615 619 ssh.generate_authorized_keyfile = false
616 620
617 621 ; Options for ssh, default is `no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding`
618 622 # ssh.authorized_keys_ssh_opts =
619 623
620 624 ; Path to the authorized_keys file where the generated entries are placed.
621 625 ; It is possible to have multiple key files specified in `sshd_config` e.g.
622 626 ; AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys_rhodecode
623 627 ssh.authorized_keys_file_path = ~/.ssh/authorized_keys_rhodecode
624 628
625 629 ; Command to execute the SSH wrapper. The binary is available in the
626 630 ; RhodeCode installation directory.
627 631 ; e.g ~/.rccontrol/community-1/profile/bin/rc-ssh-wrapper
628 632 ssh.wrapper_cmd = ~/.rccontrol/community-1/rc-ssh-wrapper
629 633
630 634 ; Allow shell when executing the ssh-wrapper command
631 635 ssh.wrapper_cmd_allow_shell = false
632 636
633 637 ; Enables logging, and detailed output sent back to the client during SSH
634 638 ; operations. Useful for debugging, shouldn't be used in production.
635 639 ssh.enable_debug_logging = false
636 640
637 641 ; Paths to binary executables; by default these are just the binary names, but we can
638 642 ; override them if we want to use custom ones
639 643 ssh.executable.hg = ~/.rccontrol/vcsserver-1/profile/bin/hg
640 644 ssh.executable.git = ~/.rccontrol/vcsserver-1/profile/bin/git
641 645 ssh.executable.svn = ~/.rccontrol/vcsserver-1/profile/bin/svnserve
642 646
643 647 ; Enables SSH key generator web interface. Disabling this still allows users
644 648 ; to add their own keys.
645 649 ssh.enable_ui_key_generator = true
646 650
647 651
648 652 ; #################
649 653 ; APPENLIGHT CONFIG
650 654 ; #################
651 655
652 656 ; Appenlight is tailored to work with RhodeCode, see
653 657 ; http://appenlight.rhodecode.com for details on how to obtain an account
654 658
655 659 ; Appenlight integration enabled
656 660 appenlight = false
657 661
658 662 appenlight.server_url = https://api.appenlight.com
659 663 appenlight.api_key = YOUR_API_KEY
660 664 #appenlight.transport_config = https://api.appenlight.com?threaded=1&timeout=5
661 665
662 666 ; used for JS client
663 667 appenlight.api_public_key = YOUR_API_PUBLIC_KEY
664 668
665 669 ; TWEAK AMOUNT OF INFO SENT HERE
666 670
667 671 ; enables 404 error logging (default False)
668 672 appenlight.report_404 = false
669 673
670 674 ; time in seconds after which a request is considered slow (default 1)
671 675 appenlight.slow_request_time = 1
672 676
673 677 ; record slow requests in application
674 678 ; (needs to be enabled for slow datastore recording and time tracking)
675 679 appenlight.slow_requests = true
676 680
677 681 ; enable hooking to application loggers
678 682 appenlight.logging = true
679 683
680 684 ; minimum log level for log capture
681 685 appenlight.logging.level = WARNING
682 686
683 687 ; send logs only from erroneous/slow requests
684 688 ; (saves API quota for intensive logging)
685 689 appenlight.logging_on_error = false
686 690
687 691 ; list of additional keywords that should be grabbed from the environ object;
688 692 ; can be a string with a comma separated list of words in lowercase
689 693 ; (by default the client will always send the following info:
690 694 ; 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that
691 695 ; start with HTTP*; this list can be extended with additional keywords here)
692 696 appenlight.environ_keys_whitelist =
693 697
694 698 ; list of keywords that should be blanked from the request object;
695 699 ; can be a string with a comma separated list of words in lowercase
696 700 ; (by default the client will always blank keys that contain the following words:
697 701 ; 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf';
698 702 ; this list can be extended with additional keywords set here)
699 703 appenlight.request_keys_blacklist =
700 704
701 705 ; list of namespaces that should be ignored when gathering log entries;
702 706 ; can be a string with a comma separated list of namespaces
703 707 ; (by default the client ignores its own entries: appenlight_client.client)
704 708 appenlight.log_namespace_blacklist =
705 709
706 710 ; Dummy marker to add new entries after.
707 711 ; Add any custom entries below. Please don't remove this marker.
708 712 custom.conf = 1
709 713
710 714
711 715 ; #####################
712 716 ; LOGGING CONFIGURATION
713 717 ; #####################
714 718 [loggers]
715 719 keys = root, sqlalchemy, beaker, celery, rhodecode, ssh_wrapper
716 720
717 721 [handlers]
718 722 keys = console, console_sql
719 723
720 724 [formatters]
721 725 keys = generic, color_formatter, color_formatter_sql
722 726
723 727 ; #######
724 728 ; LOGGERS
725 729 ; #######
726 730 [logger_root]
727 731 level = NOTSET
728 732 handlers = console
729 733
730 734 [logger_sqlalchemy]
731 735 level = INFO
732 736 handlers = console_sql
733 737 qualname = sqlalchemy.engine
734 738 propagate = 0
735 739
736 740 [logger_beaker]
737 741 level = DEBUG
738 742 handlers =
739 743 qualname = beaker.container
740 744 propagate = 1
741 745
742 746 [logger_rhodecode]
743 747 level = DEBUG
744 748 handlers =
745 749 qualname = rhodecode
746 750 propagate = 1
747 751
748 752 [logger_ssh_wrapper]
749 753 level = DEBUG
750 754 handlers =
751 755 qualname = ssh_wrapper
752 756 propagate = 1
753 757
754 758 [logger_celery]
755 759 level = DEBUG
756 760 handlers =
757 761 qualname = celery
758 762
759 763
760 764 ; ########
761 765 ; HANDLERS
762 766 ; ########
763 767
764 768 [handler_console]
765 769 class = StreamHandler
766 770 args = (sys.stderr, )
767 771 level = INFO
768 772 formatter = generic
769 773
770 774 [handler_console_sql]
771 775 ; "level = DEBUG" logs SQL queries and results.
772 776 ; "level = INFO" logs SQL queries.
773 777 ; "level = WARN" logs neither. (Recommended for production systems.)
774 778 class = StreamHandler
775 779 args = (sys.stderr, )
776 780 level = WARN
777 781 formatter = generic
778 782
779 783 ; ##########
780 784 ; FORMATTERS
781 785 ; ##########
782 786
783 787 [formatter_generic]
784 788 class = rhodecode.lib.logging_formatter.ExceptionAwareFormatter
785 789 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
786 790 datefmt = %Y-%m-%d %H:%M:%S
787 791
788 792 [formatter_color_formatter]
789 793 class = rhodecode.lib.logging_formatter.ColorFormatter
790 794 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
791 795 datefmt = %Y-%m-%d %H:%M:%S
792 796
793 797 [formatter_color_formatter_sql]
794 798 class = rhodecode.lib.logging_formatter.ColorFormatterSql
795 799 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
796 800 datefmt = %Y-%m-%d %H:%M:%S
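The new `lock_auto_renewal` keys above are ordinary backend `arguments`, so they reach the Redis backend the same way `distributed_lock` does. A minimal sketch of the equivalent programmatic region setup, assuming RhodeCode's `dogpile.cache.rc.redis` backend is registered and a Redis server is reachable; the values mirror the commented `cache_perms` example, and the connection details are illustrative only:

from dogpile.cache import make_region

# Mirrors the rc_cache.cache_perms.* keys from the ini hunk above.
region = make_region().configure(
    'dogpile.cache.rc.redis',           # RhodeCode's Redis backend
    expiration_time=300,                # rc_cache.cache_perms.expiration_time
    arguments={
        'host': 'localhost',            # ...arguments.host
        'port': 6379,                   # ...arguments.port
        'db': 0,                        # ...arguments.db
        'socket_timeout': 30,           # ...arguments.socket_timeout
        'redis_expiration_time': 7200,  # must be greater than expiration_time
        'distributed_lock': True,       # required for the Redis mutex to exist
        'lock_auto_renewal': True,      # the new flag; defaults to False
    },
)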
@@ -1,342 +1,346 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2015-2020 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 import time
22 22 import errno
23 23 import logging
24 24
25 25 import msgpack
26 26 import gevent
27 27 import redis
28 28
29 29 from dogpile.cache.api import CachedValue
30 30 from dogpile.cache.backends import memory as memory_backend
31 31 from dogpile.cache.backends import file as file_backend
32 32 from dogpile.cache.backends import redis as redis_backend
33 33 from dogpile.cache.backends.file import NO_VALUE, compat, FileLock
34 34 from dogpile.cache.util import memoized_property
35 35
36 36 from rhodecode.lib.memory_lru_dict import LRUDict, LRUDictDebug
37 37
38 38
39 39 _default_max_size = 1024
40 40
41 41 log = logging.getLogger(__name__)
42 42
43 43
44 44 class LRUMemoryBackend(memory_backend.MemoryBackend):
45 45 key_prefix = 'lru_mem_backend'
46 46 pickle_values = False
47 47
48 48 def __init__(self, arguments):
49 49 max_size = arguments.pop('max_size', _default_max_size)
50 50
51 51 LRUDictClass = LRUDict
52 52 if arguments.pop('log_key_count', None):
53 53 LRUDictClass = LRUDictDebug
54 54
55 55 arguments['cache_dict'] = LRUDictClass(max_size)
56 56 super(LRUMemoryBackend, self).__init__(arguments)
57 57
58 58 def delete(self, key):
59 59 try:
60 60 del self._cache[key]
61 61 except KeyError:
62 62 # we don't care if key isn't there at deletion
63 63 pass
64 64
65 65 def delete_multi(self, keys):
66 66 for key in keys:
67 67 self.delete(key)
68 68
69 69
70 70 class PickleSerializer(object):
71 71
72 72 def _dumps(self, value, safe=False):
73 73 try:
74 74 return compat.pickle.dumps(value)
75 75 except Exception:
76 76 if safe:
77 77 return NO_VALUE
78 78 else:
79 79 raise
80 80
81 81 def _loads(self, value, safe=True):
82 82 try:
83 83 return compat.pickle.loads(value)
84 84 except Exception:
85 85 if safe:
86 86 return NO_VALUE
87 87 else:
88 88 raise
89 89
90 90
91 91 class MsgPackSerializer(object):
92 92
93 93 def _dumps(self, value, safe=False):
94 94 try:
95 95 return msgpack.packb(value)
96 96 except Exception:
97 97 if safe:
98 98 return NO_VALUE
99 99 else:
100 100 raise
101 101
102 102 def _loads(self, value, safe=True):
103 103 """
104 104 pickle maintains the `CachedValue` wrapper of the tuple;
105 105 msgpack does not, so it must be added back in.
106 106 """
107 107 try:
108 108 value = msgpack.unpackb(value, use_list=False)
109 109 return CachedValue(*value)
110 110 except Exception:
111 111 if safe:
112 112 return NO_VALUE
113 113 else:
114 114 raise
115 115
116 116
117 117 import fcntl
118 118 flock_org = fcntl.flock
119 119
120 120
121 121 class CustomLockFactory(FileLock):
122 122
123 123 @memoized_property
124 124 def _module(self):
125 125
126 126 def gevent_flock(fd, operation):
127 127 """
128 128 Gevent compatible flock
129 129 """
130 130 # set non-blocking, this will cause an exception if we cannot acquire a lock
131 131 operation |= fcntl.LOCK_NB
132 132 start_lock_time = time.time()
133 133 timeout = 60 * 15 # 15min
134 134 while True:
135 135 try:
136 136 flock_org(fd, operation)
137 137 # lock has been acquired
138 138 break
139 139 except (OSError, IOError) as e:
140 140 # raise on errors other than 'Resource temporarily unavailable'
141 141 if e.errno != errno.EAGAIN:
142 142 raise
143 143 elif (time.time() - start_lock_time) > timeout:
144 144 # waited too much time on a lock, better to fail than loop forever
145 145 log.error('Failed to acquire lock on `%s` after waiting %ss',
146 146 self.filename, timeout)
147 147 raise
148 148 wait_timeout = 0.03
149 149 log.debug('Failed to acquire lock on `%s`, retry in %ss',
150 150 self.filename, wait_timeout)
151 151 gevent.sleep(wait_timeout)
152 152
153 153 fcntl.flock = gevent_flock
154 154 return fcntl
155 155
156 156
157 157 class FileNamespaceBackend(PickleSerializer, file_backend.DBMBackend):
158 158 key_prefix = 'file_backend'
159 159
160 160 def __init__(self, arguments):
161 161 arguments['lock_factory'] = CustomLockFactory
162 162 db_file = arguments.get('filename')
163 163
164 164 log.debug('initializing %s DB in %s', self.__class__.__name__, db_file)
165 165 try:
166 166 super(FileNamespaceBackend, self).__init__(arguments)
167 167 except Exception:
168 168 log.error('Failed to initialize db at: %s', db_file)
169 169 raise
170 170
171 171 def __repr__(self):
172 172 return '{} `{}`'.format(self.__class__, self.filename)
173 173
174 174 def list_keys(self, prefix=''):
175 175 prefix = '{}:{}'.format(self.key_prefix, prefix)
176 176
177 177 def cond(v):
178 178 if not prefix:
179 179 return True
180 180
181 181 if v.startswith(prefix):
182 182 return True
183 183 return False
184 184
185 185 with self._dbm_file(True) as dbm:
186 186 try:
187 187 return filter(cond, dbm.keys())
188 188 except Exception:
189 189 log.error('Failed to fetch DBM keys from DB: %s', self.get_store())
190 190 raise
191 191
192 192 def get_store(self):
193 193 return self.filename
194 194
195 195 def _dbm_get(self, key):
196 196 with self._dbm_file(False) as dbm:
197 197 if hasattr(dbm, 'get'):
198 198 value = dbm.get(key, NO_VALUE)
199 199 else:
200 200 # gdbm objects lack a .get method
201 201 try:
202 202 value = dbm[key]
203 203 except KeyError:
204 204 value = NO_VALUE
205 205 if value is not NO_VALUE:
206 206 value = self._loads(value)
207 207 return value
208 208
209 209 def get(self, key):
210 210 try:
211 211 return self._dbm_get(key)
212 212 except Exception:
213 213 log.error('Failed to fetch DBM key %s from DB: %s', key, self.get_store())
214 214 raise
215 215
216 216 def set(self, key, value):
217 217 with self._dbm_file(True) as dbm:
218 218 dbm[key] = self._dumps(value)
219 219
220 220 def set_multi(self, mapping):
221 221 with self._dbm_file(True) as dbm:
222 222 for key, value in mapping.items():
223 223 dbm[key] = self._dumps(value)
224 224
225 225
226 226 class BaseRedisBackend(redis_backend.RedisBackend):
227 key_prefix = ''
228
229 def __init__(self, arguments):
230 super(BaseRedisBackend, self).__init__(arguments)
231 self._lock_timeout = self.lock_timeout
232 self._lock_auto_renewal = arguments.pop("lock_auto_renewal", False)
233
234 if self._lock_auto_renewal and not self._lock_timeout:
235 # set default timeout for auto_renewal
236 self._lock_timeout = 60
227 237
228 238 def _create_client(self):
229 239 args = {}
230 240
231 241 if self.url is not None:
232 242 args.update(url=self.url)
233 243
234 244 else:
235 245 args.update(
236 246 host=self.host, password=self.password,
237 247 port=self.port, db=self.db
238 248 )
239 249
240 250 connection_pool = redis.ConnectionPool(**args)
241 251
242 252 return redis.StrictRedis(connection_pool=connection_pool)
243 253
244 254 def list_keys(self, prefix=''):
245 255 prefix = '{}:{}*'.format(self.key_prefix, prefix)
246 256 return self.client.keys(prefix)
247 257
248 258 def get_store(self):
249 259 return self.client.connection_pool
250 260
251 261 def get(self, key):
252 262 value = self.client.get(key)
253 263 if value is None:
254 264 return NO_VALUE
255 265 return self._loads(value)
256 266
257 267 def get_multi(self, keys):
258 268 if not keys:
259 269 return []
260 270 values = self.client.mget(keys)
261 271 loads = self._loads
262 272 return [
263 273 loads(v) if v is not None else NO_VALUE
264 274 for v in values]
265 275
266 276 def set(self, key, value):
267 277 if self.redis_expiration_time:
268 278 self.client.setex(key, self.redis_expiration_time,
269 279 self._dumps(value))
270 280 else:
271 281 self.client.set(key, self._dumps(value))
272 282
273 283 def set_multi(self, mapping):
274 284 dumps = self._dumps
275 285 mapping = dict(
276 286 (k, dumps(v))
277 287 for k, v in mapping.items()
278 288 )
279 289
280 290 if not self.redis_expiration_time:
281 291 self.client.mset(mapping)
282 292 else:
283 293 pipe = self.client.pipeline()
284 294 for key, value in mapping.items():
285 295 pipe.setex(key, self.redis_expiration_time, value)
286 296 pipe.execute()
287 297
288 298 def get_mutex(self, key):
289 299 if self.distributed_lock:
290 300 lock_key = redis_backend.u('_lock_{0}').format(key)
291 301 log.debug('Trying to acquire Redis lock for key %s', lock_key)
292
293 auto_renewal = True
294 lock_timeout = self.lock_timeout
295 if auto_renewal and not self.lock_timeout:
296 # set default timeout for auto_renewal
297 lock_timeout = 10
298 return get_mutex_lock(self.client, lock_key, lock_timeout,
299 auto_renewal=auto_renewal)
302 return get_mutex_lock(self.client, lock_key, self._lock_timeout,
303 auto_renewal=self._lock_auto_renewal)
300 304 else:
301 305 return None
302 306
303 307
304 308 class RedisPickleBackend(PickleSerializer, BaseRedisBackend):
305 309 key_prefix = 'redis_pickle_backend'
306 310 pass
307 311
308 312
309 313 class RedisMsgPackBackend(MsgPackSerializer, BaseRedisBackend):
310 314 key_prefix = 'redis_msgpack_backend'
311 315 pass
312 316
313 317
314 318 def get_mutex_lock(client, lock_key, lock_timeout, auto_renewal=False):
315 319 import redis_lock
316 320
317 321 class _RedisLockWrapper(object):
318 322 """LockWrapper for redis_lock"""
319 323
320 324 def __init__(self):
321 325 pass
322 326
323 327 @property
324 328 def lock(self):
325 329 return redis_lock.Lock(
326 330 redis_client=client,
327 331 name=lock_key,
328 332 expire=lock_timeout,
329 333 auto_renewal=auto_renewal,
330 334 strict=True,
331 335 )
332 336
333 337 def acquire(self, wait=True):
334 338 return self.lock.acquire(wait)
335 339
336 340 def release(self):
337 341 try:
338 342 self.lock.release()
339 343 except redis_lock.NotAcquired:
340 344 pass
341 345
342 346 return _RedisLockWrapper()
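On the backend side, `get_mutex()` no longer hard-codes the renewal flag and timeout; it passes the values parsed from the ini straight to `redis_lock`, and the new `__init__` falls back to a 60s `lock_timeout` when auto-renewal is requested without an explicit timeout. A rough sketch of the resulting lock behaviour, assuming a Redis server on localhost; the key name and the sleep are placeholders for real cache work:

import time

import redis
import redis_lock

client = redis.StrictRedis()  # assumes Redis on localhost:6379

# auto_renewal spawns a background thread that keeps extending the
# lock's TTL while the holder is alive; if the process dies, renewal
# stops and the lock expires on its own instead of going stale.
lock = redis_lock.Lock(
    redis_client=client,
    name='demo_cache_key',  # RhodeCode uses '_lock_<cache key>' names
    expire=5,               # the patched backend defaults to 60 when unset
    auto_renewal=True,
    strict=True,
)

if lock.acquire():
    try:
        time.sleep(10)  # hold the lock longer than `expire`
        # TTL is still positive because the renewal thread extended it
        print(client.ttl('lock:demo_cache_key'))
    finally:
        lock.release()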